query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict)
---|---|---|---|
Return multinomial selection with different probabilities.
|
def multinomial(prob, unique=False, rng=np.random):
    rechoose = np.where(prob.sum(axis=1) > 0.0)[0]
    choice = np.zeros(prob.shape[0], int) - 1
    while len(rechoose) > 0:
        prob = prob / prob.sum(axis=1, keepdims=True)
        rnd = rng.rand(len(rechoose))[:, None]
        choice[rechoose] = (rnd > prob[rechoose, :].cumsum(axis=1)).sum(axis=1)
        if unique:
            # Would be good to find a way to vectorise this
            prob[:, choice] = 0.0
            rechoose = np.array([idx for idx in range(len(choice))
                                 if np.any(choice[:idx] == choice[idx]) and
                                 (prob[idx, :].sum() > 0)])
            hamstrung = np.array([idx for idx in range(len(choice))
                                  if np.any(choice[:idx] == choice[idx]) and
                                  (prob[idx, :].sum() == 0)]).astype(int)
            choice[hamstrung] = -1
        else:
            rechoose = np.array([])
    return choice
|
[
"def _sample_multinomial(data=None, shape=_Null, get_prob=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def multinomial_pmf(observation, probabilities):\n # TODO\n probability = {}\n n = 0\n for word, num in observation.items():\n n = n + num\n # pi = xi/n\n for word, num in observation.items():\n probability[word] = num/n\n \n return probability",
"def multinomial_once(W):\n return np.searchsorted(np.cumsum(W), random.rand())",
"def multinomial(inputs, num_samples=1, epsilon=0, normalize=False, **kwargs):\n args = ArgHelper.parse(locals())\n args['epsilon'] = float(epsilon)\n op_lib = array_ops_lib.Multinomial\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n num_samples=num_samples,\n epsilon=args['epsilon'],\n normalize=normalize,\n ).apply([inputs])\n else:\n return op_lib.blend(**args)",
"def get_multinomial_nb_classifier(self):\n \n classifier = MultinomialNB()\n \n return classifier",
"def rand_selector(population, num_to_select=3):\n return random.sample(population, num_to_select)",
"def multinomial_estimation(data, alpha_prior=None):\n n_samp, n_cat = data.shape\n v = np.sum(data, axis=1)\n if np.any(np.abs(v - v[0]) > 0.):\n raise ValueError(\"All columns of the input `data` should sum to the same value, the number of trials \"\n \"per sample.\")\n\n n_trials = int(v[0])\n if alpha_prior is None:\n # Uniform prior which results in the maximum likelihood estimate\n alpha_prior = np.ones(n_cat)\n\n den = np.sum(alpha_prior) - n_cat + n_samp * n_trials\n proba = (alpha_prior - 1. + np.sum(data, axis=0)) / max(den, 1.0)\n\n return proba",
"def uniform_selection(random, population, args):\r\n num_selected = args.setdefault('num_selected', 1)\r\n pop = list(population)\r\n selected = []\r\n for _ in range(num_selected):\r\n selected.append(pop[random.randint(0, len(pop) - 1)])\r\n return selected",
"def random_subset(self, perc=0.5):",
"def get_multinomial_model(sequence):\n return nuc_frequency(sequence)",
"def select(self, competences):\n if competences.ndim < 2:\n competences = competences.reshape(1, -1)\n\n selected_classifiers = []\n best_index = np.argmax(competences, axis=1)\n\n if self.selection_method == 'best':\n # Select the classifier with highest competence level\n selected_classifiers = best_index\n\n elif self.selection_method == 'diff':\n \"\"\"Selects a base classifier if its competence level is significant better than the rest. \n If there is no such classifier, select randomly a base model.\n\n the best classifier will always have diff < diff_thresh. In a case it is\n superior than all others, it will be the only member selected. Otherwise,\n a random classifier from this list is selected.\n \"\"\"\n rng = check_random_state(self.random_state)\n best_competence = competences[np.arange(competences.shape[0]), best_index]\n # best_competence = np.max(competences)\n diff = best_competence.reshape(-1, 1) - competences\n # TODO: Improve this part of the code\n selected_classifiers = np.zeros(diff.shape[0], dtype=np.int)\n for row in range(diff.shape[0]):\n diff_list = list(diff[row, :])\n indices = [idx for idx, _ in enumerate(diff_list) if diff_list[idx] < self.diff_thresh]\n\n if len(indices) == 0:\n indices = range(self.n_classifiers_)\n\n selected_classifiers[row] = rng.choice(indices)\n\n elif self.selection_method == 'random':\n # TODO: Improve this part of the code\n rng = check_random_state(self.random_state)\n selected_classifiers = np.zeros(competences.shape[0], dtype=np.int)\n best_competence = competences[np.arange(competences.shape[0]), best_index]\n for row in range(competences.shape[0]):\n competence_list = list(competences[row, :])\n # Select a random classifier among all with same competence level\n indices = [idx for idx, _ in enumerate(competence_list) if competence_list[idx] == best_competence[row]]\n\n selected_classifiers[row] = rng.choice(indices)\n\n elif self.selection_method == 'all':\n # select all base classifiers with max competence estimates.\n max_value = np.max(competences, axis=1)\n selected_classifiers = (competences == max_value.reshape(competences.shape[0], -1))\n\n return selected_classifiers",
"def random_subset(indicator_arr, sample_prob):\n subset_arr = (np.random.random(indicator_arr.shape) < sample_prob) & indicator_arr\n return subset_arr",
"def RandomSelect(entries, weights):\n assert(len(entries) == len(weights))\n x = random.random()\n i = 0\n tot_probability = 0.0\n while i < len(weights) - 1:\n tot_probability += weights[i]\n if x <= tot_probability:\n break\n i += 1\n return entries[i]",
"def multinomial_as_basic(multinomial, *symbols):\n l = []\n for powers, k in multinomial.iteritems():\n term = [k]\n for i,e in enumerate(powers):\n term.append(Pow(symbols[i], powers[i]))\n l.append(Mul(*term))\n result = Add(*l)\n return result",
"def test_multiselect_max_selections_form(app: Page):\n select_for_kth_multiselect(app, \"male\", 8, False)\n expect(app.locator(\"li\")).to_have_text(\n \"You can only select up to 1 option. Remove an option first.\",\n use_inner_text=True,\n )",
"def rv_outcomes(match_counts, win_prob):\n p_outcome = wins_to_outcomes(win_prob)\n return tfp.distributions.Multinomial(match_counts, probs=p_outcome)",
"def select(self, competences):\n if competences.ndim < 2:\n competences = competences.reshape(1, -1)\n\n # Select classifier if it correctly classified at least one sample\n selected_classifiers = (competences > 0)\n\n # For the rows that are all False (i.e., no base classifier was\n # selected, select all classifiers (set all True)\n selected_classifiers[~np.any(selected_classifiers, axis=1), :] = True\n\n return selected_classifiers",
"def __init__(self):\n\t\tself.MultinomialNB = MultinomialNB()",
"def weighted_choose(total_n, p_dict):\n keys = p_dict.keys()\n r = np.random.choice(keys, p=[p_dict[k] for k in keys], size=total_n) # pylint: disable=no-member\n c = Counter(r)\n return {k:c[k] for k in keys}",
"def select_probability(rates):\n\n probability = [rate / sum(rates) for rate in rates]\n\n r = random.uniform(0, 1)\n selection = 0\n\n for index in range(len(probability)):\n if r <= sum(probability[0:index+1]):\n break\n else:\n selection += 1\n\n return selection"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
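As a quick orientation aid for the row above, here is a hypothetical usage sketch of the multinomial() document (not part of the dataset). The probability matrix and seed are invented, and it assumes numpy plus a legacy RandomState-style generator, since the function calls rng.rand(...).

import numpy as np

# Invented 2x4 probability matrix; each row is an independent selection problem.
prob = np.array([
    [0.1, 0.2, 0.3, 0.4],
    [0.5, 0.5, 0.0, 0.0],
])
rng = np.random.RandomState(42)  # legacy RandomState exposes the .rand() the function expects
picked = multinomial(prob, unique=True, rng=rng)
# picked[i] is the column index drawn for row i; with unique=True the loop
# re-draws so that no column is used by two rows, and rows that run out of
# probability mass are flagged with -1.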
Return bool array of women having babies.
|
def assign_babies(age, female, partner, alive, fr_a, rng):
    n_agent = len(age)
    if age.max() >= len(fr_a):
        # Extend fertility array, assuming fertility rate of 0
        fr_a = np.hstack((fr_a, np.zeros(age.max() + 1 - len(fr_a))))
    have_baby = alive & female & (partner >= 0) & (rng.rand(n_agent) < fr_a[age])
    return have_baby
|
[
"def find_binary(self):\n binary=[]\n for col in self.categorical_variables:\n if len(self.data[col].value_counts())==2:\n binary.append(col)\n return binary",
"def boys(self):\n return self._boys",
"def find_binary(self):\n binary=[]\n for col in self.data.columns:\n if len(self.data[col].value_counts())==2:\n binary.append(col)\n return binary",
"def have_placed_our_bomb(self):\n arr = self.world.bombs.values()\n lis = list(arr)\n if len(lis) > 0:\n for i in range(0, len(lis)):\n if self.name == lis[i].owner.name:\n return True\n else:\n return False",
"def bifacial(self):\n if self._bifacial is None:\n self._bifacial = False\n for v in self.inputs.values():\n bi_flags = ('bifaciality', 'spe_is_bifacial',\n 'cec_is_bifacial', '6par_is_bifacial')\n bi_bools = [bool(v.get(flag, 0)) for flag in bi_flags]\n self._bifacial = any(bi_bools + [self._bifacial])\n\n return self._bifacial",
"def get_sets_of_balls(self):\n LOGGER.debug(\"LEM:gsob\")\n return [self._main_balls, self._lucky_stars]",
"def get_sets_of_balls(self):\n return [self._balls]",
"def women(self):\n return self._women",
"def isAllergic(self, item):\n result = []\n for allergy in self.getAllergies(): \n if allergy.lower() in map(lambda x : x.lower(), item.getContains()): \n result.append(allergy)\n return result",
"def has_hungry_veg(self):\n return all(self.verify_species(i, Species.is_hungry) for i in range(len(self.species_list)))",
"def buoyancy(self):\n\n if \"buoyancy\" not in self.ds:\n var = xroms.buoyancy(self.sig0, self.ds.rho0)\n self.ds[\"buoyancy\"] = var\n return self.ds.buoyancy",
"def all_hobbies(people):\n\n return {one_hobby\n for one_person in people\n for one_hobby in one_person['hobbies']}",
"def in_galaxy_candidates(self):\n return np.array(self._query['in_galaxy_candidates'], dtype=bool)",
"def others(self):\n return [b for b in self.boids if b is not self and self.canSee(b)]",
"def professors_full():\n return np.sum(prof_avail) == 0",
"def young_women(self):\n return self._young_women",
"def test_pentaquarks_are_baryons(PDGIDs):\n _pentaquarks = (PDGIDs.UCbarCUDPentaquark, PDGIDs.AntiUCbarCUDPentaquark)\n for pid in _pentaquarks:\n assert is_baryon(pid)",
"def user_story_14(indi, fam):\n birthdays = []\n for family in fam:\n for child in fam[family].chil:\n if indi[child].birth != \"NA\":\n birthdays.append(indi[child].birth)\n else:\n return False\n for birthday in birthdays:\n count = Counter(birthdays)\n\n multi_births = dict((k, v) for k, v in count.items() if v >= 5)\n if multi_births.items() is None:\n return True\n # print(\"Multiple Birthdays: \" + str(multi_births))\n for key in multi_births:\n print(\"ERROR: US14: There are more than 5 siblings born on \" + str(key))\n return False",
"def test_aromaticity_perception_benzene(self):\n mol = Molecule(smiles='c1ccccc1')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 1)\n self.assertEqual(len(aromatic_bonds), 1)\n for bond in aromatic_bonds[0]:\n self.assertTrue(bond.atom1 in aromatic_atoms[0] and bond.atom2 in aromatic_atoms[0])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
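A hypothetical toy invocation of the assign_babies() document above (not part of the dataset); the ages, flags and fertility table are invented, and rng is again assumed to expose .rand().

import numpy as np

age = np.array([25, 30, 70])
female = np.array([True, True, False])
partner = np.array([1, -1, 0])            # -1 encodes "no partner"
alive = np.array([True, True, True])
fr_a = np.concatenate([np.zeros(20), np.full(21, 0.1)])  # fertility rate by age, ages 0-40
rng = np.random.RandomState(0)

have_baby = assign_babies(age, female, partner, alive, fr_a, rng)
# Boolean array: only agent 0 (alive, female, partnered) can come out True;
# age 70 falls past the table, which the function pads with zero fertility.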
Ensures that val is the default str() type for Python 2 or 3
|
def _str(val):
    if str == bytes:
        if isinstance(val, str):
            return val
        else:
            return str(val)
    else:
        if isinstance(val, str):
            return val
        else:
            return str(val, 'ascii')
|
[
"def __expectString(val):\n if type(val) != str:\n raise Exception('Expected string, received {}'.format(type(val)))",
"def _assert_type_string(self, name, val):\n self._assert_type(name, val, basestring)",
"def can_to_str(_type):\n return isinstance(_type, String)",
"def convert_str_or_none(val: Optional[str]) -> Optional[str]:\n return str(val) if val is not None else val",
"def _ensure_str(obj):\r\n if isinstance(obj, basestring):\r\n return obj\r\n raise TypeError('coercing to Unicode: need string or buffer, %s found' % \\\r\n _type_name(obj))",
"def validate_basestring(option, value):\n if isinstance(value, basestring):\n return value\n raise TypeError(\"Wrong type for %s, value must be an \"\n \"instance of basestring\" % (option,))",
"def validate_string(self, value):\n if value is not None:\n assert isinstance(value, str)\n return value",
"def ensure_string (self, option, default=None):\r\n self._ensure_stringlike(option, \"string\", default)",
"def validateString(value):\n if value is None:\n value = six.u('')\n if not isinstance(value, six.text_type):\n value = six.text_type(value)\n return value",
"def strx(arg):\n if isinstance(arg, StringTypes):\n return str(arg)\n raise TypeError",
"def _assert_is_string(value: typing.Any) -> str:\n assert isinstance(value, str)\n return value",
"def is_string(self):\n return type(self.value) == str",
"def is_string_like ( v ) :\n return isinstance ( v , string_types )",
"def _ValidateString(arg_internal_name, arg_value):\n if isinstance(arg_value, basestring):\n return arg_value\n if isinstance(arg_value, int): # convert int->str if str is really expected\n return str(arg_value)\n raise InvalidArgException(arg_internal_name, arg_value)",
"def _validate_allocation_str(val) -> Union[str, None]:\n\n if isinstance(val, str):\n if val.lower() == 'false':\n return False\n elif val.lower() == 'max':\n return val.lower()\n else:\n return True\n else:\n return True",
"def check_str(cls, **kwargs):\r\n for value in kwargs:\r\n if not isinstance(kwargs[value], str):\r\n raise TypeError(value+' must be of type string')",
"def str_if_not_none(value):\n ...",
"def is_str(x):\n return type(x) == str",
"def _kv_to_str(self, value):\n if isinstance(value, str):\n return value\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, Number):\n return str(value)\n else:\n # don't coerce unrecognized types, TypeError will be raised later\n return value",
"def test_datatype_string_default(self):\n result = arcpy.QA_IDLTaskEngine_DataType_String_TEST()\n\n self.assertEqual(result.getOutput(0), \"cat\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
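A tiny behaviour sketch for the _str() helper above, assuming it is run under Python 3 (where str != bytes) and imported from wherever the document's module lives; the example values are invented.

assert _str("hello") == "hello"    # text objects pass through unchanged
assert _str(b"hello") == "hello"   # bytes are decoded as ASCII via str(val, 'ascii')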
Samples a random row (polygon) in the shapefile
|
def _sample(self):
    return self.shp.sample(1)['geometry'].values[0]
|
[
"def random_polygon(cx, cy, avg_r, variance, frequency, num_verts):\n def clip(_x, vmin, vmax):\n if vmin > vmax:\n return _x\n elif _x < vmin:\n return vmin\n elif _x > vmax:\n return vmax\n else:\n return _x\n\n variance = clip(variance, 0, 1) * 2 * np.pi / num_verts\n frequency = clip(frequency, 0, 1) * avg_r\n\n angle_steps = []\n lower = (2 * np.pi / num_verts) - variance\n upper = (2 * np.pi / num_verts) + variance\n tot = 0\n for i in range(num_verts):\n tmp = np.random.uniform(lower, upper)\n angle_steps.append(tmp)\n tot = tot + tmp\n\n k = tot / (2 * np.pi)\n for i in range(num_verts):\n angle_steps[i] = angle_steps[i] / k\n\n points = []\n angle = np.random.uniform(0, 2 * np.pi)\n for i in range(num_verts):\n r_i = clip(np.random.normal(avg_r, frequency), 0, 2 * avg_r)\n x = cx + r_i * np.cos(angle)\n y = cy + r_i * np.sin(angle)\n points.append((int(x), int(y)))\n\n angle = angle + angle_steps[i]\n\n return points",
"def sample_shape(s, r, n=50, tri=None, threshold=0):\n triangulation = []\n if tri:\n for polygon in tri:\n if polygon_area(polygon) > threshold:\n for triangle in polygon:\n triangulation.append(triangle)\n else:\n for polygon in s:\n tri = triangulate_polygon(polygon)\n if polygon_area(tri) > threshold:\n for triangle in tri:\n triangulation.append(triangle)\n \n polygon_size = sum([triangle_area(t) for t in triangulation])\n triangulation = [(t, triangle_area(t) / polygon_size) for t in triangulation]\n\n triangulation[0] = triangulation[0][0], triangulation[0][1], 0\n for i in range(1, len(triangulation)):\n triangulation[i] = (triangulation[i][0], triangulation[i][1] + triangulation[i-1][1], triangulation[i-1][1])\n\n samples = np.random.uniform(0, 1, n)\n points = []\n for s in samples:\n for t, ma, mi in triangulation:\n if mi <= s <= ma:\n p = point_from_triangle(t)\n points.append(p)\n\n return points",
"def random_grid(xmin, ymin, xmax, ymax, xsize, ysize, count, \n crs=None, mask=None):\n\n x = np.random.rand(count) * (xmax-xmin-xsize) + xmin\n y = np.random.rand(count) * (ymax-ymin-ysize) + ymin\n polys = [box(x, y, x+xsize, y+ysize) for x,y in np.nditer([x,y])]\n\n gdf = gpd.GeoDataFrame({'geometry':polys})\n gdf.crs = crs\n\n if mask:\n gdf = mask_samples(gdf, mask)\n\n return gdf",
"def randomSamples(feature, img, numPoints, seed, classBand, classValues, classPoints, scale = 30):\n img = img.addBands(ee.Image.pixelLonLat())\n \n points = img.stratifiedSample(\n numPoints = numPoints,\n classBand = classBand, \n region = feature,\n seed = seed,\n classValues = classValues, # valores a serem classificados \n classPoints = classPoints, \n dropNulls = True, \n scale = scale\n )\n \n points = points.randomColumn('randCol', 0)\n\n return points.map(setGeometry)",
"def get_random():\n number_of_vertices = np.random.randint(3, 8)\n size_bound = np.random.randint(2, 10)\n bop = BagOfPoints.generate_random(number_of_vertices, size_bound)\n print(\"Random polygon has points = \" + str(bop.list_of_points) + \"\\n\")\n # Get the minimum area bounding box for this object and return it\n # shape = AbstractShape(bop.list_of_points, np.eye(3))\n base_polygon = Polygon(bop.list_of_points)\n return PlacementObject(base_polygon, np.eye(3), \"random\")",
"def sample(self, shape=(), seed=None):\n raise TypeError(\"cannot sample from a flat distribution\")",
"def sample_random(target_geom, n_samples=50, within=True, buffer=None, grid_ref=None, grid_size=None, min_distance=None):\n # Initialise values\n samples = []\n xmin, xmax, ymin, ymax = target_geom.GetEnvelope()\n distance_logical = False\n\n if grid_ref:\n x_ref = grid_ref[0]\n y_ref = grid_ref[1]\n\n xmin = x_ref + (int((xmin - x_ref) / grid_size)) * grid_size\n xmax = x_ref + (int((xmax - x_ref) / grid_size)) * grid_size\n ymin = y_ref + (int((ymin - y_ref) / grid_size)) * grid_size\n ymax = y_ref + (int((ymax - y_ref) / grid_size)) * grid_size\n\n while len(samples) < n_samples:\n sample_x = np.random.choice(np.arange(xmin, xmax, grid_size))\n sample_y = np.random.choice(np.arange(ymin, ymax, grid_size))\n\n if min_distance & len(samples) > 0:\n distance = abs(np.subtract(samples, (sample_x, sample_y)))\n distance_logical = np.any(distance < min_distance)\n\n if not distance_logical:\n\n if within:\n # construct geometry of random point\n point_geometry = ogr.Geometry(ogr.wkbPoint)\n point_geometry.AddPoint(sample_x, sample_y)\n\n if buffer:\n point_geometry = point_geometry.Buffer(buffer)\n\n if point_geometry.Within(target_geom):\n samples.append((sample_x, sample_y))\n\n else:\n samples.append((sample_x, sample_y))\n\n return samples",
"def sample(self, shape=(), seed=None):\n raise TypeError(\"cannot sample from a half flat distribution\")",
"def cell_sample(self):\n if self._cell_sample is None and self.params['cell_frac'] >= 1 and not os.path.exists(self.cell_sample_f):\n self._cell_sample = np.arange(sum(self.cell_subset))\n else:\n if self._cell_sample is None:\n if self.has_cell_sample:\n self._cell_sample = np.loadtxt(self.cell_sample_f, dtype=int)\n else:\n # TODO: balance between random sample and simplex sample\n k, cells = self.w.shape\n n_samples = int(cells*self.params['cell_frac'])\n n_simplex_samples = int(n_samples/2)\n n_rand_samples = n_samples - n_simplex_samples\n samples = simplex_sample.sample(k, n_simplex_samples)\n indices = simplex_sample.data_sample(self.w, samples,\n replace=False)\n unsampled_indices = [x for x in range(cells) if x not in set(indices)]\n import random\n indices_random = random.sample(unsampled_indices, n_rand_samples)\n full_indices = np.concatenate([indices, indices_random])\n full_indices.sort()\n indices = full_indices\n np.savetxt(self.cell_sample_f, indices, fmt='%d')\n self.has_cell_sample = True\n self._cell_sample = indices\n return self._cell_sample",
"def randomPointsOnLine(feature, numPoints):\n points = []\n linestring = shapely.geometry.LineString(feature.geometry.coordinates)\n length = linestring.length\n i = 0\n while i < numPoints:\n dist = random.uniform(0, length)\n point = linestring.interpolate(dist)\n points.append(point)\n i += 1\n return points",
"def random_point_in(polygon):\n minx, miny, maxx, maxy = polygon.envelope.bounds\n while True:\n x = random.uniform(minx, maxx)\n y = random.uniform(miny, maxy)\n if polygon.contains(Point(x, y)):\n return x, y",
"def sample(self, n=None):\n if n is None:\n n = dataiter.DEFAULT_PEEK_ROWS\n n = min(self.nrow, n)\n rows = np.random.choice(self.nrow, n, replace=False)\n return self.slice(np.sort(rows))",
"def _draw_sample(self):\n sample = np.random.random_sample(2)*10\n return sample",
"def view_random(self):\n\n feature = random.choice(self.geography.features)\n self.view_precinct(feature.properties[\"GEOID10\"])",
"def sample_uniform(extent):\n pt = np.random.random(2) # in [0, 1]^2\n lower_corner = np.array([extent.x_min, extent.y_min])\n dim = np.array([extent.x(), extent.y()])\n return np.multiply(dim, pt) + lower_corner",
"def sample_rows_broken(filename, portion):\n\n df = core.read_csv(filename)\n\n num_samples = int(len(df.index) * portion)\n\n return df.sample(n = num_samples).to_records()",
"def draw_sample(self):\n m = self.data[1].shape[0]\n select = np.random.choice(m,self.mtrain,replace=False)\n return tuple([d[select,:] for d in self.data])",
"def sample(self, size):",
"def sample_trajectory(self):\n ind = np.random.choice(self.N, 1, p=self.W[-1, :])\n return self.genealogy(ind)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
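The _sample() document above is a method, so its self.shp is assumed to be a geopandas GeoDataFrame of polygons. Below is a hypothetical, self-contained sketch (class name and data are invented) that exercises the same one-liner.

import geopandas as gpd
from shapely.geometry import box


class PolygonSampler:
    """Hypothetical wrapper; ``shp`` is assumed to hold a 'geometry' column."""

    def __init__(self, shp):
        self.shp = shp

    def _sample(self):
        # Same body as the document: draw one random row, return its shapely geometry
        return self.shp.sample(1)['geometry'].values[0]


shp = gpd.GeoDataFrame({'geometry': [box(0, 0, 1, 1), box(2, 2, 3, 3)]})
print(PolygonSampler(shp)._sample())  # one of the two boxes, picked uniformly at random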
Test colour using kwargs
|
def test_kwarg_colour(self):
    colour = adapter.SFFRGBA(
        red=self.red,
        green=self.green,
        blue=self.blue,
        alpha=self.alpha
    )
    self.assertEqual(colour.red, self.red)
    self.assertEqual(colour.green, self.green)
    self.assertEqual(colour.blue, self.blue)
    self.assertEqual(colour.alpha, self.alpha)
|
[
"def hasColor(*args, **kwargs):\n \n pass",
"def test_kwarg_colour(self):\n colour = schema.SFFRGBA(\n red=self.red,\n green=self.green,\n blue=self.blue,\n alpha=self.alpha\n )\n self.assertEqual(colour.red, self.red)\n self.assertEqual(colour.green, self.green)\n self.assertEqual(colour.blue, self.blue)\n self.assertEqual(colour.alpha, self.alpha)",
"def color(objects, userDefined=int, rgbColor=float):\n pass",
"def is_checkmated(self, color):",
"def test_setColor(self):\n assert_equal(self.testTile.numColor, (10, 10, 10) )\n self.testTile.setColor( (150, 150, 150) )\n assert_equal(self.testTile.numColor, (150, 150, 150) )",
"def test_setColorType(self):\n assert_equal(self.testTile.numColor, (10, 10, 10) )\n # make sure other strings do nothing to the text\n self.testTile.setColorType(\"dummy\")\n assert_equal(self.testTile.numColor, (10, 10, 10) )\n # check the grey text on putting in input\n self.testTile.setColorType(\"input\")\n assert_equal(self.testTile.numColor, (100, 100, 100) )\n # check the purple text on inputting solve\n self.testTile.setColorType(\"solve\")\n assert_equal(self.testTile.numColor, (200, 10, 250) )",
"def with_colors() :\n global __with_colors__\n return bool(__with_colors__)",
"def test_colours(self):\n msg = \"This is a test message\" # type: str\n\n log = HammerVLSILogging.context(\"test\")\n\n HammerVLSILogging.enable_buffering = True # we need this for test\n HammerVLSILogging.clear_callbacks()\n HammerVLSILogging.add_callback(HammerVLSILogging.callback_buffering)\n\n HammerVLSILogging.enable_colour = True\n log.info(msg)\n assert HammerVLSILogging.get_colour_escape(Level.INFO) + \"[test] \" + msg + HammerVLSILogging.COLOUR_CLEAR == HammerVLSILogging.get_buffer()[0]\n\n HammerVLSILogging.enable_colour = False\n log.info(msg)\n assert \"[test] \" + msg == HammerVLSILogging.get_buffer()[0]",
"def is_colorstr(arg):\n try:\n assert len(arg) == 6\n for c in arg:\n assert c in COLORMAP\n except AssertionError:\n raise argparse.ArgumentTypeError('%s is not a valid color string' % arg)\n return arg",
"def test_color_change(generic_task):\n generic_task.set_color('#000')\n assert generic_task.get_color() == '#000'",
"def test_create_rgb_color(self):\n self.assertEqual(self.sut.type, 'rgb')\n self.assertEqual(self.sut.vector, Vector(100, 150, 200))",
"def vtkColor3(*args, **kwargs):\n ...",
"def randomly_color_image(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def test_spectrum_bulb_custom_color():\n bulb = light(LIGHT_WS_CUSTOM_COLOR)\n\n assert bulb.hex_color == \"0\"\n assert bulb.xy_color == (32228, 27203)\n assert bulb.supports_dimmer\n assert bulb.supports_color_temp\n assert bulb.supports_hex_color\n assert bulb.supports_xy_color\n assert not bulb.supports_hsb_xy_color",
"def test_native_random_colour(self):\n colour = adapter.SFFRGBA(random_colour=True)\n self.assertTrue(0 <= colour.red <= 1)\n self.assertTrue(0 <= colour.green <= 1)\n self.assertTrue(0 <= colour.blue <= 1)\n self.assertTrue(0 <= colour.alpha <= 1)",
"def _iscolor(color):\n if color is None:\n return True\n if isinstance(color, (tuple, list, _Color)):\n return len(color) == 3\n if isinstance(color, _INTTYPES):\n return True\n return False",
"def test_set_color(self):\n xknx = XKNX()\n light = Light(\n xknx,\n name=\"TestLight\",\n group_address_switch=\"1/2/3\",\n group_address_color=\"1/2/5\",\n )\n self.loop.run_until_complete(light.set_color((23, 24, 25)))\n self.assertEqual(xknx.telegrams.qsize(), 1)\n telegram = xknx.telegrams.get_nowait()\n self.assertEqual(\n telegram,\n Telegram(\n destination_address=GroupAddress(\"1/2/5\"),\n payload=GroupValueWrite(DPTArray((23, 24, 25))),\n ),\n )\n self.loop.run_until_complete(xknx.devices.process(telegram))\n self.assertEqual(light.current_color, ((23, 24, 25), None))",
"def _parse_scatter_color_args(c, edgecolors, kwargs, xsize,\n get_next_color_func):\n facecolors = kwargs.pop('facecolors', None)\n facecolors = kwargs.pop('facecolor', facecolors)\n edgecolors = kwargs.pop('edgecolor', edgecolors)\n\n kwcolor = kwargs.pop('color', None)\n\n if kwcolor is not None and c is not None:\n raise ValueError(\"Supply a 'c' argument or a 'color'\"\n \" kwarg but not both; they differ but\"\n \" their functionalities overlap.\")\n\n if kwcolor is not None:\n try:\n mcolors.to_rgba_array(kwcolor)\n except ValueError as err:\n raise ValueError(\n \"'color' kwarg must be a color or sequence of color \"\n \"specs. For a sequence of values to be color-mapped, use \"\n \"the 'c' argument instead.\") from err\n if edgecolors is None:\n edgecolors = kwcolor\n if facecolors is None:\n facecolors = kwcolor\n\n if edgecolors is None and not mpl.rcParams['_internal.classic_mode']:\n edgecolors = mpl.rcParams['scatter.edgecolors']\n\n c_was_none = c is None\n if c is None:\n c = (facecolors if facecolors is not None\n else \"b\" if mpl.rcParams['_internal.classic_mode']\n else get_next_color_func())\n c_is_string_or_strings = (\n isinstance(c, str)\n or (np.iterable(c) and len(c) > 0\n and isinstance(cbook._safe_first_finite(c), str)))\n\n def invalid_shape_exception(csize, xsize):\n return ValueError(\n f\"'c' argument has {csize} elements, which is inconsistent \"\n f\"with 'x' and 'y' with size {xsize}.\")\n\n c_is_mapped = False # Unless proven otherwise below.\n valid_shape = True # Unless proven otherwise below.\n if not c_was_none and kwcolor is None and not c_is_string_or_strings:\n try: # First, does 'c' look suitable for value-mapping?\n c = np.asanyarray(c, dtype=float)\n except ValueError:\n pass # Failed to convert to float array; must be color specs.\n else:\n # handle the documented special case of a 2D array with 1\n # row which as RGB(A) to broadcast.\n if c.shape == (1, 4) or c.shape == (1, 3):\n c_is_mapped = False\n if c.size != xsize:\n valid_shape = False\n # If c can be either mapped values or an RGB(A) color, prefer\n # the former if shapes match, the latter otherwise.\n elif c.size == xsize:\n c = c.ravel()\n c_is_mapped = True\n else: # Wrong size; it must not be intended for mapping.\n if c.shape in ((3,), (4,)):\n _api.warn_external(\n \"*c* argument looks like a single numeric RGB or \"\n \"RGBA sequence, which should be avoided as value-\"\n \"mapping will have precedence in case its length \"\n \"matches with *x* & *y*. 
Please use the *color* \"\n \"keyword-argument or provide a 2D array \"\n \"with a single row if you intend to specify \"\n \"the same RGB or RGBA value for all points.\")\n valid_shape = False\n if not c_is_mapped:\n try: # Is 'c' acceptable as PathCollection facecolors?\n colors = mcolors.to_rgba_array(c)\n except (TypeError, ValueError) as err:\n if \"RGBA values should be within 0-1 range\" in str(err):\n raise\n else:\n if not valid_shape:\n raise invalid_shape_exception(c.size, xsize) from err\n # Both the mapping *and* the RGBA conversion failed: pretty\n # severe failure => one may appreciate a verbose feedback.\n raise ValueError(\n f\"'c' argument must be a color, a sequence of colors, \"\n f\"or a sequence of numbers, not {c!r}\") from err\n else:\n if len(colors) not in (0, 1, xsize):\n # NB: remember that a single color is also acceptable.\n # Besides *colors* will be an empty array if c == 'none'.\n raise invalid_shape_exception(len(colors), xsize)\n else:\n colors = None # use cmap, norm after collection is created\n return c, colors, edgecolors",
"def _background_color(self, title, color_edit, rgb_color_ints, func_name):\n passed, rgb_color_ints, rgb_color_floats = self.on_color(\n color_edit, rgb_color_ints, title)\n if passed and 0:\n if self.win_parent is not None:\n settings = self.win_parent.settings\n func_background_color = getattr(settings, func_name)\n func_background_color(rgb_color_floats)\n return passed, rgb_color_ints, rgb_color_floats",
"def test_rgba():\n red = uniform(0, 1)\n green = uniform(0, 1)\n blue = uniform(0, 1)\n alpha = uniform(0, 1)\n rgba = color.RGBA(red, green, blue, alpha)\n assert rgba.red == red\n assert rgba.green == green\n assert rgba.blue == blue\n assert rgba.alpha == alpha"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that using a kwarg random_colour will set random colours
|
def test_native_random_colour(self):
    colour = adapter.SFFRGBA(random_colour=True)
    self.assertTrue(0 <= colour.red <= 1)
    self.assertTrue(0 <= colour.green <= 1)
    self.assertTrue(0 <= colour.blue <= 1)
    self.assertTrue(0 <= colour.alpha <= 1)
|
[
"def setRandomColor(self):\n self.color=mycolors.random()",
"def randomColor():\n return Color((_random.randint(0,255),_random.randint(0,255),_random.randint(0,255)))",
"def random_color():\n colors = [\n Color.HEADER,\n Color.OKBLUE,\n Color.WARNING,\n Color.FAIL\n ]\n return random.choice(colors)",
"def random_color():\n r = lambda: random.randint(0,255)\n return('#%02X%02X%02X' % (r(),r(),r()))",
"def random_color_gen():\n r = lambda: random.randint(0, 255)\n return 'ff%02X%02X%02X' % (r(), r(), r())",
"def random_color():\n return systemrandom.randint(0x000000, 0xFFFFFF)",
"def random_colors():\n def r():\n return random.randint(0, 255)\n return 'rgb({},{},{})'.format(r(), r(), r())",
"def random_color():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n\n return r, g, b",
"def test_random_color(self):\n color = random_color()\n self.assertTrue(re.match(\"^[0-9a-fA-F]{6}$\", color))",
"def randomly_color_image(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def randcolor():\n r = random(0.0, 1.0)\n g = random(0.0, 1.0)\n b = random(0.0, 1.0)\n return vector(r, g, b) # A color is a three-element vector",
"def randomcolor(eps=.1):\n r = round(random()/eps)*eps\n g = round(random()/eps)*eps\n b = round(random()/eps)*eps\n return (r,g,b)",
"def pick_new_color(self):\n colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']\n choice = random.randrange(len(colors))\n self.fav_color = colors[choice]",
"def change_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.gauss(0, change_color_sigma)\n color_green = random.gauss(0, change_color_sigma)\n color_blue = random.randint(0, change_color_sigma)\n color = mutated_genome[index][0]\n newcolor = (color[0] + color_red, color[1] + color_green, color[2] + color_blue)\n else: #color_mode == 'L':\n color_diff = random.randint(0, change_color_sigma)\n color = mutated_genome[index][0]\n newcolor = color + color_diff\n mutated_genome[index][0] = newcolor",
"def mutate_color(mutated_genome):\n change_color_range_sum = new_color_range + change_color_range\n range = switch_color_range + change_color_range_sum\n seed = random.uniform(0,range)\n if seed < new_color_range:\n new_color(mutated_genome)\n elif seed < change_color_range_sum:\n change_color(mutated_genome)\n else: #seed < range:\n switch_colors(mutated_genome)\n #else: # depricated\n # shuffle_colors(mutated_genome)",
"def change_colors():\n global t,u,v,w,x,y,z\n t = randint(0,27)\n u = randint(0,27)\n v = randint(0,27)\n w = randint(0,27)\n x = randint(0,27)\n y = randint(0,27)\n z = randint(0,27)\n return t,u,v,w,x,y,z,",
"def randomColouring(nodes, colours='RGB'):\n return {n: choice(colours) for n in nodes}",
"def randcolor(value=1):\n return rgb_float_to_int(colorsys.hsv_to_rgb(random.random(), 1, value))",
"def get_random_color():\n R = random.randint(200, 250)\n G = random.randint(200, 250)\n B = random.randint(200, 250)\n random_rgb = (R, G, B)\n return random_rgb"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
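The test above only asserts what random_colour=True produces, not how. For readers unfamiliar with sfftk-rw, here is a hypothetical stand-in class (explicitly not adapter.SFFRGBA) that reproduces the asserted behaviour of filling every channel with a value in [0, 1].

import random


class RGBA:
    """Invented stand-in, not sfftk-rw's adapter.SFFRGBA."""

    def __init__(self, red=0.0, green=0.0, blue=0.0, alpha=1.0, random_colour=False):
        if random_colour:
            red, green, blue, alpha = (random.uniform(0, 1) for _ in range(4))
        self.red, self.green, self.blue, self.alpha = red, green, blue, alpha


colour = RGBA(random_colour=True)
assert all(0 <= c <= 1 for c in (colour.red, colour.green, colour.blue, colour.alpha))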
Test that we can create an SFFExternalReferenceList from a literal list
|
def test_create_from_list(self):
    ee = [adapter.SFFExternalReference(
        resource=self.rr[i],
        url=self.uu[i],
        accession=self.aa[i],
        label=self.ll[i],
        description=self.dd[i]
    ) for i in _xrange(self._no_items)]
    E = adapter.SFFExternalReferenceList()
    print(ee)
    print(E)
    ES = adapter.SFFExternalReferenceList(new_obj=True, )
    print(ES)
|
[
"def _build_feature_references(feature_ref_strs: List[str]) -> List[FeatureRefProto]:\n\n feature_refs = [FeatureRef.from_str(ref_str) for ref_str in feature_ref_strs]\n feature_ref_protos = [ref.to_proto() for ref in feature_refs]\n\n return feature_ref_protos",
"def parse_list(list_bytes):\n return _v1(list_bytes)",
"def res_list_literal(self, ns, elt_types):\n raise NotImplementedError('subclasses must implement')",
"def create_list_example(self, text_list):\n examples = []\n set_type = \"ltest\"\n for (i, text) in enumerate(text_list):\n guid = \"%s-%s\" % (set_type, i)\n text_a = text\n text_b = None\n label = '0'\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n \n return examples",
"def test_listr(self, list_, expected):\n res = _listr(list_)\n assert res == expected",
"def test_list_validation():\n with pytest.raises(ValueError):\n # labels must be a list of string, but contains an int\n IngredientSpec(labels=[\"Label 1\", 17], name=\"foo\")\n\n ingredient = IngredientSpec(labels=[\"Label 1\", \"label 2\"], name=\"foo\")\n with pytest.raises(TypeError):\n # cannot append an int to a list of strings\n ingredient.labels.append(17)\n\n with pytest.raises(ValueError):\n # list of conditions cannot contain a property\n MeasurementRun(\"A measurement\", conditions=[Property(\"not a condition\")])",
"def test_is_list_true(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n rlp_encoded_item = rlp.encode([1, 2, 3])\n\n assert contract.functions.testIsList(rlp_encoded_item).call() is True",
"def create_source_list(uris_list):\n return(manifest.Sources(\n [{\"uris\": manifest.Uris([tmp1])} for tmp1 in uris_list]\n ))",
"def test_list(self):\n payloads = [\n b'payload A',\n b'second payload'\n b'payload 3+'\n ]\n res = []\n provider = payload_provider.List(payloads)\n for payload in provider:\n res.append(payload)\n for num, payload in enumerate(payloads):\n self.assertEqual(res[num], payload, 'Payload not expected in position {0}'.format(num))",
"def test_list_concepts(self):\r\n concepts = analyse.list_concepts(self.dom)\r\n test_concepts = [\r\n 'http://concept.net/1',\r\n 'http://concept.net/2',\r\n 'http://concept.net/3'\r\n ]\r\n self.assertEqual(concepts, test_concepts)",
"def testListInit(self):\n cm = CoordinateMapper(exons)\n # FIXME CompoundLocation does not have __eq__\n self.assertEqual(str(cm.exons), str(self.sf.location))",
"def test_list(self):\n mylist = List()\n pylist = []\n self.assertEqual(str(mylist), str(pylist))\n\n for i in range(10, 20, 2):\n mylist.add(i)\n pylist.append(i)\n self.assertEqual(str(mylist), str(pylist))",
"def test_list_schemeless_concepts(self):\r\n schemeless = analyse.list_schemeless_concepts(self.dom)\r\n test_schemeless = [\r\n 'http://concept.net/2',\r\n 'http://concept.net/3'\r\n ]\r\n self.assertEquals(schemeless, test_schemeless)",
"def test_nsx_struct_get_list_create(self):\n\n # create\n nsx_object = {}\n self.assertEqual(\n common.nsx_struct_get_list(nsx_object, 'a/b/c'),\n []\n )\n self.assertEqual(\n nsx_object,\n {\n 'a': {\n 'b': {\n 'c': [\n ]\n }\n }\n }\n )",
"def testList(self):\n self.assertEqual(bit.list(0), [ 0 ])\n self.assertEqual(bit.list(1), [ 1 ])\n self.assertEqual(bit.list(2), [ 1, 0 ])\n self.assertEqual(bit.list(3), [ 1, 1 ])\n self.assertEqual(bit.list(4), [ 1, 0, 0 ])\n self.assertEqual(bit.list(10), [ 1, 0, 1, 0 ])",
"def try_adding_struc_list_to_a_version(struc_list, version, restrict_to_these_atoms_list):\r\n\r\n struc_list = [struc for struc in struc_list if struc]\r\n if not struc_list:\r\n return []\r\n\r\n candidate_atoms = try_to_match_tentative_struc_list_to_dict(struc_list, restrict_to_these_atoms_list)\r\n instatoms = [InstAtom(atom, struc_list, quality_match_score, numeric_mappings) for (atom, quality_match_score, numeric_mappings) in candidate_atoms]\r\n new_versions = [version.add_instatoms([instatom]) for instatom in instatoms]\r\n return new_versions",
"def test_gcb_create_reference_list_command_when_list_already_exists(client):\n from GoogleChronicleBackstory import gcb_create_reference_list_command\n args = {\n \"name\": \"dummy_name\",\n \"description\": \"dummy_description\",\n \"lines\": \"dummy\"\n }\n with open('test_data/gcb_create_reference_list_400.json') as f:\n response = f.read()\n mock_response = (\n Response(dict(status=409)),\n response\n )\n client.http_client.request.return_value = mock_response\n with pytest.raises(ValueError) as e:\n gcb_create_reference_list_command(client, args)\n assert str(e.value) == 'Status code: 409\\nError: generic::already_exists: list with name' \\\n ' demo_list14_created_from_api already exists'",
"def test_list_to_rlp_item(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n rlp_encoded_item = rlp.encode([\"cat\", \"dog\"])\n rlp_item_from_contract = contract.functions.testToRlpItem(rlp_encoded_item).call()\n\n assert rlp_item_from_contract[0] == 9",
"def test_list_value():\n\tbackup_and_restore(\n\t\tlambda context: put_values(lib.SET, \"key\", LIST_VALUES),\n\t\tNone,\n\t\tlambda context: check_values(lib.SET, \"key\", LIST_VALUES)\n\t)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can create from a gds_type
|
def test_create_from_gds_type(self):
    _b = emdb_sff.biological_annotationType(
        name=self.name,
        description=self.description,
        number_of_instances=self.no,
        external_references=self._external_references
    )
    b = adapter.SFFBiologicalAnnotation.from_gds_type(_b)
    self.assertRegex(
        _str(b),
        r"""SFFBiologicalAnnotation\(""" \
        r"""name="{}", description="{}", """ \
        r"""number_of_instances={}, """ \
        r"""external_references=SFFExternalReferenceList\(\[.*\]\)\)""".format(
            self.name,
            self.description,
            self.no
        )
    )
    self.assertEqual(b.name, self.name)
    self.assertEqual(b.description, self.description)
    self.assertEqual(b.number_of_instances, self.no)
    self.assertEqual(b.external_references, self.external_references)
|
[
"def test_create_from_gds_type(self):\n _S = emdb_sff.software_type()\n S = adapter.SFFSoftware.from_gds_type(_S)\n self.assertRegex(\n _str(S),\n r\"\"\"SFFSoftware\\(id={}, name={}, version={}, processing_details={}\\)\"\"\".format(\n S.id, None, None, None\n )\n )\n self.assertIsNone(S.id)\n self.assertIsNone(S.name)\n self.assertIsNone(S.version)\n self.assertIsNone(S.processing_details)\n # no id\n name = rw.random_word()\n version = rw.random_word()\n processing_details = li.get_sentences(sentences=_random_integer(start=2, stop=5))\n _S = emdb_sff.software_type(\n name=name,\n version=version,\n processing_details=processing_details\n )\n S = adapter.SFFSoftware.from_gds_type(_S)\n self.assertRegex(\n _str(S),\n r\"\"\"SFFSoftware\\(id=None, name=\".+\", version=\".+\", processing_details=\".+\"\\)\"\"\"\n )\n self.assertIsNone(S.id)\n self.assertEqual(S.name, name)\n self.assertEqual(S.version, version)\n self.assertEqual(S.processing_details, processing_details)\n # with id\n _id = _random_integer()\n name = rw.random_word()\n version = rw.random_word()\n processing_details = li.get_sentences(sentences=_random_integer(start=2, stop=5))\n _S = emdb_sff.software_type(\n id=_id,\n name=name,\n version=version,\n processing_details=processing_details\n )\n S = adapter.SFFSoftware.from_gds_type(_S)\n self.assertRegex(\n _str(S),\n r\"\"\"SFFSoftware\\(id=\\d+, name=\".+\", version=\".+\", processing_details=\".+\"\\)\"\"\"\n )\n self.assertEqual(S.id, _id)\n self.assertEqual(S.name, name)\n self.assertEqual(S.version, version)\n self.assertEqual(S.processing_details, processing_details)",
"def test_create_from_gds_type(self):\n # without ids\n _TT = emdb_sff.transform_listType(self.gds_txs)\n TT = adapter.SFFTransformList.from_gds_type(_TT)\n self.assertEqual(self.tx_count, len(TT))\n self.assertEqual(len(TT.get_ids()), 0)\n # with ids\n _TT = emdb_sff.transform_listType(self.gds_txs_with_ids)\n TT = adapter.SFFTransformList.from_gds_type(_TT)\n self.assertEqual(self.tx_count, len(TT))\n self.assertEqual(list(TT.get_ids()), list(_xrange(len(TT))))",
"def test_tool_types_create(self):\n pass",
"def test_type_object_creation(self):\n\t\trestaurant_type = Type.objects.create(name=\"Test Restaurant Type\")\n\t\tself.assertIs(isinstance(restaurant_type, Type), True)\n\t\tself.assertEqual(restaurant_type.__str__(), restaurant_type.name)",
"def test_create_type_no_parent(self, app):\n\n with app.app_context():\n conn = get_connection(current_app)\n\n name = 'Book'\n desc = 'A physical or digital book'\n resp = conn.create_type(name, desc)\n\n assert type(resp) == LtpType\n assert str(resp.name) == name\n assert str(resp.description) == desc",
"def test_genre_creation(self):\n\t\tgenre = self.create_genre()\n\t\tself.assertTrue(isinstance(genre, Genre))",
"def test_type_mapping(store_session):\n\n _, session = store_session\n Thing = session.get_class(surf.ns.OWL.Thing)\n\n t1 = Thing(\"http://t1\")\n t1.surf_string_value = \"text\"\n t1.surf_bool_value = True\n t1.surf_float_value = 3.14\n t1.surf_int_value = 2010\n t1.save()\n\n t1 = Thing(\"http://t1\")\n assert type(t1.surf_string_value.first) == str\n assert type(t1.surf_bool_value.first) == bool\n assert type(t1.surf_float_value.first) == float\n assert type(t1.surf_int_value.first) == int",
"def test_by_type_field_instantiate(self):\n pet_owner = PetOwner(by_type_contact='foo@bar.com')\n self.assertEqual(pet_owner.by_type_contact, 'foo@bar.com')",
"def test_service_discovery_instance_type_post(self):\n pass",
"def test_instantiating_a_new_type_returns_expected_type():\n NewType = make_type(int, \"NewType\", [numeric.Minimum(0), numeric.Maximum(10)])\n instance = NewType(5)\n assert isinstance(instance, NewType)\n assert isinstance(instance, int)",
"def test_create(self):\n model = DescriptorModel(field=DescriptorType(value=1))\n self.assertEqual(model.field.value, 3)",
"def test_donor_type():\n donor = Donor('test')\n assert type(donor) is Donor",
"def test_parameterized_serializer_create(self):\n parent = test_serializers.ExampleTypeFieldSerializer(\n data=self.type_field_data)\n parent.is_valid(raise_exception=True)\n create_result = parent.create(validated_data=parent.validated_data)\n self.assertEqual(\n create_result, models.Person.objects.get(),\n 'Wrong type field serializer create results')",
"def create(self, validated_data):\n\t\treturn Type.objects.create(**validated_data)",
"def test_check_genotype_sanity():\n\n with pytest.raises(ValueError):\n check.genotype_sanity([\"00\",\"1\"])\n\n with pytest.raises(ValueError):\n check.genotype_sanity([[1],\"1\"])\n\n with pytest.raises(ValueError):\n check.genotype_sanity([5,\"1\"])\n\n with pytest.raises(ValueError):\n check.genotype_sanity([\"00\",\"01\"],wildtype=\"000\")\n\n # Should not throw error\n check.genotype_sanity([\"00\",\"01\"])\n check.genotype_sanity([\"00\",\"01\"],wildtype=\"00\")",
"def test_typed_key_factory(value_type):\n key = Key(identifier=IdentifierFactory(source=value_type), pseudonym=PseudonymFactory())\n\n typed_key = KeyTypeFactory().create_typed_key(key)\n assert typed_key.value_type == value_type",
"def test_Regression_dtype():\n some = \"A wrong data type of type string\" \n with pytest.raises(TypeError):\n Regression(some)",
"def sample_consumption_type(user, cons_type=\"Rain\"):\n return Consumption_type.objects.create(user=user, cons_type=cons_type)",
"def test_source_dataset_factory_create(self):\n source_dataset = factories.SourceDatasetFactory.create()\n self.assertIsInstance(source_dataset, models.SourceDataset)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can create from unicode using __init__
|
def test_create_init_unicode(self):
    v = adapter.SFFVertices(
        num_vertices=self.num_vertices,
        mode=self.mode,
        endianness=self.endian,
        data=self.unicode
    )
    self.assertIsInstance(v, adapter.SFFVertices)
    self.assertEqual(v.mode, self.mode)
    self.assertEqual(v.endianness, self.endian)
    self.assertEqual(v.data, adapter.SFFVertices._encode(self.data, mode=self.mode, endianness=self.endian))
    self.assertEqual(v.data_array.flatten().tolist(), self.data.flatten().tolist())
    if len(v.data) < 100:
        _data = _decode(v.data, u"utf-8")
    else:
        _data = _decode(v.data[:100] + u"...", u"utf-8")
    self.assertEqual(
        _str(v),
        u"""SFFVertices(num_vertices={}, mode="{}", endianness="{}", data="{}")""".format(
            self.num_vertices,
            self.mode,
            self.endian,
            _data
        )
    )
    with self.assertRaisesRegex(ValueError, r".*mismatch.*stated.*retrieved.*"):
        v = adapter.SFFVertices(
            num_vertices=self.num_vertices * 2,
            mode=self.mode,
            endianness=self.endian,
            data=self.bytes
        )
|
[
"def _init_unicode():\n global _unicode_properties\n global _unicode_key_pattern\n _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))\n _unicode_key_pattern = _build_unicode_key_pattern()",
"def __init__(self, length=None, **kwargs):\n kwargs.setdefault('convert_unicode', True)\n kwargs.setdefault('assert_unicode', 'warn')\n super(UnicodeText, self).__init__(length=length, **kwargs)",
"def from_str(cls, as_str):",
"def test__literal__handles_unicode(self):\n renderer = Renderer(string_encoding='ascii')\n\n literal = renderer.literal\n\n self.assertEqual(literal(u\"foo\"), \"foo\")",
"def test_unicode_conversion():\n assert m.good_utf8_string() == \"Say utf8‽ 🎂 𝐀\"\n assert m.good_utf16_string() == \"b‽🎂𝐀z\"\n assert m.good_utf32_string() == \"a𝐀🎂‽z\"\n assert m.good_wchar_string() == \"a⸘𝐀z\"\n if hasattr(m, \"has_u8string\"):\n assert m.good_utf8_u8string() == \"Say utf8‽ 🎂 𝐀\"\n\n with pytest.raises(UnicodeDecodeError):\n m.bad_utf8_string()\n\n with pytest.raises(UnicodeDecodeError):\n m.bad_utf16_string()\n\n # These are provided only if they actually fail (they don't when 32-bit)\n if hasattr(m, \"bad_utf32_string\"):\n with pytest.raises(UnicodeDecodeError):\n m.bad_utf32_string()\n if hasattr(m, \"bad_wchar_string\"):\n with pytest.raises(UnicodeDecodeError):\n m.bad_wchar_string()\n if hasattr(m, \"has_u8string\"):\n with pytest.raises(UnicodeDecodeError):\n m.bad_utf8_u8string()\n\n assert m.u8_Z() == \"Z\"\n assert m.u8_eacute() == \"é\"\n assert m.u16_ibang() == \"‽\"\n assert m.u32_mathbfA() == \"𝐀\"\n assert m.wchar_heart() == \"♥\"\n if hasattr(m, \"has_u8string\"):\n assert m.u8_char8_Z() == \"Z\"",
"def __new__(cls,value):\r\n if type(value) is unicode:\r\n was_unicode = True\r\n if str is not unicode:\r\n value = value.encode(\"utf-8\")\r\n else:\r\n was_unicode = False\r\n if str is not bytes:\r\n raise Error(\"Don't pass bytestrings to pyenchant\")\r\n self = str.__new__(cls,value)\r\n self._was_unicode = was_unicode\r\n return self",
"def test__literal__returns_unicode(self):\n renderer = Renderer(string_encoding='ascii')\n literal = renderer.literal\n\n self.assertEqual(type(literal(\"foo\")), unicode)\n\n class MyUnicode(unicode):\n pass\n\n s = MyUnicode(\"abc\")\n\n self.assertEqual(type(s), MyUnicode)\n self.assertTrue(isinstance(s, unicode))\n self.assertEqual(type(literal(s)), unicode)",
"def testTemplateUnicode(self):\n # And they will be converted to UTF8 eventually\n template = u'We \\u2665 Python'\n self.assertEqual(self.parse(template), template.encode('UTF8'))",
"def test__literal__uses_renderer_unicode(self):\n renderer = self._make_renderer()\n renderer.unicode = mock_unicode\n\n literal = renderer.literal\n\n b = u\"foo\".encode(\"ascii\")\n self.assertEqual(literal(b), \"FOO\")",
"def from_unicode(cls, source, text_encoding='auto',\n bytes_encoding='auto'):\n\n # Give preference to pdfdocencoding, since it only\n # requires one raw byte per character, rather than two.\n if text_encoding != 'utf16':\n force_pdfdoc = text_encoding == 'pdfdocencoding'\n if text_encoding != 'auto' and not force_pdfdoc:\n raise ValueError('Invalid text_encoding value: %s'\n % text_encoding)\n\n if source.startswith(cls.bad_pdfdoc_prefix):\n if force_pdfdoc:\n raise UnicodeError('Prefix of string %r cannot be encoded '\n 'in pdfdocencoding' % source[:20])\n else:\n try:\n raw = source.encode('pdfdocencoding')\n except UnicodeError:\n if force_pdfdoc:\n raise\n else:\n return cls.from_bytes(raw, bytes_encoding)\n\n # If the user is not forcing literal strings,\n # it makes much more sense to use hexadecimal with 2-byte chars\n raw = cls.bytes_bom + source.encode('utf-16-be')\n encoding = 'hex' if bytes_encoding == 'auto' else bytes_encoding\n return cls.from_bytes(raw, encoding)",
"def test_unicode1(self):\r\n # TODO: find something that actually returns suggestions\r\n us1 = raw_unicode(r\"he\\u2149lo\")\r\n self.assertTrue(type(us1) is unicode)\r\n self.assertFalse(self.dict.check(us1))\r\n for s in self.dict.suggest(us1):\r\n self.assertTrue(type(s) is unicode)",
"def test_type_latin(self):\n self.assert_input(\n 'Failed to type latin string',\n u'Hello World')",
"def test_convert_unicode_to_string(self):\n u_string = u'test string'\n result = util.convert_unicode_to_string(u_string)\n assert result == \"test string\"",
"def test_json_unicode(self):\n unicode_string = u\"東西\"\n encoded_and_decoded_string = json.loads(json.dumps(unicode_string))\n self.assertEquals(encoded_and_decoded_string, unicode_string)\n self.failUnless(isinstance(encoded_and_decoded_string, unicode))",
"def test_default(self):\n self.assertEqual(Codec.default(), Latin1Codec())",
"def test_ensureBytesUnicode(self):\n self.assertEqual(b\"hello\", ensureBytes(u\"hello\"))",
"def test_unicode(snapshot):\n expect = u'pépère'\n snapshot.assert_match(expect)",
"def test_unicode(self):\r\n text = raw_unicode(\"\"\"I am a unicode strng with unicode erors.\"\"\")\r\n chkr = SpellChecker(\"en_US\",text)\r\n for n,err in enumerate(chkr):\r\n if n == 0:\r\n self.assertEqual(err.word,raw_unicode(\"unicode\"))\r\n self.assertEqual(err.wordpos,7)\r\n chkr.ignore_always()\r\n if n == 1:\r\n self.assertEqual(err.word,raw_unicode(\"strng\"))\r\n chkr.replace_always(\"string\")\r\n self.assertEqual(chkr._replace_words[raw_unicode(\"strng\")],raw_unicode(\"string\"))\r\n if n == 2:\r\n self.assertEqual(err.word,raw_unicode(\"erors\"))\r\n chkr.replace(\"erros\")\r\n chkr.set_offset(-6)\r\n if n == 3:\r\n self.assertEqual(err.word,raw_unicode(\"erros\"))\r\n chkr.replace(\"errors\")\r\n self.assertEqual(n,3)\r\n self.assertEqual(chkr.get_text(),raw_unicode(\"I am a unicode string with unicode errors.\"))",
"def test_training_with_unicode_bytestring(self):\n conversation = [\n 'Hi, how are you?',\n '\\xe4\\xbd\\xa0\\xe5\\xa5\\xbd\\xe5\\x90\\x97',\n 'Superb!'\n ]\n\n self.trainer.train(conversation)\n\n response = self.chatbot.get_response(conversation[1])\n\n self.assertEqual(response.text, conversation[2])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can create from gds_type
|
def test_create_from_gds_type(self):
    _S = emdb_sff.software_type()
    S = adapter.SFFSoftware.from_gds_type(_S)
    self.assertRegex(
        _str(S),
        r"""SFFSoftware\(id={}, name={}, version={}, processing_details={}\)""".format(
            S.id, None, None, None
        )
    )
    self.assertIsNone(S.id)
    self.assertIsNone(S.name)
    self.assertIsNone(S.version)
    self.assertIsNone(S.processing_details)
    # no id
    name = rw.random_word()
    version = rw.random_word()
    processing_details = li.get_sentences(sentences=_random_integer(start=2, stop=5))
    _S = emdb_sff.software_type(
        name=name,
        version=version,
        processing_details=processing_details
    )
    S = adapter.SFFSoftware.from_gds_type(_S)
    self.assertRegex(
        _str(S),
        r"""SFFSoftware\(id=None, name=".+", version=".+", processing_details=".+"\)"""
    )
    self.assertIsNone(S.id)
    self.assertEqual(S.name, name)
    self.assertEqual(S.version, version)
    self.assertEqual(S.processing_details, processing_details)
    # with id
    _id = _random_integer()
    name = rw.random_word()
    version = rw.random_word()
    processing_details = li.get_sentences(sentences=_random_integer(start=2, stop=5))
    _S = emdb_sff.software_type(
        id=_id,
        name=name,
        version=version,
        processing_details=processing_details
    )
    S = adapter.SFFSoftware.from_gds_type(_S)
    self.assertRegex(
        _str(S),
        r"""SFFSoftware\(id=\d+, name=".+", version=".+", processing_details=".+"\)"""
    )
    self.assertEqual(S.id, _id)
    self.assertEqual(S.name, name)
    self.assertEqual(S.version, version)
    self.assertEqual(S.processing_details, processing_details)
|
[
"def test_create_from_gds_type(self):\n _b = emdb_sff.biological_annotationType(\n name=self.name,\n description=self.description,\n number_of_instances=self.no,\n external_references=self._external_references\n )\n b = adapter.SFFBiologicalAnnotation.from_gds_type(_b)\n self.assertRegex(\n _str(b),\n r\"\"\"SFFBiologicalAnnotation\\(\"\"\" \\\n r\"\"\"name=\"{}\", description=\"{}\", \"\"\" \\\n r\"\"\"number_of_instances={}, \"\"\" \\\n r\"\"\"external_references=SFFExternalReferenceList\\(\\[.*\\]\\)\\)\"\"\".format(\n self.name,\n self.description,\n self.no\n )\n )\n self.assertEqual(b.name, self.name)\n self.assertEqual(b.description, self.description)\n self.assertEqual(b.number_of_instances, self.no)\n self.assertEqual(b.external_references, self.external_references)",
"def test_create_from_gds_type(self):\n # without ids\n _TT = emdb_sff.transform_listType(self.gds_txs)\n TT = adapter.SFFTransformList.from_gds_type(_TT)\n self.assertEqual(self.tx_count, len(TT))\n self.assertEqual(len(TT.get_ids()), 0)\n # with ids\n _TT = emdb_sff.transform_listType(self.gds_txs_with_ids)\n TT = adapter.SFFTransformList.from_gds_type(_TT)\n self.assertEqual(self.tx_count, len(TT))\n self.assertEqual(list(TT.get_ids()), list(_xrange(len(TT))))",
"def test_tool_types_create(self):\n pass",
"def test_type_object_creation(self):\n\t\trestaurant_type = Type.objects.create(name=\"Test Restaurant Type\")\n\t\tself.assertIs(isinstance(restaurant_type, Type), True)\n\t\tself.assertEqual(restaurant_type.__str__(), restaurant_type.name)",
"def test_create_type_no_parent(self, app):\n\n with app.app_context():\n conn = get_connection(current_app)\n\n name = 'Book'\n desc = 'A physical or digital book'\n resp = conn.create_type(name, desc)\n\n assert type(resp) == LtpType\n assert str(resp.name) == name\n assert str(resp.description) == desc",
"def test_type_mapping(store_session):\n\n _, session = store_session\n Thing = session.get_class(surf.ns.OWL.Thing)\n\n t1 = Thing(\"http://t1\")\n t1.surf_string_value = \"text\"\n t1.surf_bool_value = True\n t1.surf_float_value = 3.14\n t1.surf_int_value = 2010\n t1.save()\n\n t1 = Thing(\"http://t1\")\n assert type(t1.surf_string_value.first) == str\n assert type(t1.surf_bool_value.first) == bool\n assert type(t1.surf_float_value.first) == float\n assert type(t1.surf_int_value.first) == int",
"def test_service_discovery_instance_type_post(self):\n pass",
"def test_genre_creation(self):\n\t\tgenre = self.create_genre()\n\t\tself.assertTrue(isinstance(genre, Genre))",
"def test_create(self):\n model = DescriptorModel(field=DescriptorType(value=1))\n self.assertEqual(model.field.value, 3)",
"def test_by_type_field_instantiate(self):\n pet_owner = PetOwner(by_type_contact='foo@bar.com')\n self.assertEqual(pet_owner.by_type_contact, 'foo@bar.com')",
"def test_source_dataset_factory_create(self):\n source_dataset = factories.SourceDatasetFactory.create()\n self.assertIsInstance(source_dataset, models.SourceDataset)",
"def test_instantiating_a_new_type_returns_expected_type():\n NewType = make_type(int, \"NewType\", [numeric.Minimum(0), numeric.Maximum(10)])\n instance = NewType(5)\n assert isinstance(instance, NewType)\n assert isinstance(instance, int)",
"def test_donor_type():\n donor = Donor('test')\n assert type(donor) is Donor",
"def test_parameterized_serializer_create(self):\n parent = test_serializers.ExampleTypeFieldSerializer(\n data=self.type_field_data)\n parent.is_valid(raise_exception=True)\n create_result = parent.create(validated_data=parent.validated_data)\n self.assertEqual(\n create_result, models.Person.objects.get(),\n 'Wrong type field serializer create results')",
"def test_create_ds_metadata(self):\n json_sample = {\"name\": \"Leonhard Euler Party\", \"description\": \"Mathematician Guest List\", \"rows\": 0,\n \"schema\": {\n \"columns\": [{\n \"type\": \"STRING\",\n \"name\": \"Friend\"\n }, {\n \"type\": \"STRING\",\n \"name\": \"Attending\"\n }]\n }\n }\n ds_name = \"Leonhard Euler Party\"\n ds_description = \"Mathematician Guest List\"\n col_dtypes_dict = {\"Friend\": \"STRING\", \"Attending\": \"STRING\"}\n json_generated = DomoData.create_meta_string_from_user_declared(self,\n ds_name=ds_name, ds_descr=ds_description, col_types_dict=col_dtypes_dict)\n self.assertEqual(json_sample, json_generated)",
"def test_devicetype_string(self):\n devicetype = models.Devicetype.objects.create(\n device_type='Soil Moisture Probe'\n )\n\n self.assertEqual(str(devicetype), devicetype.device_type)",
"def create(self, validated_data):\n\t\treturn Type.objects.create(**validated_data)",
"def test_typed_key_factory(value_type):\n key = Key(identifier=IdentifierFactory(source=value_type), pseudonym=PseudonymFactory())\n\n typed_key = KeyTypeFactory().create_typed_key(key)\n assert typed_key.value_type == value_type",
"def test_tool_types_read(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can create from gds_types
|
def test_create_from_gds_type(self):
# without ids
_TT = emdb_sff.transform_listType(self.gds_txs)
TT = adapter.SFFTransformList.from_gds_type(_TT)
self.assertEqual(self.tx_count, len(TT))
self.assertEqual(len(TT.get_ids()), 0)
# with ids
_TT = emdb_sff.transform_listType(self.gds_txs_with_ids)
TT = adapter.SFFTransformList.from_gds_type(_TT)
self.assertEqual(self.tx_count, len(TT))
self.assertEqual(list(TT.get_ids()), list(_xrange(len(TT))))
|
[
"def test_tool_types_create(self):\n pass",
"def test_create_from_gds_type(self):\n _S = emdb_sff.software_type()\n S = adapter.SFFSoftware.from_gds_type(_S)\n self.assertRegex(\n _str(S),\n r\"\"\"SFFSoftware\\(id={}, name={}, version={}, processing_details={}\\)\"\"\".format(\n S.id, None, None, None\n )\n )\n self.assertIsNone(S.id)\n self.assertIsNone(S.name)\n self.assertIsNone(S.version)\n self.assertIsNone(S.processing_details)\n # no id\n name = rw.random_word()\n version = rw.random_word()\n processing_details = li.get_sentences(sentences=_random_integer(start=2, stop=5))\n _S = emdb_sff.software_type(\n name=name,\n version=version,\n processing_details=processing_details\n )\n S = adapter.SFFSoftware.from_gds_type(_S)\n self.assertRegex(\n _str(S),\n r\"\"\"SFFSoftware\\(id=None, name=\".+\", version=\".+\", processing_details=\".+\"\\)\"\"\"\n )\n self.assertIsNone(S.id)\n self.assertEqual(S.name, name)\n self.assertEqual(S.version, version)\n self.assertEqual(S.processing_details, processing_details)\n # with id\n _id = _random_integer()\n name = rw.random_word()\n version = rw.random_word()\n processing_details = li.get_sentences(sentences=_random_integer(start=2, stop=5))\n _S = emdb_sff.software_type(\n id=_id,\n name=name,\n version=version,\n processing_details=processing_details\n )\n S = adapter.SFFSoftware.from_gds_type(_S)\n self.assertRegex(\n _str(S),\n r\"\"\"SFFSoftware\\(id=\\d+, name=\".+\", version=\".+\", processing_details=\".+\"\\)\"\"\"\n )\n self.assertEqual(S.id, _id)\n self.assertEqual(S.name, name)\n self.assertEqual(S.version, version)\n self.assertEqual(S.processing_details, processing_details)",
"def test_create_from_gds_type(self):\n _b = emdb_sff.biological_annotationType(\n name=self.name,\n description=self.description,\n number_of_instances=self.no,\n external_references=self._external_references\n )\n b = adapter.SFFBiologicalAnnotation.from_gds_type(_b)\n self.assertRegex(\n _str(b),\n r\"\"\"SFFBiologicalAnnotation\\(\"\"\" \\\n r\"\"\"name=\"{}\", description=\"{}\", \"\"\" \\\n r\"\"\"number_of_instances={}, \"\"\" \\\n r\"\"\"external_references=SFFExternalReferenceList\\(\\[.*\\]\\)\\)\"\"\".format(\n self.name,\n self.description,\n self.no\n )\n )\n self.assertEqual(b.name, self.name)\n self.assertEqual(b.description, self.description)\n self.assertEqual(b.number_of_instances, self.no)\n self.assertEqual(b.external_references, self.external_references)",
"def test_type_mapping(store_session):\n\n _, session = store_session\n Thing = session.get_class(surf.ns.OWL.Thing)\n\n t1 = Thing(\"http://t1\")\n t1.surf_string_value = \"text\"\n t1.surf_bool_value = True\n t1.surf_float_value = 3.14\n t1.surf_int_value = 2010\n t1.save()\n\n t1 = Thing(\"http://t1\")\n assert type(t1.surf_string_value.first) == str\n assert type(t1.surf_bool_value.first) == bool\n assert type(t1.surf_float_value.first) == float\n assert type(t1.surf_int_value.first) == int",
"def test_tool_types_read(self):\n pass",
"def test_tool_types_list(self):\n pass",
"def check_type_get_example(self, in_types):\n pass",
"def test_describe_types(self):\n self.cluster.populate(1)\n self.cluster.start()\n\n create_ks_statement = \"CREATE KEYSPACE test WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 1}\"\n create_name_type_statement = \"\"\"\nCREATE TYPE test.name_type (\n firstname text,\n lastname text\n)\"\"\"\n create_address_type_statement = \"\"\"\nCREATE TYPE test.address_type (\n name frozen<name_type>,\n number int,\n street text,\n phones set<text>\n)\"\"\"\n\n # create test keyspace and some user defined types\n self.execute(cql=create_ks_statement)\n self.execute(create_name_type_statement)\n self.execute(create_address_type_statement)\n\n # Support for non-frozen UDTs was added in CASSANDRA-7423, so the output of DESCRIBE TYPE must account for this:\n if self.cluster.version() < LooseVersion('3.6'):\n create_address_type_statement = \"\"\"\n CREATE TYPE test.address_type (\n name frozen<name_type>,\n number int,\n street text,\n phones frozen<set<text>>\n )\"\"\"\n\n # DESCRIBE user defined types\n if self.cluster.version() >= LooseVersion('3.0'):\n expected_create_name_type_statement = create_name_type_statement + ';'\n expected_create_address_type_statement = create_address_type_statement + ';'\n else:\n expected_create_name_type_statement = create_name_type_statement\n expected_create_address_type_statement = create_address_type_statement\n\n self.execute(cql='DESCRIBE TYPE test.name_type', expected_output=expected_create_name_type_statement)\n self.execute(cql='DESCRIBE TYPE test.address_type', expected_output=expected_create_address_type_statement)",
"def test_type_object_creation(self):\n\t\trestaurant_type = Type.objects.create(name=\"Test Restaurant Type\")\n\t\tself.assertIs(isinstance(restaurant_type, Type), True)\n\t\tself.assertEqual(restaurant_type.__str__(), restaurant_type.name)",
"def test_missing_data_types(self):\n with self.assertWarnsWith(UserWarning, 'No data types specified. Exiting.'):\n export_spec(self.ns_builder, [], '.')",
"def test_dtypes(self):\n np = self.compile_test('dtypes.sv')\n self.assertTrue(np.get_dtype_width('logic') == 1)\n self.assertTrue(np.get_vertex_dtype_width('dtypes.logic_bit') == 1)\n self.assertTrue(np.get_vertex_dtype_str('dtypes.logic_bit') == 'logic')\n self.assertTrue(np.get_dtype_width('packed_struct_nested3_t') == 3+4+3)\n self.assertTrue(np.get_vertex_dtype_width('dtypes.packstruct_nested3') == 3+4+3)\n self.assertTrue(np.get_vertex_dtype_str('dtypes.packstruct_nested3') == 'packed struct')\n # Check that exceptions are raised\n self.assertRaises(RuntimeError, np.get_dtype_width, 'foo')\n self.assertRaises(RuntimeError, np.get_vertex_dtype_str, 'foo')\n self.assertRaises(RuntimeError, np.get_vertex_dtype_width, 'foo')",
"def test_create_type_no_parent(self, app):\n\n with app.app_context():\n conn = get_connection(current_app)\n\n name = 'Book'\n desc = 'A physical or digital book'\n resp = conn.create_type(name, desc)\n\n assert type(resp) == LtpType\n assert str(resp.name) == name\n assert str(resp.description) == desc",
"def create_test_record_types(record_types=None):\n objects = []\n record_types = record_types if record_types else [\n \"administrative records\",\n \"board materials\",\n \"communications and publications\",\n \"grant records\",\n \"annual reports\",\n ]\n for record_type in record_types:\n object = RecordType.objects.create(name=record_type)\n objects.append(object)\n return objects",
"def test_create_ds_metadata(self):\n json_sample = {\"name\": \"Leonhard Euler Party\", \"description\": \"Mathematician Guest List\", \"rows\": 0,\n \"schema\": {\n \"columns\": [{\n \"type\": \"STRING\",\n \"name\": \"Friend\"\n }, {\n \"type\": \"STRING\",\n \"name\": \"Attending\"\n }]\n }\n }\n ds_name = \"Leonhard Euler Party\"\n ds_description = \"Mathematician Guest List\"\n col_dtypes_dict = {\"Friend\": \"STRING\", \"Attending\": \"STRING\"}\n json_generated = DomoData.create_meta_string_from_user_declared(self,\n ds_name=ds_name, ds_descr=ds_description, col_types_dict=col_dtypes_dict)\n self.assertEqual(json_sample, json_generated)",
"def test_check_genotype_sanity():\n\n with pytest.raises(ValueError):\n check.genotype_sanity([\"00\",\"1\"])\n\n with pytest.raises(ValueError):\n check.genotype_sanity([[1],\"1\"])\n\n with pytest.raises(ValueError):\n check.genotype_sanity([5,\"1\"])\n\n with pytest.raises(ValueError):\n check.genotype_sanity([\"00\",\"01\"],wildtype=\"000\")\n\n # Should not throw error\n check.genotype_sanity([\"00\",\"01\"])\n check.genotype_sanity([\"00\",\"01\"],wildtype=\"00\")",
"def test_tool_types_update(self):\n pass",
"def create_data_type():\n logger.info('Creating Data Types..')\n\n data_codes = ['DAILY', 'INTRADAY']\n data_description = ['Data for a 24 period', 'Data for a 1 minute perioo']\n\n for code, description in zip(data_codes, data_description):\n DataType.objects.update_or_create(code=code, description=description)\n\n logger.info('{} DataType created'.format(DataType.code))",
"def test_service_discovery_instance_type_post(self):\n pass",
"def test_generate_simulation_data_types(self):\n \n seq = list(simdat.generate_simulation_data_types([\"fmiString\", \\\n simdat.SimulationDataType.INTEGER, \"fmiBoolean\", \"fmiReal\"]))\n ref = [simdat.SimulationDataType.STRING, \\\n simdat.SimulationDataType.INTEGER, \\\n simdat.SimulationDataType.BOOLEAN, simdat.SimulationDataType.REAL]\n \n self.assertSequenceEqual(seq, ref)\n \n try:\n it = iter((simdat.generate_simulation_data_types([\"nope\"])))\n next(it)\n self.assertTrue(False)\n except ValueError:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create an SFFSegmentation object with 3D volume segmentation from scratch
|
def test_create_3D(self):
segmentation = adapter.SFFSegmentation()
segmentation.name = rw.random_word()
segmentation.primary_descriptor = u"three_d_volume"
# transforms
transforms = adapter.SFFTransformList()
transforms.append(
adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12)))
)
)
transforms.append(
adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12)))
)
)
transforms.append(
adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12)))
)
)
# bounding_box
xmax = _random_integer(start=500)
ymax = _random_integer(start=500)
zmax = _random_integer(start=500)
segmentation.bounding_box = adapter.SFFBoundingBox(
xmax=xmax,
ymax=ymax,
zmax=zmax
)
# lattice container
lattices = adapter.SFFLatticeList()
# lattice 1
# binlist = numpy.array([random.randint(0, 5) for i in _xrange(20 * 20 * 20)]).reshape(20, 20, 20)
binlist = numpy.random.randint(0, 5, size=(20, 20, 20))
lattice = adapter.SFFLattice(
mode=u'uint32',
endianness=u'little',
size=adapter.SFFVolumeStructure(cols=20, rows=20, sections=20),
start=adapter.SFFVolumeIndex(cols=0, rows=0, sections=0),
data=binlist,
)
lattices.append(lattice)
# lattice 2
# binlist2 = numpy.array([random.random() * 100 for i in _xrange(30 * 40 * 50)]).reshape(30, 40, 50)
binlist2 = numpy.random.rand(30, 40, 50) * 100
lattice2 = adapter.SFFLattice(
mode=u'float32',
endianness=u'big',
size=adapter.SFFVolumeStructure(cols=30, rows=40, sections=50),
start=adapter.SFFVolumeIndex(cols=-50, rows=-40, sections=100),
data=binlist2,
)
lattices.append(lattice2)
# segments
segments = adapter.SFFSegmentList()
# segment one
segment = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))
vol1_value = 1
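        # the three_d_volume below ties this segment to the first lattice (lattice_id=0)
        # and records the voxel value that labels the segment within that lattice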
segment.three_d_volume = adapter.SFFThreeDVolume(
lattice_id=0,
value=vol1_value,
)
segments.append(segment)
# segment two
segment = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))
vol2_value = 37.1
segment.three_d_volume = adapter.SFFThreeDVolume(
lattice_id=1,
value=vol2_value
)
# add segment to segments
segments.append(segment)
segmentation.transforms = transforms
segmentation.segments = segments
segmentation.lattices = lattices
# export
# self.stderr(segmentation)
# self.stderrj(segmentation.as_json())
segmentation.export(self.three_d_volume_file)
# assertions
self.assertRegex(
_str(segmentation),
r"""SFFSegmentation\(name="\w+", version="{}"\)""".format(
EMDB_SFF_VERSION
)
)
self.assertEqual(segmentation.primary_descriptor, u"three_d_volume")
self.assertEqual(segmentation.bounding_box.xmin, 0)
self.assertEqual(segmentation.bounding_box.xmax, xmax)
self.assertEqual(segmentation.bounding_box.ymin, 0)
self.assertEqual(segmentation.bounding_box.ymax, ymax)
self.assertEqual(segmentation.bounding_box.zmin, 0)
self.assertEqual(segmentation.bounding_box.zmax, zmax)
# test the number of transforms
self.assertTrue(len(segmentation.transforms) > 0)
# test the transform IDs
t_ids = map(lambda t: t.id, segmentation.transforms)
self.assertCountEqual(t_ids, range(3))
# segments
self.assertEqual(len(segmentation.segments), 2)
# segment one
segment = segmentation.segments[0]
# volume
self.assertEqual(segment.three_d_volume.lattice_id, 0)
self.assertEqual(segment.three_d_volume.value, vol1_value)
# segment two
segment = segmentation.segments.get_by_id(2)
# volume
self.assertEqual(segment.three_d_volume.lattice_id, 1)
self.assertEqual(segment.three_d_volume.value, vol2_value)
# lattices
lattices = segmentation.lattices
self.assertEqual(len(lattices), 2)
# lattice one
lattice1 = lattices.get_by_id(0)
self.assertEqual(lattice1.mode, u'uint32')
self.assertEqual(lattice1.endianness, u'little')
self.assertCountEqual(lattice1.size.value, (20, 20, 20))
self.assertCountEqual(lattice1.start.value, (0, 0, 0))
# lattice two
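        # note: `lattice2` still refers to the lattice constructed and appended above,
        # so no lattices.get_by_id(1) lookup is performed here (it would return the same object)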
self.assertEqual(lattice2.mode, u'float32')
self.assertEqual(lattice2.endianness, u'big')
self.assertCountEqual(lattice2.size.value, (30, 40, 50))
self.assertCountEqual(lattice2.start.value, (-50, -40, 100))
|
[
"def test_create_3D(self):\n segmentation = schema.SFFSegmentation() # 3D volume\n segmentation.primaryDescriptor = \"threeDVolume\"\n # transforms\n transforms = schema.SFFTransformList()\n transforms.add_transform(\n schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12)))\n )\n )\n transforms.add_transform(\n schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12)))\n )\n )\n transforms.add_transform(\n schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12)))\n )\n )\n # boundingBox\n xmax = _random_integer(start=500)\n ymax = _random_integer(start=500)\n zmax = _random_integer(start=500)\n segmentation.boundingBox = schema.SFFBoundingBox(\n xmax=xmax,\n ymax=ymax,\n zmax=zmax\n )\n # lattice container\n lattices = schema.SFFLatticeList()\n # lattice 1\n binlist = numpy.array([random.randint(0, 5) for i in _xrange(20 * 20 * 20)])\n lattice = schema.SFFLattice(\n mode='uint32',\n endianness='little',\n size=schema.SFFVolumeStructure(cols=20, rows=20, sections=20),\n start=schema.SFFVolumeIndex(cols=0, rows=0, sections=0),\n data=binlist,\n )\n lattices.add_lattice(lattice)\n # lattice 2\n binlist2 = numpy.array([random.random() * 100 for i in _xrange(30 * 40 * 50)])\n lattice2 = schema.SFFLattice(\n mode='float32',\n endianness='big',\n size=schema.SFFVolumeStructure(cols=30, rows=40, sections=50),\n start=schema.SFFVolumeIndex(cols=-50, rows=-40, sections=100),\n data=binlist2,\n )\n lattices.add_lattice(lattice2)\n # segments\n segments = schema.SFFSegmentList()\n # segment one\n segment = schema.SFFSegment()\n vol1_value = 1\n segment.volume = schema.SFFThreeDVolume(\n latticeId=0,\n value=vol1_value,\n )\n segments.add_segment(segment)\n # segment two\n segment = schema.SFFSegment()\n vol2_value = 37.1\n segment.volume = schema.SFFThreeDVolume(\n latticeId=1,\n value=vol2_value\n )\n # add segment to segments\n segments.add_segment(segment)\n segmentation.transforms = transforms\n segmentation.segments = segments\n segmentation.lattices = lattices\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, 'sff', 'v0.7', 'test_3d_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.primaryDescriptor, \"threeDVolume\")\n self.assertEqual(segmentation.boundingBox.xmin, 0)\n self.assertEqual(segmentation.boundingBox.xmax, xmax)\n self.assertEqual(segmentation.boundingBox.ymin, 0)\n self.assertEqual(segmentation.boundingBox.ymax, ymax)\n self.assertEqual(segmentation.boundingBox.zmin, 0)\n self.assertEqual(segmentation.boundingBox.zmax, zmax)\n # test the number of transforms\n self.assertEqual(len(segmentation.transforms), 3)\n # test the transform IDs\n t_ids = map(lambda t: t.id, segmentation.transforms)\n self.assertCountEqual(t_ids, range(3))\n # segments\n self.assertEqual(len(segmentation.segments), 2)\n # segment one\n segment = segmentation.segments[0]\n # volume\n self.assertEqual(segment.volume.latticeId, 0)\n self.assertEqual(segment.volume.value, vol1_value)\n # segment two\n segment = segmentation.segments.get_by_id(2)\n # volume\n self.assertEqual(segment.volume.latticeId, 1)\n self.assertEqual(segment.volume.value, vol2_value)\n # lattices\n lattices = segmentation.lattices\n self.assertEqual(len(lattices), 2)\n # lattice one\n lattice1 = lattices.get_by_id(0)\n self.assertEqual(lattice1.mode, 'uint32')\n self.assertEqual(lattice1.endianness, 'little')\n self.assertCountEqual(lattice1.size.value, (20, 20, 20))\n self.assertCountEqual(lattice1.start.value, 
(0, 0, 0))\n # lattice two\n self.assertEqual(lattice2.mode, 'float32')\n self.assertEqual(lattice2.endianness, 'big')\n self.assertCountEqual(lattice2.size.value, (30, 40, 50))\n self.assertCountEqual(lattice2.start.value, (-50, -40, 100))",
"def create_cuboid_segmentation(env, param, save=False, file_path=None, segmentation_class=None):\n centers = []\n length_part, width_part, height_part = param\n length, width, height = env[\"params\"][\"length\"], env[\"params\"][\"width\"], env[\"params\"][\"height\"]\n seg_length = length / length_part\n seg_width = width / width_part\n seg_height = height / height_part\n for l_p in range(length_part):\n for w_p in range(width_part):\n for h_p in range(height_part):\n centers.append([\n l_p,\n w_p,\n h_p,\n seg_length * (l_p + 0.5),\n seg_width * (w_p + 0.5),\n seg_height * (h_p + 0.5)\n ])\n centers = np.array(centers)\n if save:\n if file_path is not None and segmentation_class is not None:\n np.save(os.path.join(file_path, segmentation_class), centers)\n else:\n print(\"Segmentations not saved, please provide path and class-name\")\n return centers",
"def HfsSegment_create(height, width, segEgbThresholdI=None, minRegionSizeI=None, segEgbThresholdII=None, minRegionSizeII=None, spatialWeight=None, slicSpixelSize=None, numSlicIter=None): # real signature unknown; restored from __doc__\n pass",
"def setup_object_segmentation(self):\n\t\tself.create_mask_for_object_with_color(self.obj_name_+\":Mesh\", [1, 1, 1])\n\n\t\tif self.use_table_:\n\t\t\tcmd.nurbsPlane(name=self.plane_name_, p=(0,0,0), ax=(0,0,1), w=10000, lr=1, d=3, u=1, v=1, ch=1)\n\t\t\tself.create_mask_for_object_with_color(self.plane_name_, [1, 0, 0])",
"def lung_segmentation(patient_dir):\n\n \"\"\" LOAD THE IMAGE \"\"\"\n\n # Initialize image and get dcm files\n dcm_list = glob(patient_dir + '/*.dcm')\n img = np.zeros((len(dcm_list), 512, 512), dtype='float32')\n z = []\n\n # For each dcm file, get the corresponding slice, normalize HU values, and store the Z position of the slice\n for i, f in enumerate(dcm_list):\n dcm = dicom.read_file(f)\n img[i] = float(dcm.RescaleSlope) * dcm.pixel_array.astype('float32') + float(dcm.RescaleIntercept)\n z.append(dcm.ImagePositionPatient[-1])\n\n # Get spacing and reorder slices\n spacing = map(float, dcm.PixelSpacing) + [np.median(np.diff(np.sort(z)))]\n img = img[np.argsort(z)]\n\n \"\"\" NORMALIZE HU AND RESOLUTION \"\"\"\n\n # Clip and normalize\n img = np.clip(img, -1024, 4000)\n img = (img + 1024.) / (4000 + 1024.)\n\n # Rescale 1mm x 1mm x 1mm\n new_shape = map(lambda x, y: int(x * y), img.shape, spacing[::-1])\n img = resize(img, new_shape, preserve_range=True)\n\n \"\"\" SEGMENT LUNGS USING THRESHOLDING + MORPHOLOGY + SIMPLE RULES \"\"\"\n\n # Threshold the image\n middle = img.shape[0] / 2\n data = img[middle].flatten()\n data = data[data > 0][:, None]\n kmeans = KMeans(n_clusters=2).fit(data)\n threshold = np.mean(kmeans.cluster_centers_.flatten())\n thresh_img = np.where(img < threshold, 1.0, 0.0)\n thresh_img[img == 0.] = 0.\n\n # Clean the image\n thresh_img = morphology.binary_erosion(thresh_img, np.ones([3, 3, 3]))\n\n # Detect connexity\n labels = measure.label(thresh_img)\n regions = measure.regionprops(labels)\n good_labels = []\n\n regions = filter(lambda x: x.area > 500000, regions)\n\n for prop in regions:\n B = prop.bbox\n lim = img.shape[1] / 3\n area_center = np.sum((labels == prop.label)[:, lim:2 * lim, :])\n\n # Big enough area (1,2,3), not too close to the image border, and with most area in the center\n if B[5] - B[2] > 1 / 4. * img.shape[2] \\\n and B[3] - B[0] > 1 / 4. 
* img.shape[0] \\\n and np.sum(B[:3]) > 10 \\\n and area_center > 0.3 * prop.area:\n good_labels.append(prop.label)\n\n lungmask = np.sum([labels == i for i in good_labels], axis=0)\n\n # Get the entire lung with a big dilation (should use ball(15) but it's too slow)\n for i in range(6):\n lungmask = morphology.binary_dilation(lungmask, np.ones((5, 5, 5)))\n for i in range(4):\n lungmask = morphology.binary_erosion(lungmask, np.ones((5, 5, 5)))\n\n \"\"\" CENTER AND PAD TO GET SHAPE (384, 288, 384) \"\"\"\n\n # Center the image\n\n sum_x = np.sum(lungmask, axis=(0, 1))\n sum_y = np.sum(lungmask, axis=(0, 2))\n sum_z = np.sum(lungmask, axis=(1, 2))\n\n mx = np.nonzero(sum_x)[0][0]\n Mx = len(sum_x) - np.nonzero(sum_x[::-1])[0][0]\n my = np.nonzero(sum_y)[0][0]\n My = len(sum_y) - np.nonzero(sum_y[::-1])[0][0]\n mz = np.nonzero(sum_z)[0][0]\n Mz = len(sum_z) - np.nonzero(sum_z[::-1])[0][0]\n\n img = img * lungmask\n img = img[mz:Mz, my:My, mx:Mx]\n\n # Pad the image to (384, 288, 384)\n nz, nr, nc = img.shape\n\n pad1 = int((384 - nz) / 2)\n pad2 = 384 - nz - pad1\n pad3 = int((288 - nr) / 2)\n pad4 = 288 - nr - pad3\n pad5 = int((384 - nc) / 2)\n pad6 = 384 - nc - pad5\n\n # Crop images too big\n if pad1 < 0:\n img = img[:, -pad1:384 - pad2]\n pad1 = pad2 = 0\n if img.shape.shape[0] == 383:\n pad1 = 1\n\n if pad3 < 0:\n img = img[:, :, -pad3:288 - pad4]\n pad3 = pad4 = 0\n if img.shape.shape[1] == 287:\n pad3 = 1\n\n if pad5 < 0:\n img = img[:, :, -pad5:384 - pad6]\n pad5 = pad6 = 0\n if img.shape.shape[2] == 383:\n pad5 = 1\n\n # Pad\n img = np.pad(img, pad_width=((pad1 - 4, pad2 + 4), (pad3, pad4), (pad5, pad6)), mode='constant')\n # The -4 / +4 is here for \"historical\" reasons, but it can be removed\n\n return img",
"def segment(stack):\n mask = threshold_otsu(stack)\n mask = remove_small_objects_in_plane(mask, min_size=1000)\n mask = pseudo_convex_hull(mask)\n stack = identity(stack)\n stack = filter_median(stack)\n stack = gradient_magnitude(stack)\n stack = discrete_gaussian_filter(stack, 2.0)\n stack = morphological_watershed(stack, 0.664)\n identity(stack.view(PrettyColorImage3D))\n stack = filter_cells_outside_mask(stack, mask)\n stack = remove_border_segmentations(stack)\n identity(stack.view(PrettyColorImage3D))\n return stack",
"def create_spherical_seg_3d(\n radius: float = 20.0,\n centre: Tuple[int, int, int] = (49, 49, 49),\n labelfield_value: int = 1,\n background_value: int = 0,\n im_shape: Tuple[int, int, int] = (99, 99, 99),\n) -> np.ndarray:\n # Create image\n image = np.zeros(im_shape, dtype=np.int32)\n spy, spx, spz = np.ogrid[\n -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]\n ]\n circle = (spx * spx + spy * spy + spz * spz) <= radius * radius\n\n image[circle] = labelfield_value\n image[~circle] = background_value\n return image",
"def __init__(self,\n data,\n npz_path,\n mode='train',\n min_overlap_score=0.4,\n img_resize=None,\n df=None,\n img_padding=False,\n depth_padding=False,\n augment_fn=None,\n **kwargs):\n super().__init__()\n # self.root_dir = root_dir\n self.mode = mode\n\n # prepare scene_info and pair_info\n if mode == 'test' and min_overlap_score != 0:\n logger.warning(\"You are using `min_overlap_score`!=0 in test mode. Set to 0.\")\n min_overlap_score = 0\n\n # parameters for image resizing, padding and depthmap padding\n if mode == 'train':\n assert img_resize is not None and img_padding and depth_padding\n self.img_resize = img_resize\n self.df = df\n self.img_padding = img_padding\n self.depth_max_size = 2000 if depth_padding else None # the upperbound of depthmaps size in megadepth.\n\n # for training LoFTR\n self.augment_fn = augment_fn if mode == 'train' else None\n self.coarse_scale = getattr(kwargs, 'coarse_scale', 0.125)\n self.path1 = data[\"path1\"].values\n self.path2 = data[\"path2\"].values\n self.H_matrix = data[\"H_matrix\"].values\n # self.camerainst2 = data[\"camerainst2\"].values\n # self.rot1 = data[\"rot1\"].values\n # self.rot2 = data[\"rot2\"].values\n # self.trans1 = data[\"trans1\"].values\n # self.trans2 = data[\"trans2\"].values\n gc.collect()",
"def view_instance_seg_dataset(dataset, n_mask_class=2):\n\n def visualize_func(dataset, index):\n img, bboxes, labels, lbls = dataset[index]\n return visualize_instance_segmentation(\n img,\n bboxes,\n labels,\n lbls,\n dataset.class_names,\n n_mask_class=n_mask_class\n )\n\n return view_dataset(dataset, visualize_func)",
"def build_seg_model():\n port_seg = PortraitSegmenter(down_depth=[1,2,2], num_levels=3, up_depth=[1,1],\n filters=[16,24,32],endchannels=[8,1])\n stored_file = \"../models/portraitCElight.pth\"\n port_seg.load_state_dict(torch.load(stored_file, map_location=torch.device('cpu')))\n port_seg.eval()\n port_seg.fuse()\n port_seg.eval()\n return port_seg",
"def run_segmentation_pipeline(session_location, options, master_logger):\n # read grayscale\n if options.image_stack is None:\n raise Exception(\"Must specify path to grayscale in 'image-stack'\")\n\n # run boundary prediction -- produces a prediction file\n if options.gen_pixel:\n prediction_file = pixel.gen_pixel_probabilities(session_location, options, master_logger, \n options.image_stack)\n else:\n prediction_file = options.pixelprob_file\n \n\n # generate supervoxels -- produces supervoxels and output as appropriate\n supervoxels = None\n prediction = None\n if options.gen_supervoxels:\n supervoxels, prediction = gen_supervoxels(options, prediction_file, master_logger) \n elif options.supervoxels_file:\n master_logger.info(\"Reading supervoxels: \" + options.supervoxels_file)\n supervoxels = imio.read_image_stack(options.supervoxels_file) \n #supervoxels = imio.read_mapped_segmentation(options.supervoxels_file) \n master_logger.info(\"Finished reading supervoxels\")\n\n # write superpixels out to hdf5 and/or raveler files\n sps_out = None\n image_stack = None\n\n if options.raveler_output:\n image_stack = imio.read_image_stack(options.image_stack)\n if options.h5_output:\n imio.write_image_stack(supervoxels,\n session_location + \"/\" + options.supervoxels_name)\n\n \"\"\"\n if supervoxels is not None:\n if options.h5_output:\n imio.write_image_stack(supervoxels,\n session_location + \"/\" + options.supervoxels_name, compression='lzf')\n\n if options.raveler_output:\n image_stack = imio.read_image_stack(options.image_stack)\n sps_out = output_raveler(supervoxels, supervoxels, image_stack, \n \"supervoxels\", session_location, master_logger)\n if options.synapse_file is not None:\n shutil.copyfile(options.synapse_file,\n session_location + \"/raveler-export/supervoxels/annotations-synapse.json\") \n \"\"\"\n\n # agglomerate and generate output\n if options.gen_agglomeration:\n if prediction is None and prediction_file is not None:\n master_logger.info(\"Reading pixel prediction: \" + prediction_file)\n prediction = imio.read_image_stack(prediction_file, \n group=PREDICTIONS_HDF5_GROUP)\n prediction = prediction.transpose((2, 1, 0, 3))\n master_logger.info(\"Finished reading pixel prediction\")\n elif prediction is None:\n raise Exception(\"No pixel probs available for agglomeration\")\n\n flow_perform_agglomeration(options, supervoxels, prediction, image_stack,\n session_location, sps_out, master_logger)",
"def test_create_shapes(self):\n segmentation = adapter.SFFSegmentation()\n segmentation.name = rw.random_word()\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=rw.random_word(),\n version=rw.random_word(),\n processingDetails=li.get_sentence(),\n )\n )\n segmentation.primary_descriptor = u\"shape_primitive_list\"\n transforms = adapter.SFFTransformList()\n segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n # shapes\n shapes = adapter.SFFShapePrimitiveList()\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n cylinder = adapter.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n shapes.append(cylinder)\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n ellipsoid = adapter.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n shapes.append(ellipsoid)\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n ellipsoid2 = adapter.SFFEllipsoid(x=_random_float() * 100, y=_random_float() * 100, z=_random_float() * 100,\n transformId=transform.id, )\n shapes.append(ellipsoid2)\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n segment.shape_primitive_list = shapes\n segments.append(segment)\n # more shapes\n segment = adapter.SFFSegment()\n # shapes\n shapes = adapter.SFFShapePrimitiveList()\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n 
shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n segment.shape_primitive_list = shapes\n segments.append(segment)\n segmentation.segments = segments\n segmentation.transforms = transforms\n # export\n segmentation.export(self.shape_file)\n # assertions\n self.assertEqual(len(segment.shape_primitive_list), 9)\n self.assertEqual(segment.shape_primitive_list.num_cones, 4)\n self.assertEqual(segment.shape_primitive_list.num_cylinders, 1)\n self.assertEqual(segment.shape_primitive_list.num_cuboids, 2)\n self.assertEqual(segment.shape_primitive_list.num_ellipsoids, 2)",
"def create_skeleton_layer(s3_bucket, skel_res, img_dims, num_res=7):\n # create cloudvolume info\n info = CloudVolume.create_new_info(\n num_channels=1,\n layer_type=\"segmentation\",\n data_type=\"uint64\", # Channel images might be 'uint8'\n encoding=\"raw\", # raw, jpeg, compressed_segmentation, fpzip, kempressed\n # Voxel scaling, units are in nanometers\n resolution=skel_res,\n voxel_offset=[0, 0, 0], # x,y,z offset in voxels from the origin\n # Pick a convenient size for your underlying chunk representation\n # Powers of two are recommended, doesn't need to cover image exactly\n chunk_size=[int(i / 4) for i in img_dims],\n # chunk_size=[128, 128, 64], # units are voxels\n volume_size=[i * 2 ** (num_res - 1) for i in img_dims], # units are voxels\n skeletons=\"skeletons\",\n )\n skel_info = {\n \"@type\": \"neuroglancer_skeletons\",\n \"transform\": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],\n \"vertex_attributes\": [\n {\"id\": \"radius\", \"data_type\": \"float32\", \"num_components\": 1},\n {\"id\": \"vertex_types\", \"data_type\": \"float32\", \"num_components\": 1},\n {\"id\": \"vertex_color\", \"data_type\": \"float32\", \"num_components\": 4},\n ],\n }\n # get cloudvolume info\n vol = CloudVolume(s3_bucket, info=info, parallel=True)\n [vol.add_scale((2 ** i, 2 ** i, 2 ** i)) for i in range(num_res)] # num_res - 1\n vol.commit_info()\n\n # upload skeleton info to /skeletons/ dir\n with storage.SimpleStorage(vol.cloudpath) as stor:\n stor.put_json(str(Path(\"skeletons\") / \"info\"), skel_info)\n\n return vol",
"def onFetch2Button(self):\n print('Fetching Brain tumor Segmentation Data ...............')\n \n #Clear the scene\n slicer.mrmlScene.Clear()\n \n # Load master volume\n sampleDataLogic = SampleData.SampleDataLogic()\n masterVolumeNode = sampleDataLogic.downloadMRBrainTumor1()\n\n # Create segmentation\n segmentationNode = slicer.vtkMRMLSegmentationNode()\n slicer.mrmlScene.AddNode(segmentationNode)\n segmentationNode.CreateDefaultDisplayNodes() # only needed for display\n segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(masterVolumeNode)\n\n # Create seed segment inside tumor\n tumorSeed = vtk.vtkSphereSource()\n tumorSeed.SetCenter(-6, 30, 28)\n tumorSeed.SetRadius(10)\n tumorSeed.Update()\n segmentationNode.AddSegmentFromClosedSurfaceRepresentation(tumorSeed.GetOutput(), \"Tumor\", [1.0,0.0,0.0])\n\n # Create seed segment inside tumor 2\n referenceSeed = vtk.vtkSphereSource()\n referenceSeed.SetCenter(-6, -50, -10)\n referenceSeed.SetRadius(20)\n referenceSeed.Update()\n segmentationNode.AddSegmentFromClosedSurfaceRepresentation(referenceSeed.GetOutput(), \"Reference\", [0.0,0.0,1.0])\n\n # Create seed segment outside tumor\n backgroundSeedPositions = [[0,65,32], [1, -14, 30], [0, 28, -7], [0,30,64], [31, 33, 27], [-42, 30, 27]]\n append = vtk.vtkAppendPolyData()\n for backgroundSeedPosition in backgroundSeedPositions:\n backgroundSeed = vtk.vtkSphereSource()\n backgroundSeed.SetCenter(backgroundSeedPosition)\n backgroundSeed.SetRadius(10)\n backgroundSeed.Update()\n append.AddInputData(backgroundSeed.GetOutput())\n\n append.Update()\n backgroundSegmentId = segmentationNode.AddSegmentFromClosedSurfaceRepresentation(append.GetOutput(), \"Background\", [0.0,1.0,0.0])\n\n # Perform analysis\n ################################################\n\n # Create segment editor to get access to effects\n segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()\n # To show segment editor widget (useful for debugging): segmentEditorWidget.show()\n segmentEditorWidget.setMRMLScene(slicer.mrmlScene)\n segmentEditorNode = slicer.vtkMRMLSegmentEditorNode()\n slicer.mrmlScene.AddNode(segmentEditorNode)\n segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)\n segmentEditorWidget.setSegmentationNode(segmentationNode)\n segmentEditorWidget.setMasterVolumeNode(masterVolumeNode)\n\n # Set up masking parameters\n segmentEditorWidget.setActiveEffectByName(\"Mask volume\")\n effect = segmentEditorWidget.activeEffect()\n # set fill value to be outside the valid intensity range\n intensityRange = masterVolumeNode.GetImageData().GetScalarRange()\n effect.setParameter(\"FillValue\", str(intensityRange[0]-1))\n # Blank out voxels that are outside the segment\n effect.setParameter(\"Operation\", \"FILL_OUTSIDE\")\n # Create a volume that will store temporary masked volumes\n maskedVolume = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLScalarVolumeNode\", \"Temporary masked volume\")\n effect.self().outputVolumeSelector.setCurrentNode(maskedVolume)\n \n print('Brain tumor Segmentation Data Fetched Successfully...........')",
"def test_create_shapes(self):\n segmentation = schema.SFFSegmentation()\n segmentation.primaryDescriptor = \"shapePrimitiveList\"\n transforms = schema.SFFTransformList()\n segments = schema.SFFSegmentList()\n segment = schema.SFFSegment()\n # shapes\n shapes = schema.SFFShapePrimitiveList()\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n segment.shapes = shapes\n segments.add_segment(segment)\n # more shapes\n segment = schema.SFFSegment()\n # shapes\n shapes = schema.SFFShapePrimitiveList()\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n 
transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n segment.shapes = shapes\n segments.add_segment(segment)\n segmentation.segments = segments\n segmentation.transforms = transforms\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, 'sff', 'v0.7', 'test_shape_segmentation.sff'))\n # assertions\n self.assertEqual(len(segment.shapes), 9)\n self.assertEqual(segment.shapes.numCones, 4)\n self.assertEqual(segment.shapes.numCylinders, 1)\n self.assertEqual(segment.shapes.numCuboids, 2)\n self.assertEqual(segment.shapes.numEllipsoids, 2)",
"def skeletonEmbed(segmentationResolution=int, segmentationMethod=int, mergedMesh=bool):\n pass",
"def segment_frame( self, oid, f ):\n assert oid>=0\n assert oid<len(self.object_names)\n assert f>=0\n assert f<len(self.images)\n \n try:\n self.netsurfs[oid][f] = None\n except:\n print('LAZY INIT NETSURFS')\n self.netsurfs[oid] = [None] * len(self.images)\n \n self.netsurfs[oid][f] = NetSurf2d(self.num_columns, K=self.K, max_delta_k=self.max_delta_k)\n optimum = self.netsurfs[oid][f].apply_to(self.images[f], \n self.object_seedpoints[oid][f], \n self.object_max_surf_dist[oid][f], \n min_radius=self.object_min_surf_dist[oid][f])\n self.object_areas[oid][f] = self.netsurfs[oid][f].get_area( self.pixelsize )\n if not self.silent:\n print(' Optimum energy: ', optimum)\n ins, outs = self.netsurfs[oid][f].get_counts()\n print(' Nodes in/out: ', ins, outs)\n print(' Area: ', self.object_areas[oid][f])",
"def segmentMap_fz(self,*args, scale=100.0, sigma=0.95, min_size=50, plot = True, contrast = False, set_default = False): #will slow/crash your laptop\n if contrast:\n img = self.inc_contrast(*args,plot=False) \n else:\n img, band_names = self._call_channels(*args)\n if len(img.shape) == 2:\n img = np.repeat(img[:,:, np.newaxis], 3, axis=2) \n segments_fz = segmentMap_fz(img, name=self.name, mask=self._mask,scale=scale, sigma=sigma, min_size=min_size, plot = plot)\n segments_fz.resolution = self.resolution\n if set_default:\n self._segments['Felzenszwalb'] = segments_fz\n return segments_fz",
"def FC_SVD_compression(layer):\n # trunc = layer.weight.data.numpy().shape[0]\n trunc = 15\n weights1, weights2 = SVD_weights(layer.weight.data.cpu().numpy().T, trunc)\n\n # create SVD FC-layers:\n fc1 = torch.nn.Linear(weights1.shape[0], weights1.shape[1])\n fc2 = torch.nn.Linear(weights2.shape[0], weights2.shape[1])\n print('created: ')\n print(fc1)\n print(fc2)\n\n fc1.weight.data = torch.from_numpy(np.float32(weights1))\n fc2.weight.data = torch.from_numpy(np.float32(weights2))\n new_layers = [fc1, fc2]\n return nn.Sequential(*new_layers)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can create a segmentation of shapes programmatically
|
def test_create_shapes(self):
segmentation = adapter.SFFSegmentation()
segmentation.name = rw.random_word()
segmentation.software_list = adapter.SFFSoftwareList()
segmentation.software_list.append(
adapter.SFFSoftware(
name=rw.random_word(),
version=rw.random_word(),
processingDetails=li.get_sentence(),
)
)
segmentation.primary_descriptor = u"shape_primitive_list"
transforms = adapter.SFFTransformList()
segments = adapter.SFFSegmentList()
segment = adapter.SFFSegment()
# shapes
shapes = adapter.SFFShapePrimitiveList()
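        # each shape primitive below is paired with its own freshly created 3x4
        # transformation matrix and references it through the transform's id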
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottomRadius=_random_float() * 100,
transformId=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottomRadius=_random_float() * 100,
transformId=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottomRadius=_random_float() * 100,
transformId=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCuboid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transformId=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCuboid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transformId=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
cylinder = adapter.SFFCylinder(
height=_random_float() * 100,
diameter=_random_float() * 100,
transformId=transform.id,
)
shapes.append(cylinder)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
ellipsoid = adapter.SFFEllipsoid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transformId=transform.id,
)
shapes.append(ellipsoid)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
        ellipsoid2 = adapter.SFFEllipsoid(
            x=_random_float() * 100,
            y=_random_float() * 100,
            z=_random_float() * 100,
            transformId=transform.id,
        )
shapes.append(ellipsoid2)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
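        # note: keyword argument names are mixed from here on -- some shapes use
        # snake_case (bottom_radius, transform_id) while others keep the camelCase
        # forms (bottomRadius, transformId) used above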
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottom_radius=_random_float() * 100,
transform_id=transform.id,
)
)
segment.shape_primitive_list = shapes
segments.append(segment)
# more shapes
segment = adapter.SFFSegment()
# shapes
shapes = adapter.SFFShapePrimitiveList()
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottom_radius=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottom_radius=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottom_radius=_random_float() * 100,
transformId=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCuboid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCuboid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCylinder(
height=_random_float() * 100,
diameter=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFEllipsoid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFEllipsoid(
x=_random_float() * 100,
y=_random_float() * 100,
z=_random_float() * 100,
transform_id=transform.id,
)
)
transform = adapter.SFFTransformationMatrix(
rows=3,
cols=4,
data=" ".join(map(_str, range(12))),
)
transforms.append(transform)
shapes.append(
adapter.SFFCone(
height=_random_float() * 100,
bottomRadius=_random_float() * 100,
transform_id=transform.id,
)
)
segment.shape_primitive_list = shapes
segments.append(segment)
segmentation.segments = segments
segmentation.transforms = transforms
# export
segmentation.export(self.shape_file)
# assertions
self.assertEqual(len(segment.shape_primitive_list), 9)
self.assertEqual(segment.shape_primitive_list.num_cones, 4)
self.assertEqual(segment.shape_primitive_list.num_cylinders, 1)
self.assertEqual(segment.shape_primitive_list.num_cuboids, 2)
self.assertEqual(segment.shape_primitive_list.num_ellipsoids, 2)
|
[
"def test_create_shapes(self):\n segmentation = schema.SFFSegmentation()\n segmentation.primaryDescriptor = \"shapePrimitiveList\"\n transforms = schema.SFFTransformList()\n segments = schema.SFFSegmentList()\n segment = schema.SFFSegment()\n # shapes\n shapes = schema.SFFShapePrimitiveList()\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n segment.shapes = shapes\n segments.add_segment(segment)\n # more shapes\n segment = schema.SFFSegment()\n # shapes\n shapes = schema.SFFShapePrimitiveList()\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n 
transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n segment.shapes = shapes\n segments.add_segment(segment)\n segmentation.segments = segments\n segmentation.transforms = transforms\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, 'sff', 'v0.7', 'test_shape_segmentation.sff'))\n # assertions\n self.assertEqual(len(segment.shapes), 9)\n self.assertEqual(segment.shapes.numCones, 4)\n self.assertEqual(segment.shapes.numCylinders, 1)\n self.assertEqual(segment.shapes.numCuboids, 2)\n self.assertEqual(segment.shapes.numEllipsoids, 2)",
"def segmentation(\n img,\n segmentation,\n title=None,\n save=None,\n figsize=(20, 10),\n linewidth=2,\n edgecolor=\"red\",\n):\n fig, ax = plt.subplots(figsize=figsize)\n ax.imshow(img)\n for i in range(len(segmentation)):\n s = segmentation[i]\n s.calculate_properties()\n cent = s.centroid\n patch = mpatches.Rectangle(\n (s.xrange[0], s.yrange[0]),\n s.xdiam,\n s.ydiam,\n fill=False,\n edgecolor=edgecolor,\n linewidth=linewidth,\n )\n ax.add_patch(patch)\n if title is not None:\n fig.suptitle(title, size=20)\n ax.set_axis_off()\n plt.tight_layout()\n fig.subplots_adjust(top=0.95)\n if save is not None:\n plt.savefig(save)\n plt.show()",
"def run_test_shape():\n print()\n print('--------------------------------------------------')\n print('Testing the SHAPE function:')\n print('--------------------------------------------------')\n\n print()\n print('Test 1 of shape: m=5 and n=2')\n shape(5, 2)\n\n print()\n print('Test 2 of shape: m=3 and n=6')\n shape(3, 6)\n\n print()\n print('Test 3 of shape: m=7 and n=1')\n shape(7, 1)\n\n print()\n print('Test 4 of shape: m=6 and n=4')\n shape(6, 4)",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def test_analysis_sg_classes(): \n AnalyzeROI_SG.create()\n AnalyzeSED_SG.create()",
"def createSubdivRegion():\n pass",
"def split_segmentation(infile, lbl=1, close=True, close_cube_size=5,\n close_iter=1, min_region_size=100):\n # Load the segmentation numpy array from a file and get only the requested\n # labels as 1 and the background as 0:\n seg = io.load_tomo(infile)\n assert(isinstance(seg, np.ndarray))\n data_type = seg.dtype\n binary_seg = (seg == lbl).astype(data_type)\n\n # If requested, close small holes in the segmentation:\n outfile = infile\n if close:\n outfile = (\"%s%s_closed_size%s_iter%s.mrc\"\n % (infile[0:-4], lbl, close_cube_size, close_iter))\n if not isfile(outfile):\n from scipy import ndimage\n cube = np.ones((close_cube_size, close_cube_size, close_cube_size))\n binary_seg = ndimage.binary_closing(\n binary_seg, structure=cube, iterations=close_iter\n ).astype(data_type)\n # Write the closed binary segmentation into a file:\n io.save_numpy(binary_seg, outfile)\n print (\"Closed the binary segmentation and saved it into the file \"\n \"%s\" % outfile)\n else: # the '.mrc' file already exists\n binary_seg = io.load_tomo(outfile)\n print (\"The closed binary segmentation was loaded from the file \"\n \"%s\" % outfile)\n\n # Label each connected region of the binary segmentation:\n label_seg = label(binary_seg)\n\n # Get only regions with at least the given size:\n regions = []\n for i, region in enumerate(regionprops(label_seg)):\n region_area = region.area\n if region_area >= min_region_size:\n print \"%s. region has %s voxels and pass\" % (i + 1, region_area)\n # Get the region coordinates and make an ndarray with same shape as\n # the segmentation and 1 at those coordinates:\n region_ndarray = np.zeros(shape=tuple(seg.shape), dtype=data_type)\n # 2D array with 3 columns: x, y, z and number of rows corresponding\n # to the number of voxels in the region\n region_coords = region.coords\n for i in xrange(region_coords.shape[0]): # iterate over the rows\n region_ndarray[region_coords[i, 0],\n region_coords[i, 1],\n region_coords[i, 2]] = 1\n regions.append(region_ndarray)\n else:\n print (\"%s. region has %s voxels and does NOT pass\"\n % (i + 1, region_area))\n print \"%s regions passed.\" % len(regions)\n return regions, outfile",
"def test_shape_predictor(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def test_shape(self):\n try:\n self.shape_for_testing\n except ValueError as e:\n raise pybamm.ShapeError(\"Cannot find shape (original error: {})\".format(e))",
"def __populate_segmentation_in_label(self, label, annotation, image_details):\n # if bbox comes as normalized, skip normalization.\n if max(annotation[\"bbox\"]) < 1.5:\n width = 1\n height = 1\n else:\n width = image_details[\"width\"]\n height = image_details[\"height\"]\n\n polygons = []\n if (\n type(annotation[\"segmentation\"]) is dict\n ): # segmentations are in uncompressed rle format\n rle = annotation[\"segmentation\"]\n if self.compressed_rle:\n compressed_rle = rle\n else:\n compressed_rle = mask.frPyObjects(rle, rle[\"size\"][0], rle[\"size\"][1])\n polygons = masktools.convert_mask_to_polygon(compressed_rle)\n else: # segmentation is list of vertices\n for segmentation in annotation[\"segmentation\"]:\n polygon = []\n # loop through vertices:\n for id, vertex in enumerate(segmentation):\n if (id % 2) == 0:\n # x-coordinates (even index)\n x = vertex / width\n polygon.append(x)\n\n else:\n y = vertex / height\n polygon.append(y)\n polygons.append(polygon)\n label[\"polygon\"] = polygons",
"def shape(self) -> S:",
"def analyseShape( self, shape ):\n nRet = 0\n rPerimeter = computePerimeter( shape )\n print( \"rPerimeter: %s\" % rPerimeter ) \n rLastFirstDist = geo.distance( shape[0], shape[-1] )\n print( \"rLastFirstDist: %s\" % rLastFirstDist )\n bb = geo.computeBoudingBox( shape )\n print( \"bb: %s\" % bb )\n rSizeBB = geo.distance( bb[0], bb[1] )\n print( \"rSizeBB: %s\" % rSizeBB )\n cornerBB = [ [bb[0][0],bb[0][1]], [bb[1][0],bb[0][1]], [bb[1][0],bb[1][1]], [bb[0][0],bb[1][1]] ]\n rDistToBB = geo.compute_distance_shape_to_points( shape, cornerBB )\n print( \"rDistToBB: %s\" % rDistToBB )\n if( rLastFirstDist * 6 < rPerimeter ): # 8\n # nearly ClosedFigure \n shape.append( shape[0] )\n center = geo.median( bb[0], bb[1] ) \n self.gridify( center ) \n if( rDistToBB < rSizeBB *0.4 ):\n print( \"Rectangle!\")\n shape = cornerBB\n shape.append(cornerBB[0][:])\n self.gridifyShape( shape )\n self.listFigures.append(Figure( Figure.kRectangle, shape[:] ) )\n else:\n print( \"Circle!\")\n radius = abs( bb[0][0]-center[0] )\n radius = ((radius+(self.nGridSize/2))/self.nGridSize)*self.nGridSize\n shape = generateCircle( center, radius )\n #self.gridifyShape( shape )\n self.listFigures.append(Figure( Figure.kCircle, shape[:] ) )\n nFigID = self.getColorFromShapeIdx( len(self.listFigures) - 1 )\n self.paintSortBuf( nFigID, center )\n nRet = 1\n else:\n # not closed.\n # is it a link ?\n idxFrom = -1\n for i in range(len(self.listFigures)):\n rDist, nearptfirst = self.listFigures[i].computeDistanceToBorder( shape[0] )\n print( \"dist first: %d\" % rDist)\n if( rDist < 8 ):\n idxFrom = i\n break\n idxTo = -1\n for i in range(len(self.listFigures)):\n rDist, nearptsec = self.listFigures[i].computeDistanceToBorder( shape[-1] )\n print( \"dist second: %d\" % rDist)\n if( rDist < 8 ):\n idxTo = i\n break\n print( \"idxFrom: %d, idxTo: %d\" % (idxFrom, idxTo) )\n if( idxFrom != -1 and idxTo != -1 and idxFrom != idxTo ):\n print( \"Link between %d and %d!\" % (idxFrom, idxTo) )\n self.listLinks.append(Link(self.listFigures[idxFrom],self.listFigures[idxTo] ), nearptfirst, nearptsec )\n \n print( \"INF: FastScheme.analyseShape: at end: %s\" % self.__str__() )\n return nRet",
"def test_slice_shape_negative_above(self):\n self.assertEqual(Selection()[3:-1].shape([9]), [5])",
"def test_mixed_shape_with_straight_and_circle(self):\n\n test_shape = ExtrudeMixedShape(\n points=[\n (10, 20, \"straight\"),\n (10, 10, \"straight\"),\n (20, 10, \"circle\"),\n (22, 15, \"circle\"),\n (20, 20, \"straight\"),\n ],\n distance=10,\n )\n assert test_shape.volume > 10 * 10 * 10",
"def test_destagger_shape_good(meta, shape) -> None:\n assert client.destagger(meta, np.zeros(shape)).shape == shape\n assert client.destagger(meta, np.zeros(shape), inverse=True).shape == shape",
"def vis_segmentation(image, seg_map,path):\n\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n \n plt.imshow(seg_image)\n plt.imsave('./result/'+path.split('/')[-1][:-4]+'_color.', seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.4)\n# seg_image=Image.open('./result/'+path.split('/')[-1][:-4]+'_color.png').convert(\"RGB\")\n seg_image=Image.fromarray(seg_image) \n img_mix = np.asarray(Image.blend(image, seg_image, 0.4))\n plt.imsave('./result/'+path.split('/')[-1][:-4]+'_color_image.', img_mix)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()",
"def test_panseg_to_bitmasks(self) -> None:\n self.task_specific_test(\n \"pan_seg\",\n \"panseg_bdd100k/panseg_mask.png\",\n \"panseg_mask.png\",\n panseg_to_bitmasks,\n )",
"def segment(self, image):\n pass",
"def test_get_shape(self, inpt, target_shape):\n shape = get_shape(inpt)\n assert shape == target_shape"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
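The "objective" block above records how each row of this dataset is consumed: as (query, document, negatives) triplets. A minimal sketch of turning one parsed row into training triples, assuming record is a dict holding these three fields (the helper name is illustrative only):

def to_triplets(record):
    # one (anchor, positive, negative) triple per negative passage
    return [
        (record["query"], record["document"], negative)
        for negative in record["negatives"]
    ]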
|
Test that we can create a segmentation of meshes programmatically
|
def test_create_meshes(self):
segmentation = adapter.SFFSegmentation()
segmentation.name = rw.random_word()
segmentation.primary_descriptor = u"mesh_list"
segments = adapter.SFFSegmentList()
segment = adapter.SFFSegment()
# meshes
mesh_list = adapter.SFFMeshList()
# mesh 1
count1 = _random_integer(start=3, stop=10)
vertices1, normals1, triangles1 = self.get_mesh_components(count=count1)
mesh = adapter.SFFMesh(
vertices=adapter.SFFVertices.from_array(vertices1),
normals=adapter.SFFNormals.from_array(normals1),
triangles=adapter.SFFTriangles.from_array(triangles1)
)
# mesh 2
count2 = _random_integer(start=3, stop=10)
vertices2, normals2, triangles2 = self.get_mesh_components(count=count2)
mesh2 = adapter.SFFMesh(
vertices=adapter.SFFVertices.from_array(vertices2),
normals=adapter.SFFNormals.from_array(normals2),
triangles=adapter.SFFTriangles.from_array(triangles2)
)
mesh_list.append(mesh)
mesh_list.append(mesh2)
segment.mesh_list = mesh_list
segments.append(segment)
# segment two
segment = adapter.SFFSegment()
# mesh
mesh_list = adapter.SFFMeshList()
count3 = _random_integer(start=3, stop=10)
vertices3, normals3, triangles3 = self.get_mesh_components(count=count3)
mesh = adapter.SFFMesh(
vertices=adapter.SFFVertices.from_array(vertices3),
normals=adapter.SFFNormals.from_array(normals3),
triangles=adapter.SFFTriangles.from_array(triangles3)
)
mesh_list.append(mesh)
segment.mesh_list = mesh_list
segments.append(segment)
segmentation.segments = segments
# export
segmentation.export(self.mesh_file)
# assertions
# segment one
segment1 = segmentation.segments.get_by_id(1)
self.assertEqual(len(segment1.mesh_list), 2)
mesh1, mesh2 = segment1.mesh_list
self.assertEqual(len(mesh1.vertices), vertices1.shape[0])
self.assertEqual(len(mesh1.normals), normals1.shape[0])
self.assertEqual(len(mesh1.triangles), triangles1.shape[0])
self.assertEqual(len(mesh2.vertices), vertices2.shape[0])
self.assertEqual(len(mesh2.normals), normals2.shape[0])
self.assertEqual(len(mesh2.triangles), triangles2.shape[0])
# segment two
segment2 = segmentation.segments.get_by_id(2)
mesh = segment2.mesh_list[0]
self.assertEqual(len(segment2.mesh_list), 1)
self.assertEqual(len(mesh.vertices), vertices3.shape[0])
self.assertEqual(len(mesh.normals), normals3.shape[0])
self.assertEqual(len(mesh.triangles), triangles3.shape[0])
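
# The test above relies on a get_mesh_components() helper defined elsewhere in
# this test case; the sketch below is only an assumption of what it might return:
# (count, 3) numpy arrays, floats for vertices and normals, integer vertex
# indices for triangles.
def get_mesh_components(self, count=10):
    import numpy
    # random 3D vertex positions and (un-normalised) per-vertex normals
    vertices = numpy.random.rand(count, 3) * 10
    normals = numpy.random.rand(count, 3)
    # each triangle references three vertex indices
    triangles = numpy.random.randint(0, count, size=(count, 3))
    return vertices, normals, triangles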
|
[
"def test_create_shapes(self):\n segmentation = adapter.SFFSegmentation()\n segmentation.name = rw.random_word()\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=rw.random_word(),\n version=rw.random_word(),\n processingDetails=li.get_sentence(),\n )\n )\n segmentation.primary_descriptor = u\"shape_primitive_list\"\n transforms = adapter.SFFTransformList()\n segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n # shapes\n shapes = adapter.SFFShapePrimitiveList()\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n cylinder = adapter.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n shapes.append(cylinder)\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n ellipsoid = adapter.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n shapes.append(ellipsoid)\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n ellipsoid2 = adapter.SFFEllipsoid(x=_random_float() * 100, y=_random_float() * 100, z=_random_float() * 100,\n transformId=transform.id, )\n shapes.append(ellipsoid2)\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n segment.shape_primitive_list = shapes\n segments.append(segment)\n # more shapes\n segment = adapter.SFFSegment()\n # shapes\n shapes = adapter.SFFShapePrimitiveList()\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n 
shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottom_radius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n transform = adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12))),\n )\n transforms.append(transform)\n shapes.append(\n adapter.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transform_id=transform.id,\n )\n )\n segment.shape_primitive_list = shapes\n segments.append(segment)\n segmentation.segments = segments\n segmentation.transforms = transforms\n # export\n segmentation.export(self.shape_file)\n # assertions\n self.assertEqual(len(segment.shape_primitive_list), 9)\n self.assertEqual(segment.shape_primitive_list.num_cones, 4)\n self.assertEqual(segment.shape_primitive_list.num_cylinders, 1)\n self.assertEqual(segment.shape_primitive_list.num_cuboids, 2)\n self.assertEqual(segment.shape_primitive_list.num_ellipsoids, 2)",
"def test_create_shapes(self):\n segmentation = schema.SFFSegmentation()\n segmentation.primaryDescriptor = \"shapePrimitiveList\"\n transforms = schema.SFFTransformList()\n segments = schema.SFFSegmentList()\n segment = schema.SFFSegment()\n # shapes\n shapes = schema.SFFShapePrimitiveList()\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n segment.shapes = shapes\n segments.add_segment(segment)\n # more shapes\n segment = schema.SFFSegment()\n # shapes\n shapes = schema.SFFShapePrimitiveList()\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n 
transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCuboid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCylinder(\n height=_random_float() * 100,\n diameter=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFEllipsoid(\n x=_random_float() * 100,\n y=_random_float() * 100,\n z=_random_float() * 100,\n transformId=transform.id,\n )\n )\n transform = schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12))),\n )\n transforms.add_transform(transform)\n shapes.add_shape(\n schema.SFFCone(\n height=_random_float() * 100,\n bottomRadius=_random_float() * 100,\n transformId=transform.id,\n )\n )\n segment.shapes = shapes\n segments.add_segment(segment)\n segmentation.segments = segments\n segmentation.transforms = transforms\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, 'sff', 'v0.7', 'test_shape_segmentation.sff'))\n # assertions\n self.assertEqual(len(segment.shapes), 9)\n self.assertEqual(segment.shapes.numCones, 4)\n self.assertEqual(segment.shapes.numCylinders, 1)\n self.assertEqual(segment.shapes.numCuboids, 2)\n self.assertEqual(segment.shapes.numEllipsoids, 2)",
"def setup_object_segmentation(self):\n\t\tself.create_mask_for_object_with_color(self.obj_name_+\":Mesh\", [1, 1, 1])\n\n\t\tif self.use_table_:\n\t\t\tcmd.nurbsPlane(name=self.plane_name_, p=(0,0,0), ax=(0,0,1), w=10000, lr=1, d=3, u=1, v=1, ch=1)\n\t\t\tself.create_mask_for_object_with_color(self.plane_name_, [1, 0, 0])",
"def segmentation(\n img,\n segmentation,\n title=None,\n save=None,\n figsize=(20, 10),\n linewidth=2,\n edgecolor=\"red\",\n):\n fig, ax = plt.subplots(figsize=figsize)\n ax.imshow(img)\n for i in range(len(segmentation)):\n s = segmentation[i]\n s.calculate_properties()\n cent = s.centroid\n patch = mpatches.Rectangle(\n (s.xrange[0], s.yrange[0]),\n s.xdiam,\n s.ydiam,\n fill=False,\n edgecolor=edgecolor,\n linewidth=linewidth,\n )\n ax.add_patch(patch)\n if title is not None:\n fig.suptitle(title, size=20)\n ax.set_axis_off()\n plt.tight_layout()\n fig.subplots_adjust(top=0.95)\n if save is not None:\n plt.savefig(save)\n plt.show()",
"def vis_segmentation(image, seg_map,path):\n\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n \n plt.imshow(seg_image)\n plt.imsave('./result/'+path.split('/')[-1][:-4]+'_color.', seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.4)\n# seg_image=Image.open('./result/'+path.split('/')[-1][:-4]+'_color.png').convert(\"RGB\")\n seg_image=Image.fromarray(seg_image) \n img_mix = np.asarray(Image.blend(image, seg_image, 0.4))\n plt.imsave('./result/'+path.split('/')[-1][:-4]+'_color_image.', img_mix)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()",
"def split_segmentation(infile, lbl=1, close=True, close_cube_size=5,\n close_iter=1, min_region_size=100):\n # Load the segmentation numpy array from a file and get only the requested\n # labels as 1 and the background as 0:\n seg = io.load_tomo(infile)\n assert(isinstance(seg, np.ndarray))\n data_type = seg.dtype\n binary_seg = (seg == lbl).astype(data_type)\n\n # If requested, close small holes in the segmentation:\n outfile = infile\n if close:\n outfile = (\"%s%s_closed_size%s_iter%s.mrc\"\n % (infile[0:-4], lbl, close_cube_size, close_iter))\n if not isfile(outfile):\n from scipy import ndimage\n cube = np.ones((close_cube_size, close_cube_size, close_cube_size))\n binary_seg = ndimage.binary_closing(\n binary_seg, structure=cube, iterations=close_iter\n ).astype(data_type)\n # Write the closed binary segmentation into a file:\n io.save_numpy(binary_seg, outfile)\n print (\"Closed the binary segmentation and saved it into the file \"\n \"%s\" % outfile)\n else: # the '.mrc' file already exists\n binary_seg = io.load_tomo(outfile)\n print (\"The closed binary segmentation was loaded from the file \"\n \"%s\" % outfile)\n\n # Label each connected region of the binary segmentation:\n label_seg = label(binary_seg)\n\n # Get only regions with at least the given size:\n regions = []\n for i, region in enumerate(regionprops(label_seg)):\n region_area = region.area\n if region_area >= min_region_size:\n print \"%s. region has %s voxels and pass\" % (i + 1, region_area)\n # Get the region coordinates and make an ndarray with same shape as\n # the segmentation and 1 at those coordinates:\n region_ndarray = np.zeros(shape=tuple(seg.shape), dtype=data_type)\n # 2D array with 3 columns: x, y, z and number of rows corresponding\n # to the number of voxels in the region\n region_coords = region.coords\n for i in xrange(region_coords.shape[0]): # iterate over the rows\n region_ndarray[region_coords[i, 0],\n region_coords[i, 1],\n region_coords[i, 2]] = 1\n regions.append(region_ndarray)\n else:\n print (\"%s. region has %s voxels and does NOT pass\"\n % (i + 1, region_area))\n print \"%s regions passed.\" % len(regions)\n return regions, outfile",
"def test_create_3D(self):\n segmentation = adapter.SFFSegmentation()\n segmentation.name = rw.random_word()\n segmentation.primary_descriptor = u\"three_d_volume\"\n # transforms\n transforms = adapter.SFFTransformList()\n transforms.append(\n adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12)))\n )\n )\n transforms.append(\n adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12)))\n )\n )\n transforms.append(\n adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12)))\n )\n )\n # bounding_box\n xmax = _random_integer(start=500)\n ymax = _random_integer(start=500)\n zmax = _random_integer(start=500)\n segmentation.bounding_box = adapter.SFFBoundingBox(\n xmax=xmax,\n ymax=ymax,\n zmax=zmax\n )\n # lattice container\n lattices = adapter.SFFLatticeList()\n # lattice 1\n # binlist = numpy.array([random.randint(0, 5) for i in _xrange(20 * 20 * 20)]).reshape(20, 20, 20)\n binlist = numpy.random.randint(0, 5, size=(20, 20, 20))\n lattice = adapter.SFFLattice(\n mode=u'uint32',\n endianness=u'little',\n size=adapter.SFFVolumeStructure(cols=20, rows=20, sections=20),\n start=adapter.SFFVolumeIndex(cols=0, rows=0, sections=0),\n data=binlist,\n )\n lattices.append(lattice)\n # lattice 2\n # binlist2 = numpy.array([random.random() * 100 for i in _xrange(30 * 40 * 50)]).reshape(30, 40, 50)\n binlist2 = numpy.random.rand(30, 40, 50) * 100\n lattice2 = adapter.SFFLattice(\n mode=u'float32',\n endianness=u'big',\n size=adapter.SFFVolumeStructure(cols=30, rows=40, sections=50),\n start=adapter.SFFVolumeIndex(cols=-50, rows=-40, sections=100),\n data=binlist2,\n )\n lattices.append(lattice2)\n # segments\n segments = adapter.SFFSegmentList()\n # segment one\n segment = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))\n vol1_value = 1\n segment.three_d_volume = adapter.SFFThreeDVolume(\n lattice_id=0,\n value=vol1_value,\n )\n segments.append(segment)\n # segment two\n segment = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))\n vol2_value = 37.1\n segment.three_d_volume = adapter.SFFThreeDVolume(\n lattice_id=1,\n value=vol2_value\n )\n # add segment to segments\n segments.append(segment)\n segmentation.transforms = transforms\n segmentation.segments = segments\n segmentation.lattices = lattices\n # export\n # self.stderr(segmentation)\n # self.stderrj(segmentation.as_json())\n segmentation.export(self.three_d_volume_file)\n # assertions\n self.assertRegex(\n _str(segmentation),\n r\"\"\"SFFSegmentation\\(name=\"\\w+\", version=\"{}\"\\)\"\"\".format(\n EMDB_SFF_VERSION\n )\n )\n self.assertEqual(segmentation.primary_descriptor, u\"three_d_volume\")\n self.assertEqual(segmentation.bounding_box.xmin, 0)\n self.assertEqual(segmentation.bounding_box.xmax, xmax)\n self.assertEqual(segmentation.bounding_box.ymin, 0)\n self.assertEqual(segmentation.bounding_box.ymax, ymax)\n self.assertEqual(segmentation.bounding_box.zmin, 0)\n self.assertEqual(segmentation.bounding_box.zmax, zmax)\n # test the number of transforms\n self.assertTrue(len(segmentation.transforms) > 0)\n # test the transform IDs\n t_ids = map(lambda t: t.id, segmentation.transforms)\n self.assertCountEqual(t_ids, range(3))\n # segments\n self.assertEqual(len(segmentation.segments), 2)\n # segment one\n segment = segmentation.segments[0]\n # volume\n self.assertEqual(segment.three_d_volume.lattice_id, 0)\n self.assertEqual(segment.three_d_volume.value, vol1_value)\n # segment two\n segment = 
segmentation.segments.get_by_id(2)\n # volume\n self.assertEqual(segment.three_d_volume.lattice_id, 1)\n self.assertEqual(segment.three_d_volume.value, vol2_value)\n # lattices\n lattices = segmentation.lattices\n self.assertEqual(len(lattices), 2)\n # lattice one\n lattice1 = lattices.get_by_id(0)\n self.assertEqual(lattice1.mode, u'uint32')\n self.assertEqual(lattice1.endianness, u'little')\n self.assertCountEqual(lattice1.size.value, (20, 20, 20))\n self.assertCountEqual(lattice1.start.value, (0, 0, 0))\n # lattice two\n self.assertEqual(lattice2.mode, u'float32')\n self.assertEqual(lattice2.endianness, u'big')\n self.assertCountEqual(lattice2.size.value, (30, 40, 50))\n self.assertCountEqual(lattice2.start.value, (-50, -40, 100))",
"def skeletonEmbed(segmentationResolution=int, segmentationMethod=int, mergedMesh=bool):\n pass",
"def test_create_3D(self):\n segmentation = schema.SFFSegmentation() # 3D volume\n segmentation.primaryDescriptor = \"threeDVolume\"\n # transforms\n transforms = schema.SFFTransformList()\n transforms.add_transform(\n schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12)))\n )\n )\n transforms.add_transform(\n schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12)))\n )\n )\n transforms.add_transform(\n schema.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(str, range(12)))\n )\n )\n # boundingBox\n xmax = _random_integer(start=500)\n ymax = _random_integer(start=500)\n zmax = _random_integer(start=500)\n segmentation.boundingBox = schema.SFFBoundingBox(\n xmax=xmax,\n ymax=ymax,\n zmax=zmax\n )\n # lattice container\n lattices = schema.SFFLatticeList()\n # lattice 1\n binlist = numpy.array([random.randint(0, 5) for i in _xrange(20 * 20 * 20)])\n lattice = schema.SFFLattice(\n mode='uint32',\n endianness='little',\n size=schema.SFFVolumeStructure(cols=20, rows=20, sections=20),\n start=schema.SFFVolumeIndex(cols=0, rows=0, sections=0),\n data=binlist,\n )\n lattices.add_lattice(lattice)\n # lattice 2\n binlist2 = numpy.array([random.random() * 100 for i in _xrange(30 * 40 * 50)])\n lattice2 = schema.SFFLattice(\n mode='float32',\n endianness='big',\n size=schema.SFFVolumeStructure(cols=30, rows=40, sections=50),\n start=schema.SFFVolumeIndex(cols=-50, rows=-40, sections=100),\n data=binlist2,\n )\n lattices.add_lattice(lattice2)\n # segments\n segments = schema.SFFSegmentList()\n # segment one\n segment = schema.SFFSegment()\n vol1_value = 1\n segment.volume = schema.SFFThreeDVolume(\n latticeId=0,\n value=vol1_value,\n )\n segments.add_segment(segment)\n # segment two\n segment = schema.SFFSegment()\n vol2_value = 37.1\n segment.volume = schema.SFFThreeDVolume(\n latticeId=1,\n value=vol2_value\n )\n # add segment to segments\n segments.add_segment(segment)\n segmentation.transforms = transforms\n segmentation.segments = segments\n segmentation.lattices = lattices\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, 'sff', 'v0.7', 'test_3d_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.primaryDescriptor, \"threeDVolume\")\n self.assertEqual(segmentation.boundingBox.xmin, 0)\n self.assertEqual(segmentation.boundingBox.xmax, xmax)\n self.assertEqual(segmentation.boundingBox.ymin, 0)\n self.assertEqual(segmentation.boundingBox.ymax, ymax)\n self.assertEqual(segmentation.boundingBox.zmin, 0)\n self.assertEqual(segmentation.boundingBox.zmax, zmax)\n # test the number of transforms\n self.assertEqual(len(segmentation.transforms), 3)\n # test the transform IDs\n t_ids = map(lambda t: t.id, segmentation.transforms)\n self.assertCountEqual(t_ids, range(3))\n # segments\n self.assertEqual(len(segmentation.segments), 2)\n # segment one\n segment = segmentation.segments[0]\n # volume\n self.assertEqual(segment.volume.latticeId, 0)\n self.assertEqual(segment.volume.value, vol1_value)\n # segment two\n segment = segmentation.segments.get_by_id(2)\n # volume\n self.assertEqual(segment.volume.latticeId, 1)\n self.assertEqual(segment.volume.value, vol2_value)\n # lattices\n lattices = segmentation.lattices\n self.assertEqual(len(lattices), 2)\n # lattice one\n lattice1 = lattices.get_by_id(0)\n self.assertEqual(lattice1.mode, 'uint32')\n self.assertEqual(lattice1.endianness, 'little')\n self.assertCountEqual(lattice1.size.value, (20, 20, 20))\n self.assertCountEqual(lattice1.start.value, 
(0, 0, 0))\n # lattice two\n self.assertEqual(lattice2.mode, 'float32')\n self.assertEqual(lattice2.endianness, 'big')\n self.assertCountEqual(lattice2.size.value, (30, 40, 50))\n self.assertCountEqual(lattice2.start.value, (-50, -40, 100))",
"def view_instance_seg_dataset(dataset, n_mask_class=2):\n\n def visualize_func(dataset, index):\n img, bboxes, labels, lbls = dataset[index]\n return visualize_instance_segmentation(\n img,\n bboxes,\n labels,\n lbls,\n dataset.class_names,\n n_mask_class=n_mask_class\n )\n\n return view_dataset(dataset, visualize_func)",
"def test_insseg_to_bitmasks(self) -> None:\n self.task_specific_test(\n \"ins_seg\",\n \"bitmasks/quasi-video/insseg_bitmask.png\",\n \"b1c81faa-3df17267-0000001.png\",\n insseg_to_bitmasks,\n )",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def test_analysis_sg_classes(): \n AnalyzeROI_SG.create()\n AnalyzeSED_SG.create()",
"def testSegmentsIntersect(self):\n self.assertTrue(compress_section.SegmentsIntersect(0, 3, 0, 3))\n self.assertTrue(compress_section.SegmentsIntersect(0, 3, -1, 1))\n self.assertTrue(compress_section.SegmentsIntersect(0, 3, 2, 4))\n self.assertTrue(compress_section.SegmentsIntersect(0, 3, 1, 2))\n\n self.assertFalse(compress_section.SegmentsIntersect(0, 3, 4, 6))\n self.assertFalse(compress_section.SegmentsIntersect(0, 3, -1, 0))\n self.assertFalse(compress_section.SegmentsIntersect(0, 3, 3, 5))",
"def test_processors_with_fake_preds():\n\n from dataset import DSB18Dataset, _DEFAULT_DSB18_OPTIONS\n\n # Load dataset using instance masks\n ds_options = _DEFAULT_DSB18_OPTIONS\n ds_options['mode'] = 'instance_masks'\n ds_options['in_memory'] = True\n ds_inst_masks = DSB18Dataset(phase='train_noval', options=ds_options)\n ds_inst_masks.print_config()\n\n # Get the gt instance masks\n _, gt_inst_masks, _ = ds_inst_masks.get_rand_samples_with_inst_masks(ds_inst_masks.train_size, 'train', deterministic=True)\n\n # Load dataset using semantic labels\n ds_options = _DEFAULT_DSB18_OPTIONS\n ds_options['mode'] = 'semantic_labels'\n ds_options['in_memory'] = True\n ds_sem_labels = DSB18Dataset(phase='train_noval', options=ds_options)\n ds_sem_labels.print_config()\n\n # Get the gt semantic labels (and their IDs so we can list worst offenders)\n _, bin_sem_labels, IDs = ds_sem_labels.get_rand_samples_with_sem_labels(ds_sem_labels.train_size, 'train', return_IDs=True, deterministic=True)\n bin_sem_labels = [np.squeeze(bin_sem_label) for bin_sem_label in bin_sem_labels]\n\n # Get the centroids of each instance mask and combine them in a single bin label\n bin_center_markers = []\n with tqdm(total=len(gt_inst_masks), desc=\"Compute instance mask centroids\", ascii=True, ncols=100) as pbar:\n for gt_masks in gt_inst_masks:\n pbar.update(1)\n bin_center_markers.append(inst_masks_centroids_to_label(gt_masks))\n\n # Get the contours of each instance mask and combine them in a single bin label\n bin_contour_markers_4px = []\n with tqdm(total=len(gt_inst_masks), desc=\"Compute 4px-wide instance mask contours\", ascii=True, ncols=100) as pbar:\n for gt_masks in gt_inst_masks:\n pbar.update(1)\n bin_contour_markers_4px.append(inst_masks_contours_to_label(gt_masks, thickness=4))\n bin_contour_markers_3px = []\n with tqdm(total=len(gt_inst_masks), desc=\"Compute 3px-wide instance mask contours\", ascii=True, ncols=100) as pbar:\n for gt_masks in gt_inst_masks:\n pbar.update(1)\n bin_contour_markers_3px.append(inst_masks_contours_to_label(gt_masks, thickness=3))\n bin_contour_markers_2px = []\n with tqdm(total=len(gt_inst_masks), desc=\"Compute 2px-wide instance mask contours\", ascii=True, ncols=100) as pbar:\n for gt_masks in gt_inst_masks:\n pbar.update(1)\n bin_contour_markers_2px.append(inst_masks_contours_to_label(gt_masks, thickness=2))\n\n # Parameters\n # print(bin_sem_labels[0].shape)\n # print(bin_contour_markers[0].shape)\n # print(bin_center_markers[0].shape)\n # num_samples = min(10, len(gt_inst_masks))\n # save_folder1 = \"c:/temp/visualizations1\"\n # save_folder2 = \"c:/temp/visualizations2\"\n # save_folder3 = \"c:/temp/visualizations3\"\n # save_folder4 = \"c:/temp/visualizations4_ws_cent\"\n # save_folder5 = \"c:/temp/visualizations5_ws_cont\"\n # save_folder5_offenders_gt = \"c:/temp/visualizations5_ws_cont_offenders_gt\"\n # save_folder5_offenders = \"c:/temp/visualizations5_ws_cont_offenders\"\n # archive_images_with_contours_and_centers(bin_sem_labels[0:num_samples], # [(H,W,1)]\n # bin_contour_markers[0:num_samples], # [(H,W)]\n # bin_center_markers[0:num_samples], # [(H,W)]\n # save_folder1)\n # archive_images(bin_sem_labels[0:num_samples], save_folder2)\n\n # Number of worst offenders to track\n num_offenders = 20\n\n #\n # Evaluate connected_components\n #\n method = 'connected_components'\n\n # Instantiate post-processor\n post_options = _DEFAULT_PROC_OPTIONS\n post_options['method'] = method\n post = Post(post_options)\n post.print_config()\n\n # Run the post-processor\n 
pred_inst_masks, _ = post.process_samples(bin_sem_labels)\n\n # Evaluate the performance of the post-processor\n mAP, worst_APs_idx, worst_APs_val = post.eval_map(gt_inst_masks, pred_inst_masks, track_offenders=True)\n print(\"{} mAP: {}\\nTop {} worst offenders:\".format(method, mAP, num_offenders))\n bad_bin_sem_labels, bad_pred_inst_masks, bad_gt_inst_masks = [], [], []\n for n in range(num_offenders):\n print(\"ID:{} - AP:{}\".format(IDs[worst_APs_idx[n]], worst_APs_val[n]))\n bad_bin_sem_labels.append(bin_sem_labels[worst_APs_idx[n]])\n bad_pred_inst_masks.append(pred_inst_masks[worst_APs_idx[n]])\n bad_gt_inst_masks.append(gt_inst_masks[worst_APs_idx[n]])\n\n #\n # Evaluate watershed_centers_basic\n #\n method = 'watershed_centers_basic'\n\n # Instantiate post-processor\n post_options = _DEFAULT_PROC_OPTIONS\n post_options['method'] = method\n post = Post(post_options)\n post.print_config()\n\n # Run the post-processor\n pred_inst_masks, _ = post.process_samples(bin_sem_labels, bin_center_markers)\n\n # Evaluate the performance of the post-processor\n mAP, worst_APs_idx, worst_APs_val = post.eval_map(gt_inst_masks, pred_inst_masks, track_offenders=True)\n print(\"{} mAP: {}\\nTop {} worst offenders:\".format(method, mAP, num_offenders))\n bad_bin_sem_labels, bad_pred_inst_masks, bad_gt_inst_masks = [], [], []\n for n in range(num_offenders):\n print(\"ID:{} - AP:{}\".format(IDs[worst_APs_idx[n]], worst_APs_val[n]))\n bad_bin_sem_labels.append(bin_sem_labels[worst_APs_idx[n]])\n bad_pred_inst_masks.append(pred_inst_masks[worst_APs_idx[n]])\n bad_gt_inst_masks.append(gt_inst_masks[worst_APs_idx[n]])\n\n #\n # Evaluate watershed_contours_basic with 4px-wide contours\n #\n method = 'watershed_contours_basic'\n contour_width = '4px-wide'\n\n # Instantiate post-processor\n post_options = _DEFAULT_PROC_OPTIONS\n post_options['method'] = method\n post = Post(post_options)\n post.print_config()\n\n # Run the post-processor\n pred_inst_masks, _ = post.process_samples(bin_sem_labels, bin_contour_markers=bin_contour_markers_4px)\n\n # Evaluate the performance of the post-processor\n mAP, worst_APs_idx, worst_APs_val = post.eval_map(gt_inst_masks, pred_inst_masks, track_offenders=True)\n print(\"{} {} mAP: {}\\nTop {} worst offenders:\".format(method, contour_width, mAP, num_offenders))\n bad_bin_sem_labels, bad_pred_inst_masks, bad_gt_inst_masks = [], [], []\n for n in range(num_offenders):\n print(\"ID:{} - AP:{}\".format(IDs[worst_APs_idx[n]], worst_APs_val[n]))\n bad_bin_sem_labels.append(bin_sem_labels[worst_APs_idx[n]])\n bad_pred_inst_masks.append(pred_inst_masks[worst_APs_idx[n]])\n bad_gt_inst_masks.append(gt_inst_masks[worst_APs_idx[n]])\n\n #\n # Evaluate watershed_contours_basic with 3px-wide contours\n #\n method = 'watershed_contours_basic'\n contour_width = '3px-wide'\n\n # Instantiate post-processor\n post_options = _DEFAULT_PROC_OPTIONS\n post_options['method'] = method\n post = Post(post_options)\n post.print_config()\n\n # Run the post-processor\n pred_inst_masks, _ = post.process_samples(bin_sem_labels, bin_contour_markers=bin_contour_markers_3px)\n\n # Evaluate the performance of the post-processor\n mAP, worst_APs_idx, worst_APs_val = post.eval_map(gt_inst_masks, pred_inst_masks, track_offenders=True)\n print(\"{} {} mAP: {}\\nTop {} worst offenders:\".format(method, contour_width, mAP, num_offenders))\n bad_bin_sem_labels, bad_pred_inst_masks, bad_gt_inst_masks = [], [], []\n for n in range(num_offenders):\n print(\"ID:{} - AP:{}\".format(IDs[worst_APs_idx[n]], 
worst_APs_val[n]))\n bad_bin_sem_labels.append(bin_sem_labels[worst_APs_idx[n]])\n bad_pred_inst_masks.append(pred_inst_masks[worst_APs_idx[n]])\n bad_gt_inst_masks.append(gt_inst_masks[worst_APs_idx[n]])\n\n #\n # Evaluate watershed_contours_basic with 2px-wide contours\n #\n method = 'watershed_contours_basic'\n contour_width = '2px-wide'\n\n # Instantiate post-processor\n post_options = _DEFAULT_PROC_OPTIONS\n post_options['method'] = method\n post = Post(post_options)\n post.print_config()\n\n # Run the post-processor\n pred_inst_masks, _ = post.process_samples(bin_sem_labels, bin_contour_markers=bin_contour_markers_2px)\n\n # Evaluate the performance of the post-processor\n mAP, worst_APs_idx, worst_APs_val = post.eval_map(gt_inst_masks, pred_inst_masks, track_offenders=True)\n print(\"{} {} mAP: {}\\nTop {} worst offenders:\".format(method, contour_width, mAP, num_offenders))\n bad_bin_sem_labels, bad_pred_inst_masks, bad_gt_inst_masks = [], [], []\n for n in range(num_offenders):\n print(\"ID:{} - AP:{}\".format(IDs[worst_APs_idx[n]], worst_APs_val[n]))\n bad_bin_sem_labels.append(bin_sem_labels[worst_APs_idx[n]])\n bad_pred_inst_masks.append(pred_inst_masks[worst_APs_idx[n]])\n bad_gt_inst_masks.append(gt_inst_masks[worst_APs_idx[n]])\n\n # archive_instances(bin_sem_labels[0:num_samples], gt_inst_masks[0:num_samples], save_folder3)\n # archive_instances(bin_sem_labels[0:num_samples], pred_inst_masks[0:num_samples], save_folder4)\n # archive_instances(bin_sem_labels[0:num_samples], pred_inst_masks[0:num_samples], save_folder5)\n\n # archive_instances(bad_bin_sem_labels, bad_pred_inst_masks, save_folder5_offenders)\n # archive_instances(bad_bin_sem_labels, bad_gt_inst_masks, save_folder5_offenders_gt)\n # archive_instances(bin_sem_labels[0:num_samples], pred_inst_masks[0:num_samples], save_folder4)\n\n # Worst offenders:\n # ID:308084bdd358e0bd3dc7f2b409d6f34cc119bce30216f44667fc2be43ff31722 - AP:0.37068899207755457 - idx:14\n # ID:1d02c4b5921e916b9ddfb2f741fd6cf8d0e571ad51eb20e021c826b5fb87350e - AP:0.42729286655373605 - idx:16\n # ID:4217e25defac94ff465157d53f5a24b8a14045b763d8606ec4a97d71d99ee381 - AP:0.5232783157781324 - idx:20\n # ID:ed5be4b63e9506ad64660dd92a098ffcc0325195298c13c815a73773f1efc279 - AP:0.6324095421038027 - idx:3\n # ID:4e07a653352b30bb95b60ebc6c57afbc7215716224af731c51ff8d430788cd40 - AP:0.6619212410657151 - idx:8\n # ID:07fb37aafa6626608af90c1e18f6a743f29b6b233d2e427dcd1102df6a916cf5 - AP:0.6956153758410263 - idx:6\n # ID:353ab00e964f71aa720385223a9078b770b7e3efaf5be0f66e670981f68fe606 - AP:0.7090760717590181 - idx:10\n # ID:175dbb364bfefc9537931144861c9b6e08934df3992782c669c6fe4234319dfc - AP:0.7349640039729471 - idx:9\n # ID:8f6e49e474ebb649a1e99662243d51a46cc9ba0c9c8f1efe2e2b662a81b48de1 - AP:0.7366945430619456 - idx:5\n # ID:6b61ab2e3ff0e2c7a55fd71e290b51e142555cf82bc7574fc27326735e8acbd1 - AP:0.7540038738142536 - idx:1",
"def assert_segmap_conversions():\n segmap_colors = np.zeros((192, 192, 3), dtype=np.float32)\n for y in range(192):\n for x in range(192):\n segmap_colors[y, x, :] = class_colors[np.random.choice(n_classes)]\n assert segmap_colors.shape == (192, 192, 3)\n \n segmap_classes = segmap_colors_to_segmap_classes(segmap_colors)\n assert segmap_classes.shape == (192, 192, n_classes)\n \n segmap_stacked = segmap_classes_to_segmap_stacked(segmap_classes)\n assert segmap_stacked.shape == (192, 192)\n \n segmap_colors_2 = segmap_stacked_to_segmap_colors(segmap_stacked)\n assert segmap_colors_2.shape == (192, 192, 3) \n \n assert np.all(segmap_colors == segmap_colors_2), 'Converting segmap colors into classes and back should result in the same values!'\n \n print('Segmap conversions work correctly!')",
"def setUpClass(cls):\n super(Module11Tests, cls).setUpClass()\n # Data loading\n PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n DATASETS_ROOT = PROJECT_ROOT + '/Data/Module_11_test/'\n cls.segmentation_data = sio.loadmat(DATASETS_ROOT + 'segmentationMask.mat')\n cls.segmentation_data = cls.segmentation_data['imageMaskFull']\n\n cls.struct = smns.mri_struct()\n cls.struct.segmentation = cls.segmentation_data",
"def test_ensemble_simple_example_hellinger():\n X = np.array([[1,1,2,2,3,3,4,4],\n [1,1,2,2,3,3,4,4],\n [1,1,2,2,3,3,4,4],\n [1,1,2,2,5,5,6,6],\n [1,1,1,2,3,3,3,4],\n [1,1,1,2,3,3,3,4]])\n matXYZ = np.argwhere(np.zeros((2,2,2)) == 0)\n Z = sp.spatial_ensemble_clustering(X, matXYZ, method = \"hellinger\",\n diag_neighbor = False)\n labels = sp.get_cluster(Z, V = 8, n_init_cluster = 4)\n assert (labels == np.array([1, 1, 3, 3, 2, 2, 4, 4])).all(), \"Wrong labels\"\n # Spatial contiguous?\n list_neighbors = spasi.get_list_neighbors(matXYZ, diag_neighbor = False)\n assert is_spatial_contiguous(labels, list_neighbors), \"Not spatial contiguous (ensemble Hellinger)\"",
"def testInstantiation(self):\n\t\tm = Mesh.Mesh(self.mesh_length, self.mesh_area, self.num_zones);\n\t\tfzd = Field.FieldZoneDouble(m);\n\t\tfzn = Field.FieldNodeDouble(m);\n\t\tfzm = Field.FieldZoneMat(m);\n\t\tself.assertEqual(m.numZones(), self.num_zones) \n\t\tself.assertEqual(m.length(), self.mesh_length) \n\t\tself.assertEqual(m.area(), self.mesh_area) \n\t\tm2 = Mesh.Mesh(self.len_vector, 1.0)\n\t\tself.assertEqual(m2.numZones(), len(self.len_vector))\n\t\tfzd2 = Field.FieldZoneDouble(m2, self.len_vector)\n\t\tself.assertEqual(m2.numZones(), fzd2.size())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can add annotations programmatically
|
def test_create_annotations(self):
segmentation = adapter.SFFSegmentation() # annotation
segmentation.name = u"name"
segmentation.software_list = adapter.SFFSoftwareList()
segmentation.software_list.append(
adapter.SFFSoftware(
name=u"Software",
version=u"1.0.9",
processing_details=u"Processing details"
)
)
segmentation.details = u"Details"
# global external references
segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()
segmentation.global_external_references.append(
adapter.SFFExternalReference(
resource=u'one',
url=u'two',
accession=u'three'
)
)
segmentation.global_external_references.append(
adapter.SFFExternalReference(
resource=u'four',
url=u'five',
accession=u'six'
)
)
segmentation.segments = adapter.SFFSegmentList()
segment = adapter.SFFSegment()
biol_ann = adapter.SFFBiologicalAnnotation()
biol_ann.name = u"Segment1"
biol_ann.description = u"Some description"
# external refs
biol_ann.external_references = adapter.SFFExternalReferenceList()
biol_ann.external_references.append(
adapter.SFFExternalReference(
resource=u"sldjflj",
accession=u"doieaik"
)
)
biol_ann.external_references.append(
adapter.SFFExternalReference(
resource=u"sljd;f",
accession=u"20ijalf"
)
)
biol_ann.external_references.append(
adapter.SFFExternalReference(
resource=u"lsdjlsd",
url=u"lsjfd;sd",
accession=u"23ijlsdjf"
)
)
biol_ann.number_of_instances = 30
segment.biological_annotation = biol_ann
# colour
segment.colour = adapter.SFFRGBA(
red=1,
green=0,
blue=1,
alpha=0
)
segmentation.segments.append(segment)
# export
# segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))
# assertions
self.assertEqual(segmentation.name, u'name')
self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set
software = segmentation.software_list[0]
self.assertEqual(software.name, u"Software")
self.assertEqual(software.version, u"1.0.9")
self.assertEqual(software.processing_details, u"Processing details")
self.assertEqual(segmentation.details, u"Details")
# global external references
self.assertEqual(segmentation.global_external_references[0].resource, u'one')
self.assertEqual(segmentation.global_external_references[0].url, u'two')
self.assertEqual(segmentation.global_external_references[0].accession, u'three')
self.assertEqual(segmentation.global_external_references[1].resource, u'four')
self.assertEqual(segmentation.global_external_references[1].url, u'five')
self.assertEqual(segmentation.global_external_references[1].accession, u'six')
# segment: biological_annotation
self.assertEqual(segment.biological_annotation.name, u"Segment1")
self.assertEqual(segment.biological_annotation.description, u"Some description")
self.assertEqual(len(segment.biological_annotation.external_references), 3)
self.assertEqual(segment.biological_annotation.external_references[0].resource, u"sldjflj")
self.assertEqual(segment.biological_annotation.external_references[0].accession, u"doieaik")
self.assertEqual(segment.biological_annotation.external_references[1].resource, u"sljd;f")
self.assertEqual(segment.biological_annotation.external_references[1].accession, u"20ijalf")
self.assertEqual(segment.biological_annotation.external_references[2].resource, u"lsdjlsd")
self.assertEqual(segment.biological_annotation.external_references[2].url, u"lsjfd;sd")
self.assertEqual(segment.biological_annotation.external_references[2].accession, u"23ijlsdjf")
self.assertEqual(segment.biological_annotation.number_of_instances, 30)
# colour
self.assertEqual(segment.colour.value, (1, 0, 1, 0))
|
[
"def create_annotations(self) -> None:\n pass",
"def test_annotations(defined_object, expected):\n assert getattr(defined_object, \"__annotations__\") == expected",
"def test_get_annotations(self):\n\n itemuri = \"http://localhost:3000/catalog/cooee/items/1-012\"\n docurl = \"http://localhost:3000/documents/cooee/1-012-plain.txt\"\n\n ann = self.api.get_annotations(itemuri)\n \n self.assertIn('alveo:annotations', ann)\n self.assertIn('commonProperties', ann)\n self.assertIn('@context', ann)\n \n self.assertEqual(2, len(ann['alveo:annotations']))\n \n self.assertEqual('dada:TextAnnotation', ann['alveo:annotations'][0]['@type'])\n \n self.assertEqual(docurl, ann['commonProperties']['alveo:annotates'])\n \n ann = self.api.get_annotations(itemuri, {\"user\":\"Steve.Cassidy@mq.edu.au\"})\n ann = self.api.get_annotations(itemuri, {\"priorTo\":datetime.strptime(\"2013-12-20T12:20:00\", '%Y-%m-%dT%I:%M:%S')})\n \n pass",
"def setAnnotation(*args, **kwargs):\n \n pass",
"def all_annotations(num, test) -> None:\n return None",
"def test_correct_annotations(self):\n for doc in self.prj.documents:\n if doc.id == 26608:\n assert len(doc.annotations(self.prj.get_label_by_id(579))) == 1",
"def some_annotations(num: int, test) -> None:\n return None",
"def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)",
"def test_document_add_new_annotation(self):\n doc = self.prj.labels[0].documents[5] # the latest document\n # we create a revised annotations, as only revised annotation can be deleted\n # if we would delete an unrevised annotation, we would provide feedback and thereby keep the\n # annotation as \"wrong\" but \"revised\"\n assert len(doc.annotations(use_correct=False)) == 13\n label = self.prj.labels[0]\n new_anno = Annotation(\n start_offset=225,\n end_offset=237,\n label=label.id,\n template_id=label.templates[0].id, # hand selected document section label\n revised=True,\n is_correct=True,\n accuracy=0.98765431,\n document=doc,\n )\n # make sure document annotations are updated too\n assert len(doc.annotations(use_correct=False)) == 14\n assert len(self.prj.labels[0].correct_annotations) == 27\n assert new_anno.id is None\n new_anno.save()\n assert new_anno.id\n new_anno.delete()\n assert new_anno.id is None\n assert len(doc.annotations(use_correct=False)) == 13\n assert len(self.prj.labels[0].correct_annotations) == 26",
"def cls_some_annotations(self, num: int, test) -> None:\n return None",
"def cls_all_annotations(self, num, test) -> None:\n return None",
"def add_annotations(self, annotations):\n\n if not isinstance(annotations, list):\n print('Image.add_annotations expects a list, received {}'.format(type(annotations)))\n exit(1)\n\n self.annotations += annotations\n self.is_annotated = True",
"def _create_annotations(self, args: parser_extensions.Namespace):\n annotations = flags.Get(args, 'annotations')\n return self._dict_to_annotations_message(annotations)",
"def code_insert_trait_annotation(type: str):",
"def post_annotations(self):\n annotations_url = self.url + \"/annotations\"\n requests.post(annotations_url, json=self.annotations, auth=self.auth)",
"def add_annotations(self, annotations: Iterable[FeatureStructure]):\n for annotation in annotations:\n self.add_annotation(annotation)",
"def setannotation(self, *args):\n\n typ, dat = self._simple_command('SETANNOTATION', *args)\n return self._untagged_response(typ, dat, 'ANNOTATION')",
"def test_add_tag(self, mock_client):\n iri = 'example.com'\n rc = ResultCollection(iri)\n target = 'foo'\n value = 'bar'\n task = TaskFactory()\n fake_anno = dict(foo='bar')\n mock_client.create_annotation.return_value = fake_anno\n anno = rc.add_tag(task, target, value)\n assert_equal(anno, fake_anno)\n mock_client.create_annotation.assert_called_once_with(iri, {\n 'motivation': 'tagging',\n 'type': 'Annotation',\n 'generator': [\n {\n \"id\": flask_app.config.get('GITHUB_REPO'),\n \"type\": \"Software\",\n \"name\": \"LibCrowds\",\n \"homepage\": flask_app.config.get('SPA_SERVER_NAME')\n },\n {\n \"id\": url_for('api.api_task', oid=task.id),\n \"type\": \"Software\"\n }\n ],\n 'body': {\n 'type': 'TextualBody',\n 'purpose': 'tagging',\n 'value': value\n },\n 'target': target\n })",
"def test_get_annotations_config(self):\n for annotations in self.annotations_examples:\n # Setup data as is expected in the method get_annotations_config\n raw_annotations = {\n 'metadata': {\n 'annotations': annotations\n }\n }\n parsed_annotations = config_functions.get_annotations_config(raw_annotations)\n\n # Test if parsed annotations has all the keys that it should have.\n self.assertThat(parsed_annotations,\n matchers.KeysEqual('core_pattern', 'file_size_config',\n 'file_compression_config', 'max_use_config', 'keep_free_config'))\n\n # Test each value\n self.assertEqual(parsed_annotations['core_pattern'],\n annotations.get(\"starlingx.io/core_pattern\"))\n self.assertEqual(parsed_annotations['file_size_config'],\n annotations.get(\"starlingx.io/core_max_size\"))\n self.assertEqual(parsed_annotations['file_compression_config'],\n annotations.get(\"starlingx.io/core_compression\"))\n self.assertEqual(parsed_annotations['max_use_config'],\n annotations.get(\"starlingx.io/core_max_used\"))\n self.assertEqual(parsed_annotations['keep_free_config'],\n annotations.get(\"starlingx.io/core_min_free\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that transform ids work correctly
|
def test_transform_ids(self):
transforms = adapter.SFFTransformList()
matrix = adapter.SFFTransformationMatrix(rows=3, cols=3, data=' '.join(map(_str, range(9))))
transforms.append(matrix)
transforms2 = adapter.SFFTransformList()
matrix2 = adapter.SFFTransformationMatrix(rows=3, cols=3, data=' '.join(map(_str, range(9))))
transforms2.append(matrix2)
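        # ids are assigned automatically; the first matrix in each list is expected to get the same id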
self.assertIsNotNone(transforms[0].id)
self.assertEqual(transforms[0].id, transforms2[0].id)
|
[
"def test_transform_ids(self):\n transforms = schema.SFFTransformList()\n matrix = schema.SFFTransformationMatrix(rows=3, cols=3, data=' '.join(map(str, range(9))))\n transforms.add_transform(matrix)\n\n transforms2 = schema.SFFTransformList()\n matrix2 = schema.SFFTransformationMatrix(rows=3, cols=3, data=' '.join(map(str, range(9))))\n transforms2.add_transform(matrix2)\n\n self.assertIsNotNone(transforms[0].id)\n self.assertEqual(transforms[0].id, transforms2[0].id)",
"def test_snippets_to_ids():\n\tsnippets = [['sentence', 'one'], ['sentence'], ['two']]\n\tresult = (([12205, 68, 0], [12205, 0, 0]), (2, 1))\n\tassert lstm.snippets_to_ids(snippets, 3, 2) == result\n\n\tsnippets = [['sentence', 'three']]\n\tresult = (([12205, 98, 0], [0, 0, 0]), (2, 0))\n\tassert lstm.snippets_to_ids(snippets, 3, 2) == result",
"def test_mousegenes_id_get(self):\n pass",
"def test_convert_region_by_id(self):\n\n region = get_region_by_id(52)\n \n regions_ids = (52, 583,66)\n \n for region_id in regions_ids:\n get_region_by_id(region_id)\n convert_region(region_id)\n \n \n vs_region = convert_region(52)\n region.set_obj()\n self.assertEqual(region.obj['name'], 'Europe')\n self.assertEqual(region.obj['name'], vs_region.name)\n \n #regions_ids = (22, 2218,844,2218,336,52, 583, 328,298,52, 301, 300,74,72,70, 106, 16,1,2, 3, 356, 355, 274, 273, )\n \n #for region_id in regions_ids:\n #get_region_by_id(region_id)\n #convert_region(region_id)\n\n #return True\n #data = serializers.serialize(\"json\", Region.objects.all())\n #f = open('vegbasketapp/content/fixtures/region_52.json', 'w')\n #f.write(data)\n #f.close()\n \n #data = serializers.serialize(\"json\", VeggieSailorRegion.objects.all())\n #f = open('vegbasketapp/content/fixtures/vs_region_source_52.json', 'w')\n #f.write(data)\n #f.close() ",
"def test_greenalgas_id_get(self):\n pass",
"def test_data_source_postgre_sqls_id_replace_post(self):\n pass",
"def test_portals_id_replace_post(self):\n pass",
"def test_create_with_custom_id(self):\n id = 5000\n link = Link.objects.create(id=id, url='http://www.python.org')\n self.assertEqual(link.to_base62(), base62.from_decimal(id))",
"def test_translate_identifier(test_seqrepo_access):\n expected = ([\"ga4gh:SQ.ijXOSP3XSsuLWZhXQ7_TJ5JXu4RJO6VT\"], None)\n resp = test_seqrepo_access.translate_identifier(\n \"NM_152263.3\", target_namespaces=\"ga4gh\")\n assert resp == expected\n\n resp = test_seqrepo_access.translate_identifier(\n \"refseq:NM_152263.3\", target_namespaces=\"ga4gh\")\n assert resp == expected\n\n resp = test_seqrepo_access.translate_identifier(\"refseq:NM_152263.3\")\n assert len(resp[0]) > 0\n assert resp[1] is None\n assert expected[0][0] in resp[0]\n\n resp = test_seqrepo_access.translate_identifier(\"GRCh38:2\")\n assert len(resp[0]) > 0\n assert resp[1] is None\n assert \"refseq:NC_000002.12\" in resp[0]\n\n resp = test_seqrepo_access.translate_identifier(\"NC_000002.12\")\n assert len(resp[0]) > 0\n assert resp[1] is None\n assert \"refseq:NC_000002.12\" in resp[0]\n\n resp = test_seqrepo_access.translate_identifier(\"refseq_152263.3\")\n assert resp == ([], \"SeqRepo unable to get translated identifiers for\"\n \" refseq_152263.3\")",
"def test_unsignedID(self):\n foo = object()\n bar = object()\n\n # A fake object identity mapping\n objects = {foo: 17, bar: -73}\n def fakeId(obj):\n return objects[obj]\n\n util.setIDFunction(fakeId)\n\n self.assertEqual(util.unsignedID(foo), 17)\n self.assertEqual(util.unsignedID(bar), (sys.maxsize + 1) * 2 - 73)",
"def test_musicals_id_get(self):\n pass",
"def test_that_token_to_id_is_correct(self):\n token_to_id = CodeClassifier.map_tokens_to_ids(self.data, 0)\n expected_tokens = [\n 'and', 'UNK', '%', 'for', ')', '(', '+', 'V', 'else', '==', '0',\n '3', '5', '1000', 'in', 'print', ':', '=', 'or', '+=', 'if']\n self.assertListEqual(token_to_id.keys(), expected_tokens)",
"def regularize_ids(df, replacer):\n return df.replace({'id': replacer})",
"def test_id(self):\n\n self.assertEqual(self.r1.id, 1)\n self.assertEqual(self.r2.id, 2)\n self.assertEqual(self.r3.id, 3)\n self.assertEqual(self.r4.id, 9)",
"def test_transform_simple(self, dataset, preprocessor, bert):\n (actual_processed_dataset, actual_encoded_mentions, actual_encoded_mentions_split_sizes,\n actual_targets, actual_targets_split_sizes) = \\\n preprocessor.transform(dataset, bert)\n\n # TODO 1 Example should include corefs\n expected_processed_dataset = {\n 'train': {\n 'WH_train_0': {\n 'mentions': [[]],\n 'query': \"participant_of juan rossell\",\n 'candidate_indices': {\n '1996 summer olympics': [],\n 'olympic games': [],\n 'sport': [],\n }\n },\n 'WH_train_1': {\n 'mentions': [\n [\n {'text': 'english', 'corefs': []},\n {'text': 'spanish', 'corefs': []},\n ],\n [\n {'text': 'nahuatl', 'corefs': []},\n {'text': 'spanish', 'corefs': []},\n ]\n ],\n 'query': \"languages_spoken_or_written john osteen\",\n 'candidate_indices': {\n 'english': [0],\n 'greek': [],\n 'koine greek': [],\n 'nahuatl': [2],\n 'spanish': [1, 3],\n }\n }\n }\n }\n expected_encoded_mentions_split_sizes = {'train': [0, 4]}\n expected_targets = torch.tensor([1, 0, 0, 1, 0, 0, 0, 0])\n expected_targets_split_sizes = {'train': [3, 5]}\n\n assert expected_processed_dataset == actual_processed_dataset\n # 4 because there are four mentions and 768 b/c it is the size of BERT encodings\n assert actual_encoded_mentions['train'].shape == (4, 768)\n assert expected_encoded_mentions_split_sizes == actual_encoded_mentions_split_sizes\n assert torch.equal(expected_targets, actual_targets['train'])\n assert expected_targets_split_sizes, actual_targets_split_sizes['train']",
"def test_query_ids(self):\n data_values = {\n \"object_name\": \"Program\",\n \"type\": \"values\",\n \"filters\": {\n \"expression\": {\n \"left\": \"title\",\n \"op\": {\"name\": \"~\"},\n \"right\": \"Cat ipsum\",\n },\n },\n }\n programs_values = self._get_first_result_set(data_values, \"Program\")\n\n data_ids = {\n \"object_name\": \"Program\",\n \"type\": \"ids\",\n \"filters\": {\n \"expression\": {\n \"left\": \"title\",\n \"op\": {\"name\": \"~\"},\n \"right\": \"Cat ipsum\",\n },\n },\n }\n programs_ids = self._get_first_result_set(data_ids, \"Program\")\n\n self.assertEqual(\n set(obj.get(\"id\") for obj in programs_values[\"values\"]),\n set(programs_ids[\"ids\"]),\n )",
"def test_logical_ids(self):\r\n\r\n\r\n # Empty our resource\r\n SourceEntity.objects.filter(resource=self.resource).delete()\r\n\r\n # Make sure that we have no suggestions to begin with\r\n self.assertEqual(Suggestion.objects.filter(source_entity__in=\r\n SourceEntity.objects.filter(resource=self.resource).values('id')).count(), 0)\r\n\r\n # Import file with two senteces\r\n handler = POHandler('%s/logical_ids/tests.pot' %\r\n os.path.split(__file__)[0])\r\n handler.bind_resource(self.resource)\r\n handler.set_language(self.resource.source_language)\r\n handler.parse_file(is_source=True)\r\n handler.save2db(is_source=True)\r\n\r\n # import pt_BR translation\r\n handler = POHandler('%s/logical_ids/pt_BR.po' %\r\n os.path.split(__file__)[0])\r\n handler.bind_resource(self.resource)\r\n handler.set_language(self.language)\r\n handler.parse_file()\r\n handler.save2db()\r\n\r\n # Make sure that we have all translations in the db\r\n self.assertEqual(Translation.objects.filter(source_entity__in=\r\n SourceEntity.objects.filter(resource=self.resource).values('id')).count(), 2)\r\n\r\n source = SourceEntity.objects.get(resource=self.resource)\r\n en_trans = Translation.objects.get(source_entity__resource=self.resource,\r\n language = self.resource.source_language)\r\n pt_trans = Translation.objects.get(source_entity__resource=self.resource,\r\n language = self.language)\r\n\r\n # Check to see that the correct strings appear as the translations and\r\n # not the logical id\r\n self.assertEqual(en_trans.string, \"Hello, World!\")\r\n self.assertEqual(pt_trans.string, \"Holas, Amigos!\")\r\n self.assertEqual(source.string, \"source_1\")",
"def test_patch_obj_id_get(self):\n pass",
"def test_transform(self):\r\n self.assert_(self.object._transform([]) == {})\r\n records = self._get_records(5, keyspace=\"eggs\", column_family=\"bacon\")\r\n out = self.object._transform(records)\r\n self.assert_(len(out) == len(records))\r\n for record in records:\r\n self.assert_(record.key.key in out)\r\n self.assert_(out[record.key.key] is record)\r\n\r\n for key in out:\r\n self.assert_(key == out[key].key.key)",
"def test_id_generated():\n msg = Message.parse_obj({\"@type\": TEST_TYPE})\n assert msg.type == TEST_TYPE\n assert msg.id is not None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Export to an XML (.sff) file
|
def test_export_sff(self):
temp_file = tempfile.NamedTemporaryFile()
self.segmentation.export(temp_file.name + u'.sff')
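        # the .sff export is XML, so the file should begin with an XML declaration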
# assertions
with open(temp_file.name + u'.sff') as f:
self.assertEqual(f.readline(), u'<?xml version="1.0" encoding="UTF-8"?>\n')
|
[
"def write_toXMLfile(self):\n sfbxml = self.sdict['sfbxml']\n self._make_sfbxmlfile(sfbxml)",
"def test_export_xml_to_file(self):\n pass",
"def exportXML(self):\r\n encoding:str = self.encodingVariable.get()\r\n\r\n try:\r\n self.filePath:str = asksaveasfilename(\r\n defaultextension = \".xml\",\r\n filetypes = [(\"XML file\", \"*.xml\")],\r\n initialfile = \"output.xml\")\r\n self.exporter.exportAsXML(self.filePath, self.data, encoding)\r\n showinfo(\"success!\",\r\n \"Your XML file is sucessfully saved!\")\r\n except ValueError as valErr:\r\n showerror(\"ERROR!\",\r\n valErr)\r\n except TypeError:\r\n showerror(\"ERROR!\",\r\n \"Invalid tagname \\'nan\\'\")",
"def exportXML(self, xml_file):\n\t\tself.makeTree()\n\t\tif type(xml_file) is str or type(xml_file) is unicode:\n\t\t\txml_file = open(xml_file,'w')\n\t\ttry:\n\t\t\txml_file.write( etree.tostring(self.tree))\n\t\tfinally:\n\t\t\txml_file.close()",
"def test_export_xml(self):\n pass",
"def exportToFO(self, fo_file, xslt_file=None):\n\t\tself.makeTree()\n\n\t\tif xslt_file is None:\n\t\t\txslt_file = 'cards-2x2.xsl' #FIXME, don't hard-code file paths\n\n\t\txslt_tree = etree.XSLT(etree.parse(xslt_file))\n\t\ttrans_tree = xslt_tree(self.tree)\n\t\tclose_file = False\n\n\t\tif type(fo_file) is str or type(fo_file) is unicode:\n\t\t\tfo_file = open(fo_file, 'w')\n\t\t\tclose_file = True\n\n\t\ttry:\n\t\t\tfo_file.write( etree.tostring(trans_tree) )\n\t\tfinally:\n\t\t\tif close_file:\n\t\t\t\tfo_file.close()",
"def xml_results_to_file_path(self, xml_results):\n try:\n xml_out = xml.dom.minidom.parseString(xml_results)\n xml_pretty = xml_out.toprettyxml()\n f = open(self.export_xml_to_file_path, \"w\")\n f.write(xml_pretty)\n f.close()\n except BaseException as err:\n raise FSMBaseException(msg=\"XML Failed to write to file: \" + str(self.export_xml_to_file_path) +\n \"| Error: \" + str(err))",
"def onActionExportAsXMLTriggered(self):\n fileName = self.saveFileDialog(\"Export As XML\" , fileType=\"XML\", fileExtension=\"xml\")\n if fileName:\n try:\n serializer = StudentXMLSerializer()\n students = self.dao.find_all()\n serializer.exportAsXMLToFile(students, fileName)\n QMessageBox.information(self, \"<<Information>>\", \"Exported As XML successfully.\")\n\n except Exception as err:\n QMessageBox.critical(self, \"<<Error>>\", str(err))\n\n\n else:\n QMessageBox.critical(self, \"<<Error>>\", \"No fileName was given.\")",
"def schrijf_xml(data):\n open('filmlijst.xml', 'w')\n bestand = codecs.open('filmlijst.xml', \"w\", \"utf-8\")\n bestand.write(str(data))\n bestand.close()",
"def escribir(self):\n tree.write('metadata1.xml')\n bs = BeautifulSoup(open('metadata1.xml'), 'xml')\n archivo1 = open('metadata1.xml', \"w+\")\n archivo1.write(bs.prettify())",
"def dumpTreeXMLToFile(tree, output_file):\n\n value = toString(tree).rstrip()\n output_file.write(value)",
"def write_xml_file(self):\n for item in self.xml_lines:\n self.xml_file.write(\"{}\\n\".format(item))",
"def write_xml_changes(self, outfile):\n raise NotImplementedError",
"def save_to_ftml(self, filename=None):\n if not filename:\n raise ValueError(\"filename to save flight track cannot be None or empty\")\n\n self.filename = filename\n self.name = fs.path.basename(filename.replace(\".ftml\", \"\").strip())\n doc = self.get_xml_doc()\n dirname, name = fs.path.split(self.filename)\n file_dir = fs.open_fs(dirname)\n with file_dir.open(name, 'w') as file_object:\n doc.writexml(file_object, indent=\" \", addindent=\" \", newl=\"\\n\", encoding=\"utf-8\")\n file_dir.close()",
"def test_export_xml_in_job(self):\n pass",
"def exportXML(ctrl, pwa, uid, filename=\"modelDescription\"):\n f = open(filename+\".xml\", 'w')\n\n f.write('<fmiModelDescription\\n')\n f.write(' description=\"Discrete Time Controller\"\\n')\n f.write(' fmiVersion=\"1.5\"\\n')\n f.write(' guid=\"{' + str(uid)+'}\"\\n')\n f.write(' modelName=\"TuLiPFMU\">\\n\\n')\n\n f.write('<CoSimulation modelIdentifier=\"TuLiPFMU\" \\\n canHandleVariableCommunicationStepSize=\"true\" \\\n canHandleEvents=\"true\" \\\n canProvideMaxStepSize=\"true\"/>\\n')\n\n f.write('<ModelVariables>\\n')\n # number of state\n n = pwa.pwa.B.shape[0]\n # number of control\n m = pwa.pwa.B.shape[1]\n # output real variable: the control output\n for i in range(0, m):\n f.write('<ScalarVariable name=\"u'+str(i)+'\" \\\n valueReference=\"'+str(i)+'\" \\\n description=\"output\" \\\n causality=\"output\">')\n f.write('<Real/>\\n')\n f.write('</ScalarVariable>\\n')\n\n # input real variable: the current state of the system\n for i in range(0, n):\n f.write('<ScalarVariable name=\"y'+str(i)+'\" \\\n valueReference=\"'+str(i+m)+'\" \\\n description=\"input\" \\\n causality=\"input\">')\n f.write('<Real/>\\n')\n f.write('</ScalarVariable>\\n')\n\n # input discrete variable\n i = 0\n for inputname, inputset in ctrl.inputs.items():\n f.write('<ScalarVariable name=\"'+inputname+'\" \\\n valueReference=\"'+str(i+m+n)+'\" \\\n description=\"input\" \\\n causality=\"input\">')\n f.write('<Integer/>')\n f.write('</ScalarVariable>')\n i = i+1\n\n f.write('</ModelVariables>\\n')\n f.write('</fmiModelDescription>\\n')\n f.close()",
"def write_xml(tree, filename=None, path=\"output/\", ):\n if filename is None:\n filename = \"scenario_output_\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".xml\"\n if not filename.endswith(\".xml\"):\n filename += \".xml\"\n if not path.endswith(\"/\"):\n path += \"/\"\n if path.startswith(\"/\"):\n path = path[1:]\n\n create_output_folder(path)\n write_tree(tree, path, filename)",
"def output_skr_xml(\n skr: Response, output_filename: Optional[str], log_contents: bool = False\n) -> None:\n xml = skr_to_xml(skr)\n if output_filename:\n xml_bytes = xml.encode()\n with open(output_filename, \"wb\") as fd:\n fd.write(xml_bytes)\n logger.info(\n \"Wrote SKR to file %s %s\", output_filename, checksum_bytes2str(xml_bytes)\n )\n if log_contents:\n log_file_contents(output_filename, xml_bytes, logger.getChild(\"skr\"))\n else:\n print(xml)",
"def convert(filename, fd_out):\n logger = logging.getLogger('pyx12')\n wr = pyx12.x12file.X12Writer(fd_out, '~', '*', ':', '\\n', '^')\n parser = et.XMLParser(encoding=\"utf-8\")\n doc = et.parse(filename, parser=parser)\n for node in doc.iter():\n if node.tag == 'seg':\n wr.Write(get_segment(node))\n return True",
"def to_testlink_xml_file(testsuite, path_to_xml):\n content = to_testlink_xml_content(testsuite)\n if exists(path_to_xml):\n os.remove(path_to_xml)\n\n with open(path_to_xml, 'w', encoding='utf-8') as f:\n f.write(prettify_xml(content))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we can merge annotation from one to another
|
def test_merge_annotation(self):
seg1_fn = os.path.join(TEST_DATA_PATH, u'sff', u'v0.8', u'annotated_emd_1014.json')
seg2_fn = os.path.join(TEST_DATA_PATH, u'sff', u'v0.8', u'emd_1014.json')
seg1 = adapter.SFFSegmentation.from_file(seg1_fn)
seg2 = adapter.SFFSegmentation.from_file(seg2_fn)
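        # both files describe the same entry (EMD-1014); one copy carries annotations, the other does not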
# perform the notes merge
seg1.merge_annotation(seg2)
self.assertEqual(seg1.name, seg2.name)
self.assertEqual(seg1.software_list, seg2.software_list)
self.assertEqual(seg1.details, seg2.details)
self.assertEqual(seg1.global_external_references, seg2.global_external_references)
for segment in seg1.segment_list:
other_segment = seg2.segment_list.get_by_id(segment.id)
self.assertEqual(segment.biological_annotation.external_references,
other_segment.biological_annotation.external_references)
self.assertNotEqual(segment.colour, other_segment.colour)
# test that we can merge colours too!
seg1.merge_annotation(seg2, include_colour=True)
for segment in seg1.segment_list:
other_segment = seg2.segment_list.get_by_id(segment.id)
self.assertEqual(segment.biological_annotation.external_references,
other_segment.biological_annotation.external_references)
self.assertEqual(segment.colour, other_segment.colour)
|
[
"def fix_annotations(nanopub: Nanopub) -> Nanopub:\n\n if \"nanopub\" in nanopub:\n for idx, anno in enumerate(nanopub[\"nanopub\"][\"annotations\"]):\n update_bel_annotation(anno)\n\n nanopub[\"nanopub\"][\"annotations\"][idx][\"type\"] = anno[\"type\"]\n nanopub[\"nanopub\"][\"annotations\"][idx][\"id\"] = anno.get(\"id\", None)\n nanopub[\"nanopub\"][\"annotations\"][idx][\"label\"] = anno[\"label\"]\n\n return nanopub",
"def update_metadata(source, target):\n target.namespace_url.update(source.namespace_url)\n target.namespace_pattern.update(source.namespace_pattern)\n target.annotation_url.update(source.annotation_url)\n target.annotation_pattern.update(source.annotation_pattern)\n target.annotation_list.update(source.annotation_list)",
"def test_annotations(defined_object, expected):\n assert getattr(defined_object, \"__annotations__\") == expected",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def test_correct_annotations(self):\n for doc in self.prj.documents:\n if doc.id == 26608:\n assert len(doc.annotations(self.prj.get_label_by_id(579))) == 1",
"def compareAnnotations2(old, new, output, args={}):\n result = {}\n global no_change, UTR_added, yardSale, exonChange, modelChangeNotProt, dropped, added, total_transcripts, total_genes\n (\n no_change,\n UTR_added,\n yardSale,\n exonChange,\n modelChangeNotProt,\n dropped,\n added,\n total_transcripts,\n total_genes,\n ) = (0,) * 9\n lib.log.info(\n \"Comparing original annotation to updated\\n original: {}\\n updated: {}\".format(\n old, new\n )\n )\n if args.gff and args.fasta:\n oldInter, oldGenes = gff2interlap(old, args.fasta)\n else:\n oldInter, oldGenes = gbk2interlap(old)\n newInter, newGenes = gff2interlap(new, args.fasta)\n # do the simple stuff first, find models that were deleted\n for contig in oldInter:\n for gene in oldInter[contig]:\n if not gene in newInter[contig]: # these models are removed\n dropped += 1\n if not gene[2] in oldGenes:\n continue\n # populate output dictionary with results\n if not gene[2] in result:\n # dropped model has AED of 1.000\n cdsAED = \"1.000\"\n exonAED = \"1.000\"\n result[gene[2]] = {\n \"contig\": oldGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": len(oldGenes[gene[2]][\"ids\"]),\n \"old_location\": oldGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(oldGenes[gene[2]][\"ids\"]),\n \"strand\": oldGenes[gene[2]][\"strand\"],\n \"mRNA\": oldGenes[gene[2]][\"mRNA\"],\n \"location\": oldGenes[gene[2]][\"location\"],\n \"CDS\": oldGenes[gene[2]][\"CDS\"],\n \"message\": \"gene model removed\",\n \"cdsAED\": cdsAED,\n \"exonAED\": exonAED,\n \"transcript_id\": oldGenes[gene[2]][\"ids\"],\n \"pident\": [],\n \"protein_id\": oldGenes[gene[2]][\"ids\"],\n \"seq\": oldGenes[gene[2]][\"protein\"],\n }\n\n # now go through the updated annotation, comparing to old annot\n for contig in newInter:\n for gene in newInter[contig]:\n # means this is a new model, so add it\n if not gene in oldInter[contig]:\n added += 1\n total_genes += 1\n if not gene[2] in newGenes:\n continue\n total_transcripts += len(newGenes[gene[2]][\"ids\"])\n if not gene[2] in result:\n result[gene[2]] = {\n \"contig\": newGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": 0,\n \"old_location\": newGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(newGenes[gene[2]][\"ids\"]),\n \"strand\": newGenes[gene[2]][\"strand\"],\n \"mRNA\": newGenes[gene[2]][\"mRNA\"],\n \"location\": newGenes[gene[2]][\"location\"],\n \"CDS\": newGenes[gene[2]][\"CDS\"],\n \"message\": \"new gene model\",\n \"cdsAED\": \"0.000\",\n \"exonAED\": \"0.000\",\n \"transcript_id\": newGenes[gene[2]][\"ids\"],\n \"protein_id\": newGenes[gene[2]][\"ids\"],\n \"seq\": newGenes[gene[2]][\"protein\"],\n \"pident\": [],\n }\n else: # means this is existing model, and need to do some comparisons\n hitList = list(oldInter[contig].find(gene))\n # there might be some overlapping transcripts, so enforce locus name\n hit = None\n for z in hitList:\n if gene[2] == z[2]:\n hit = z\n if not hit:\n # there is no real hit, so this a new gene\n total_transcripts += len(newGenes[gene[2]][\"ids\"])\n added += 1\n total_genes += 1\n if not gene[2] in result:\n result[gene[2]] = {\n \"contig\": newGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": 0,\n \"old_location\": newGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(newGenes[gene[2]][\"ids\"]),\n \"strand\": newGenes[gene[2]][\"strand\"],\n \"mRNA\": newGenes[gene[2]][\"mRNA\"],\n \"location\": newGenes[gene[2]][\"location\"],\n \"CDS\": newGenes[gene[2]][\"CDS\"],\n \"message\": \"new gene model\",\n \"cdsAED\": \"0.000\",\n \"exonAED\": 
\"0.000\",\n \"transcript_id\": newGenes[gene[2]][\"ids\"],\n \"protein_id\": newGenes[gene[2]][\"ids\"],\n \"seq\": newGenes[gene[2]][\"protein\"],\n \"pident\": [],\n }\n else:\n # since we may have multiple transcripts from hit as well as new annotation we need to be aware of that\n # also, tRNA annotations do not exist in Proteins dictionary, so process them differently\n # get the reference hits, pull out CDS and mRNA for pairwiseAED calculation\n total_genes += 1\n total_transcripts += len(newGenes[gene[2]][\"ids\"])\n\n # get the old annotation\n hitInfo = oldGenes.get(gene[2])\n\n # calculate AED\n exonAED = pairwiseAED(newGenes[gene[2]][\"mRNA\"], hitInfo[\"mRNA\"])\n if (\n newGenes[gene[2]][\"type\"] == \"mRNA\"\n and hitInfo[\"type\"] == \"mRNA\"\n ):\n cdsAED = pairwiseAED(newGenes[gene[2]][\"CDS\"], hitInfo[\"CDS\"])\n else:\n cdsAED = \"0.000\"\n\n # check translation, to deal with multiple transcripts, lets loop through new\n protMatches = []\n if (\n newGenes[gene[2]][\"type\"] == \"mRNA\"\n and hitInfo[\"type\"] == \"mRNA\"\n ):\n for i in range(0, len(newGenes[gene[2]][\"ids\"])):\n protMatch = None\n for y in range(0, len(oldGenes[gene[2]][\"ids\"])):\n pident = pairwiseAlign(\n newGenes[gene[2]][\"protein\"][i],\n oldGenes[gene[2]][\"protein\"][y],\n )\n if not protMatch:\n protMatch = pident\n else:\n if pident > protMatch:\n protMatch = pident\n protMatches.append(protMatch)\n # summarize UTRs for mRNA features\n if newGenes[gene[2]][\"type\"] == \"mRNA\":\n try:\n UTRs = findUTRs(\n newGenes[gene[2]][\"CDS\"],\n newGenes[gene[2]][\"mRNA\"],\n newGenes[gene[2]][\"strand\"],\n )\n except:\n UTRs = []\n lib.log.debug(\n \"UTR detection failed for {}: CDS={} mRNA={} strand={}\".format(\n newGenes[gene[2]][\"ids\"],\n newGenes[gene[2]][\"CDS\"],\n newGenes[gene[2]][\"mRNA\"],\n newGenes[gene[2]][\"strand\"],\n )\n )\n else:\n UTRs = []\n\n # structured comments/counts for gene models\n msg, no_change, UTR_added, yardSale, exonChange = message(\n newGenes[gene[2]][\"location\"],\n oldGenes[gene[2]][\"location\"],\n cdsAED,\n exonAED,\n protMatches,\n UTRs,\n no_change,\n UTR_added,\n yardSale,\n exonChange,\n )\n\n if not gene[2] in result:\n result[gene[2]] = {\n \"contig\": newGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": len(oldGenes[gene[2]][\"ids\"]),\n \"old_location\": oldGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(newGenes[gene[2]][\"ids\"]),\n \"strand\": newGenes[gene[2]][\"strand\"],\n \"mRNA\": newGenes[gene[2]][\"mRNA\"],\n \"location\": newGenes[gene[2]][\"location\"],\n \"CDS\": newGenes[gene[2]][\"CDS\"],\n \"message\": msg,\n \"cdsAED\": cdsAED,\n \"exonAED\": exonAED,\n \"transcript_id\": newGenes[gene[2]][\"ids\"],\n \"protein_id\": newGenes[gene[2]][\"ids\"],\n \"seq\": newGenes[gene[2]][\"protein\"],\n \"pident\": protMatches,\n }\n\n total_cdsAED = []\n total_exonAED = []\n with open(output, \"w\") as out:\n out.write(\n \"Locus_tag\\tOrig_Location\\tOrig_Num_Transcripts\\tContig:start-end\\tStrand\\tGene_Length\\tNum_Transcripts\\tmRNA_AED\\tCDS_AED\\tDescription\\n\"\n )\n for k, v in natsorted(list(result.items())):\n start = str(v[\"location\"][0])\n end = str(v[\"location\"][1])\n GeneLength = int(end) - int(start)\n total_cdsAED.append(float(v[\"cdsAED\"]))\n total_exonAED.append(float(v[\"exonAED\"]))\n out.write(\n \"{:}\\t{:}:{:}-{:}\\t{:}\\t{:}:{:}-{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\n\".format(\n k,\n v[\"contig\"],\n v[\"old_location\"][0],\n v[\"old_location\"][1],\n v[\"old_num_transcripts\"],\n 
v[\"contig\"],\n start,\n end,\n v[\"strand\"],\n GeneLength,\n v[\"num_transcripts\"],\n v[\"exonAED\"],\n v[\"cdsAED\"],\n v[\"message\"],\n )\n )\n Avg_cdsAED = sum(total_cdsAED) / float(len(total_cdsAED))\n Avg_exonAED = sum(total_exonAED) / float(len(total_exonAED))\n # output some simple stats to cmd line\n lib.log.info(\n \"Updated annotation complete:\\n\\\n-------------------------------------------------------\\n\\\nTotal Gene Models:\\t{:,}\\n\\\nTotal transcripts:\\t{:,}\\n\\\nNew Gene Models:\\t{:,}\\n\\\nNo Change:\\t\\t{:,}\\n\\\nUpdate UTRs:\\t\\t{:,}\\n\\\nExons Changed:\\t\\t{:,}\\n\\\nExons/CDS Changed:\\t{:,}\\n\\\nDropped Models:\\t\\t{:,}\\n\\\nCDS AED:\\t\\t{:.3f}\\n\\\nmRNA AED:\\t\\t{:.3f}\\n\\\n-------------------------------------------------------\".format(\n total_genes,\n total_transcripts,\n added,\n no_change,\n UTR_added,\n exonChange,\n yardSale,\n dropped,\n Avg_cdsAED,\n Avg_exonAED,\n )\n )",
"def transfer(source, dest):\n\n s_ann = annotations(source)\n d_ann = annotations(dest)\n d_ann.clear()\n\n if s_ann:\n\td_ann.update(\n\t (d_name, s_ann[s_name])\n\t for (s_name, d_name) in zip_annotatable(source, dest)\n\t if s_name in s_ann\n\t)\n\n return dest",
"def test_document_add_new_annotation(self):\n doc = self.prj.labels[0].documents[5] # the latest document\n # we create a revised annotations, as only revised annotation can be deleted\n # if we would delete an unrevised annotation, we would provide feedback and thereby keep the\n # annotation as \"wrong\" but \"revised\"\n assert len(doc.annotations(use_correct=False)) == 13\n label = self.prj.labels[0]\n new_anno = Annotation(\n start_offset=225,\n end_offset=237,\n label=label.id,\n template_id=label.templates[0].id, # hand selected document section label\n revised=True,\n is_correct=True,\n accuracy=0.98765431,\n document=doc,\n )\n # make sure document annotations are updated too\n assert len(doc.annotations(use_correct=False)) == 14\n assert len(self.prj.labels[0].correct_annotations) == 27\n assert new_anno.id is None\n new_anno.save()\n assert new_anno.id\n new_anno.delete()\n assert new_anno.id is None\n assert len(doc.annotations(use_correct=False)) == 13\n assert len(self.prj.labels[0].correct_annotations) == 26",
"def merge_coco_annotations(existing_coco_annotations, new_coco_annotations):\n\n # Concatenate category sections\n for cat_dict in new_coco_annotations[\"categories\"]:\n if cat_dict not in existing_coco_annotations[\"categories\"]:\n existing_coco_annotations[\"categories\"].append(cat_dict)\n\n # Concatenate images sections\n image_id_offset = max([image[\"id\"] for image in existing_coco_annotations[\"images\"]]) + 1\n for image in new_coco_annotations[\"images\"]:\n image[\"id\"] += image_id_offset\n existing_coco_annotations[\"images\"].extend(new_coco_annotations[\"images\"])\n\n # Concatenate annotations sections\n if len(existing_coco_annotations[\"annotations\"]) > 0:\n annotation_id_offset = max([annotation[\"id\"] for annotation in existing_coco_annotations[\"annotations\"]]) + 1\n else:\n annotation_id_offset = 0\n for annotation in new_coco_annotations[\"annotations\"]:\n annotation[\"id\"] += annotation_id_offset\n annotation[\"image_id\"] += image_id_offset\n existing_coco_annotations[\"annotations\"].extend(new_coco_annotations[\"annotations\"])\n\n return existing_coco_annotations, image_id_offset",
"def test_get_annotations(self):\n\n itemuri = \"http://localhost:3000/catalog/cooee/items/1-012\"\n docurl = \"http://localhost:3000/documents/cooee/1-012-plain.txt\"\n\n ann = self.api.get_annotations(itemuri)\n \n self.assertIn('alveo:annotations', ann)\n self.assertIn('commonProperties', ann)\n self.assertIn('@context', ann)\n \n self.assertEqual(2, len(ann['alveo:annotations']))\n \n self.assertEqual('dada:TextAnnotation', ann['alveo:annotations'][0]['@type'])\n \n self.assertEqual(docurl, ann['commonProperties']['alveo:annotates'])\n \n ann = self.api.get_annotations(itemuri, {\"user\":\"Steve.Cassidy@mq.edu.au\"})\n ann = self.api.get_annotations(itemuri, {\"priorTo\":datetime.strptime(\"2013-12-20T12:20:00\", '%Y-%m-%dT%I:%M:%S')})\n \n pass",
"def insert_minimal_annotations(annotated_node, annot_type_2_annot):\n for annotation_type, annotation_set in annot_type_2_annot.iteritems():\n if annotation_type != 'name' and annotation_set != '' and annotation_set != []:\n if not isinstance(annotation_type, list):\n annotation_set = [annotation_set]\n for annotation in annotation_set:\n annotation_node = DatabaseGraph.AnnotNode.create(\n ptype=annotation_type, payload=annotation)\n if verbosity > 1:\n log.info('created annotation %s for %s, _id:%s',\n annotation_node, annotated_node, annotated_node.ID)\n else:\n log.debug('created annotation %s for %s, _id:%s',\n annotation_node, annotated_node, annotated_node.ID)\n DatabaseGraph.is_annotated.create(\n annotated_node,\n annotation_node,\n costum_from=annotated_node.ID,\n costum_to='Annotation')",
"def test_annotate_edit_does_not_delete(self):\n self.t(\"add tw-20\")\n\n self.t(\"1 annotate 1st annotation\")\n self.t(\"1 annotate 2nd annotation\")\n\n code, _timestamp1a, err = self.t(\"_get 1.annotations.1.entry\")\n code, _timestamp2a, err = self.t(\"_get 1.annotations.2.entry\")\n\n self.t(\"1 edit\")\n\n code, _timestamp1b, err = self.t(\"_get 1.annotations.1.entry\")\n code, _timestamp2b, err = self.t(\"_get 1.annotations.2.entry\")\n\n self.assertEqual( _timestamp1a, _timestamp1b )\n self.assertEqual( _timestamp2a, _timestamp2b )\n\n code, out, err = self.t(\"info\")\n\n self.assertNotIn(\"Annotation '1st annotation' deleted.\", out)\n self.assertNotIn(\"Annotation '2nd annotation' deleted.\", out)",
"def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)",
"def all_annotations(num, test) -> None:\n return None",
"def get_annotations_and_ids(self):\n return self.annotations.copy(), self.annotated_img_ids.copy()",
"def test_get_annotation_base(self, mock_client):\n rc = ResultCollection(None)\n task = TaskFactory()\n motivation = 'foo'\n base = rc._get_annotation_base(task, motivation)\n assert_equal(base, {\n 'type': 'Annotation',\n 'motivation': motivation,\n 'generator': [\n {\n \"id\": flask_app.config.get('GITHUB_REPO'),\n \"type\": \"Software\",\n \"name\": \"LibCrowds\",\n \"homepage\": flask_app.config.get('SPA_SERVER_NAME')\n },\n {\n \"id\": url_for('api.api_task', oid=task.id),\n \"type\": \"Software\"\n }\n ]\n })",
"def _move_annotation(annotation_uri, new_graph, old_graph, request, timestamp):\n # First check permissions\n old_g = generate_graph(CharmeMiddleware.get_store(), old_graph)\n if not is_update_allowed(old_g, annotation_uri, request):\n raise SecurityError((\"You do not have the required permission to \"\n \"update the status of annotation %s\" %\n annotation_uri))\n\n new_g = generate_graph(CharmeMiddleware.get_store(), new_graph)\n # Move the people\n for res in _get_people(old_g, annotation_uri):\n _remove(old_g, res)\n _add(new_g, res)\n # Copy the organization\n for res in _get_organization(old_g, annotation_uri):\n _add(new_g, res)\n # Copy the software\n for res in _get_software(old_g, annotation_uri):\n _add(new_g, res)\n # Move the annotation\n targets = []\n for res in old_g.triples((annotation_uri, None, None)):\n _remove(old_g, res)\n # We are only allowed one annotatedAt per annotation\n if res[1] == URIRef(OA + 'annotatedAt'):\n continue\n if res[1] == URIRef(OA + 'hasTarget'):\n targets.append(res[2])\n _add(new_g, res)\n # Add new annotatedAt\n _add(new_g, ((annotation_uri, URIRef(OA + 'annotatedAt'), timestamp)))\n\n if targets:\n if new_graph == RETIRED:\n message = \"marked as deleted\"\n else:\n message = \"marked as %s\" % new_graph\n\n _mail_followers(annotation_uri, targets, message)\n\n return new_g",
"def _rename_in_doc(source, target, doc):\n matches = [x for x in doc.annotations() if\n anno_id_to_tuple(x.local_id()) == source]\n pretty_source = anno_id_from_tuple(source)\n pretty_target = anno_id_from_tuple(target)\n target_author, target_date = target\n\n def replace_pointer(pointers):\n \"Given annotation id, return copy with s/src/tgt/\"\n return [pretty_target if ptr == pretty_source else ptr\n for ptr in pointers]\n\n if not matches:\n sys.exit(\"No annotations found with id %s\" % pretty_source)\n elif len(matches) > 1:\n sys.exit(\"Huh?! More than one annotation with id %s\" % pretty_source)\n evil_set_id(matches[0], target_author, target_date)\n for anno in doc.relations:\n if anno.span.t1 == pretty_source:\n anno.span.t1 = pretty_target\n if anno.span.t2 == pretty_source:\n anno.span.t2 = pretty_target\n for anno in doc.schemas:\n anno.units = replace_pointer(anno.units)\n anno.relations = replace_pointer(anno.relations)\n anno.schemas = replace_pointer(anno.schemas)",
"def test_annual_attribute_merge():\n out_expected_left = pd.DataFrame(\n {\n \"report_date\": [\n \"2019-12-01\",\n \"2020-10-01\",\n \"2019-01-01\",\n \"2019-06-01\",\n \"2018-07-01\",\n ],\n \"plant_id_eia\": [2, 2, 3, 3, 3],\n \"prime_mover_code\": [\"HY\", \"ST\", \"HY\", \"CT\", \"HY\"],\n \"fuel_consumed_units\": [0.0, 98085.0, 0.0, 4800000.0, 0.0],\n \"plant_name_eia\": [\"Bankhead Dam\", \"Bankhead\", \"Barry\", \"Barry\", \"Barry\"],\n \"utility_id_eia\": [195, 195, 16, 16, 16],\n }\n ).astype({\"report_date\": \"datetime64[ns]\"})\n\n out_left = date_merge(\n left=MONTHLY_GEN_FUEL.copy(),\n right=ANNUAL_PLANTS_UTIL.copy(),\n on=[\"plant_id_eia\"],\n how=\"left\",\n )\n\n assert_frame_equal(out_left, out_expected_left)\n\n out_expected_right = pd.DataFrame(\n {\n \"report_date\": [\n \"2019-12-01\",\n \"2020-10-01\",\n \"2019-01-01\",\n \"2019-06-01\",\n \"2018-07-01\",\n ],\n \"plant_id_eia\": [2, 2, 3, 3, 3],\n \"plant_name_eia\": [\"Bankhead Dam\", \"Bankhead\", \"Barry\", \"Barry\", \"Barry\"],\n \"utility_id_eia\": [195, 195, 16, 16, 16],\n \"prime_mover_code\": [\"HY\", \"ST\", \"HY\", \"CT\", \"HY\"],\n \"fuel_consumed_units\": [0.0, 98085.0, 0.0, 4800000.0, 0.0],\n }\n ).astype({\"report_date\": \"datetime64[ns]\"})\n\n out_right = date_merge(\n left=ANNUAL_PLANTS_UTIL.copy(),\n right=MONTHLY_GEN_FUEL.copy(),\n on=[\"plant_id_eia\"],\n how=\"right\",\n )\n\n assert_frame_equal(out_right, out_expected_right)\n\n out_expected_inner = pd.DataFrame(\n {\n \"report_date\": [\n \"2019-12-01\",\n \"2020-10-01\",\n \"2019-01-01\",\n \"2019-06-01\",\n \"2018-07-01\",\n ],\n \"plant_id_eia\": [2, 2, 3, 3, 3],\n \"prime_mover_code\": [\"HY\", \"ST\", \"HY\", \"CT\", \"HY\"],\n \"fuel_consumed_units\": [0.0, 98085.0, 0.0, 4800000.0, 0.0],\n \"plant_name_eia\": [\"Bankhead Dam\", \"Bankhead\", \"Barry\", \"Barry\", \"Barry\"],\n \"utility_id_eia\": [195, 195, 16, 16, 16],\n }\n ).astype({\"report_date\": \"datetime64[ns]\"})\n\n out_inner = date_merge(\n left=MONTHLY_GEN_FUEL.copy(),\n right=ANNUAL_PLANTS_UTIL.copy(),\n on=[\"plant_id_eia\"],\n how=\"inner\",\n )\n\n assert_frame_equal(out_inner, out_expected_inner)\n\n out_expected_outer = pd.DataFrame(\n {\n \"report_date\": [\n \"2019-12-01\",\n \"2020-10-01\",\n \"2019-01-01\",\n \"2019-06-01\",\n \"2018-07-01\",\n \"2020-01-01\",\n \"2018-01-01\",\n \"2020-01-01\",\n ],\n \"plant_id_eia\": [2, 2, 3, 3, 3, 1, 2, 3],\n \"prime_mover_code\": [\"HY\", \"ST\", \"HY\", \"CT\", \"HY\", None, None, None],\n \"fuel_consumed_units\": [\n 0.0,\n 98085.0,\n 0.0,\n 4800000.0,\n 0.0,\n None,\n None,\n None,\n ],\n \"plant_name_eia\": [\n \"Bankhead Dam\",\n \"Bankhead\",\n \"Barry\",\n \"Barry\",\n \"Barry\",\n \"Sand Point\",\n \"Bankhead Dam\",\n \"Barry\",\n ],\n \"utility_id_eia\": [195, 195, 16, 16, 16, 63560, 195, 16],\n }\n ).astype({\"report_date\": \"datetime64[ns]\"})\n\n out_outer = date_merge(\n left=MONTHLY_GEN_FUEL.copy(),\n right=ANNUAL_PLANTS_UTIL.copy(),\n on=[\"plant_id_eia\"],\n how=\"outer\",\n )\n\n assert_frame_equal(out_outer, out_expected_outer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create json for zabbix discovery service
|
def createZabbixJson(data):
try:
result = {"data":[]}
        for name, value in data.items():
result['data'].append({"{#NAME}":name, "{#IP_STREAM}": value['ip_stream'], "{#RC_PORT}": value['rc_port'] })
return json.dumps(result)
except:
return 0
|
[
"def GetServices(self):\n return json.dumps(SERVICES)",
"def servicesJson():\n # get the services from the database\n dbSession = current_app.config['DBSESSION']\n services = dbSession.query(Service).all()\n\n # create the list of services that will be returned as json\n servicesList = []\n for serv in services:\n servicesList.append(\n {\n 'id': serv.id,\n 'name': serv.name,\n }\n )\n\n return jsonify(servicesList)",
"def create_inventory(self):\n hosts = []\n ansible_ssh_user = os.environ.get('ANSIBLE_SSH_USER')\n ansible_ssh_pass = os.environ.get('ANSIBLE_SSH_PASS')\n\n for i in self.filtered_data:\n ip_address = i['status'][0]['virtual_machine_ip']\n hosts.append(ip_address)\n self.inventory['_meta']['hostvars'][ip_address] = \\\n {'ansible_ssh_port': i['status'][0]['ssh_port'],\n 'ansible_ssh_user': ansible_ssh_user,\n 'ansible_ssh_pass': ansible_ssh_pass,\n 'ansible_connection': 'ssh'}\n\n self.inventory['group']['hosts'] = hosts\n # varss = self._build_vars()\n # self.inventory['vars'] = varss\n print(json.dumps(self.inventory))\n return json.dumps(self.inventory)",
"def quiery_service():\n if not request.json:\n abort(404)\n\n serviceName = request.json.get(\"serviceName\", \"\")\n status = request.json.get(\"status\", \"\")\n\n result_list = util.getConditionalServiceList(serviceName, status)\n return jsonify({\"Quiery service info\": result_list})",
"def json_inventory(nhosts=10):\n return json.dumps(generate_inventory(nhosts), indent=4)",
"def service_any(service):\n service_type = service['type']\n service_port = service['data']['name']\n data = {\n \"type\":service_type,\n \"dst\":service_port\n }\n return data",
"def get_drs_service_info():\n\n reverse_domain_name = reverse_url(url=os.environ[\"HOSTNAME\"])\n\n ret = {\n \"id\": reverse_domain_name,\n \"name\": \"DRS System\",\n \"version\": \"1.0.3\",\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.0.3\",\n },\n \"organization\": {\n \"name\": \"CTDS\",\n \"url\": \"https://\" + os.environ[\"HOSTNAME\"],\n },\n }\n\n if blueprint.service_info:\n for key, value in blueprint.service_info.items():\n if key in ret:\n if isinstance(value, dict):\n for inner_key, inner_value in value.items():\n ret[key][inner_key] = inner_value\n else:\n ret[key] = value\n\n return flask.jsonify(ret), 200",
"def get_hosts_info():\n response = {\n \"hosts\": []\n }\n\n scope_hosts_response = fetch_topology_hosts()\n for node_id, node in scope_hosts_response.items():\n if not node.get(\"id\"):\n continue\n host = _parse_host(node[\"id\"])\n public_ip_address = \"\"\n local_networks = []\n interface_ips = {} # list of all interface ips, along with subnet masks\n probe_id = \"\"\n cloud_metadata = {}\n os_type = \"\"\n kubernetes_cluster_name = \"\"\n\n for meta in node.get(\"metadata\", []):\n if not meta.get(\"value\"):\n continue\n if meta.get(\"id\") == \"local_networks\":\n local_networks = meta.get(\"value\").split(\",\")\n elif meta.get(\"id\") == 'kubernetes_cluster_name':\n kubernetes_cluster_name = meta.get(\"value\", \"\")\n elif meta.get(\"id\") == \"probeId\":\n probe_id = meta.get(\"value\")\n elif meta.get(\"id\") == \"interface_ips\":\n try:\n interface_ips = json.loads(meta.get(\"value\"))\n except:\n pass\n elif meta.get(\"id\") == \"cloud_metadata\":\n try:\n cloud_metadata = json.loads(meta.get(\"value\"))\n except:\n pass\n elif meta.get(\"id\") == \"os\":\n os_type = meta.get(\"value\")\n\n if not host:\n \"\"\"\n This mostly happens when the node is either in-theinternet or out-theinternet.\n \"\"\"\n continue\n if cloud_metadata:\n public_ip_address = cloud_metadata.get(\"public_ip\", None)\n\n response[\"hosts\"].append({\n \"hostname\": host,\n \"public_ip_address\": public_ip_address,\n \"local_networks\": _parse_local_networks(local_networks),\n \"probe_id\": probe_id,\n \"interface_ips\": interface_ips,\n \"cloud_metadata\": cloud_metadata,\n \"os\": os_type,\n \"kubernetes_cluster_name\": kubernetes_cluster_name\n })\n\n return response",
"def get_services(self):\r\n services_dict = {}\r\n for service in self.services:\r\n service_chars = {}\r\n service_chars['characteristics'] = service.get_characteristics()\r\n services_dict[service.get_uuid()] = service_chars\r\n\r\n return services_dict",
"def gen_net_config(self):\n\n ret_net = []\n\n for i in self.all_pids:\n if i == self.pid:\n ret_net.append({\"host\": \"0.0.0.0\", \"port\": 5000})\n else:\n ret_net.append({\"host\": \"conclave-{0}-{1}-service.{2}.svc.cluster.local\"\n .format(self.compute_id, str(i), self.namespace_map[i-1]), \"port\": 5000})\n\n return json.dumps(ret_net)",
"def serviceFile_create(self, *args, **kwargs) -> dict:\n\n # pudb.set_trace()\n b_status = False\n str_file = '/tmp/dicomlistener'\n str_xinetd = \"\"\"\n service dicomlistener\n {\n disable = no\n socket_type = stream\n wait = no\n user = root\n server = %s\n server_args = -t %s -E /usr/local/bin -D %s -p %s\n type = UNLISTED\n port = %s\n bind = 0.0.0.0\n } \"\"\" % (\n self.d_dcmtk['receiver'],\n self.d_xinetd['tmpDir'],\n self.d_xinetd['dataDir'],\n self.d_xinetd['appPort'],\n self.d_xinetd['servicePort']\n )\n\n FILE = open(str_file, 'w')\n try:\n FILE.write(str_xinetd)\n b_status = True\n except:\n b_status = False\n\n FILE.close()\n return {\n 'status': b_status,\n 'fileContents': str_xinetd,\n 'file': str_file\n }",
"def test_07_tcp_udp_can_be_used_in_json(self):\n print(\n \"\\nStarting the test that the udp and tcp scanners output can be used in JSON...\"\n )\n dict_of_results = {}\n for server in self.test_servers:\n for domain in self.local_domain_name:\n dict_of_results[\n f\"{str(server)}_UDP_{domain}\"\n ] = scan_mods.protocol_scanners.dns_scanner.udp_dns_scanner(\n server, domain\n )\n dict_of_results[\n f\"{str(server)}_TCP_{domain}\"\n ] = scan_mods.protocol_scanners.dns_scanner.tcp_dns_scanner(\n server, domain\n )\n json_output = json.dumps(dict_of_results)\n self.assertIsNotNone(json_output)\n self.assertGreaterEqual(len(json_output), 1)\n self.assertIsInstance(json_output, str)\n dict_of_results = {}\n for server in self.test_servers:\n for domain in self.local_domain_name:\n dict_of_results[\n f\"{str(server)}_UDP_{domain}\"\n ] = scan_mods.protocol_scanners.dns_scanner.udp_dns_scanner(\n domainname=domain\n )\n dict_of_results[\n f\"{str(server)}_TCP_{domain}\"\n ] = scan_mods.protocol_scanners.dns_scanner.tcp_dns_scanner(\n domainname=domain\n )\n json_output = json.dumps(dict_of_results)\n self.assertIsNotNone(json_output)\n self.assertGreaterEqual(len(json_output), 1)\n self.assertIsInstance(json_output, str)\n dict_of_results = {}\n for server in self.test_servers:\n for domain in self.local_domain_name:\n dict_of_results[\n f\"{str(server)}_UDP_{domain}\"\n ] = scan_mods.protocol_scanners.dns_scanner.udp_dns_scanner(\n dns_server=server\n )\n dict_of_results[\n f\"{str(server)}_TCP_{domain}\"\n ] = scan_mods.protocol_scanners.dns_scanner.tcp_dns_scanner(\n dns_server=server\n )\n json_output = json.dumps(dict_of_results)\n self.assertIsNotNone(json_output)\n self.assertGreaterEqual(len(json_output), 1)\n self.assertIsInstance(json_output, str)\n print(\n \"Finished the test that the udp and tcp scanners output can be used in JSON...\\n\"\n )",
"def get_hardware_inventory_discovery(conn: dict) -> dict:\n return get(conn, PCC_HARDWARE_INVENTORY + \"/discovery\")",
"def get_all_service_info(self):\n result = []\n\n for k in self.service_information.keys():\n ip = k\n for p in self.service_information[k].keys():\n proto, port = p.split(\"/\")\n service_list = self.service_information[k][p]\n status = service_list[0]\n service = service_list[1]\n service_info = service_list[2]\n result.append({\n 'ip': str(ipaddress.IPv4Address(ip)), \n 'proto': proto, \n 'port': port, \n 'status': status, \n 'service': service,\n 'service_info': service_info\n })\n\n return result",
"def write_cube_information_json(self, cube_id):\n #all the tasks,\n groups = self.fetch_data(\"select distinct group_name from task where cube_id = {};\".format(cube_id))\n tasks = self.fetch_data(\"select * from task where cube_id = {};\".format(cube_id))\n events = self.fetch_data(\"select * from event where cube_id = {};\".format(cube_id))\n data = {}\n data['groups'] = []\n for group in groups:\n data['groups'].append(group[0])\n data[group[0]] = []\n for task in tasks:\n data[task[1]].append(task[0])\n data['events'] = []\n for event in events:\n data['events'].append([event[1], event[2], event[4], event[5]])\n return data",
"def __init__(self, host=None, user=None, password=None, ssl_verify=None):\n if not host:\n if 'ZENOSS_HOST' in os.environ:\n host = os.environ['ZENOSS_HOST']\n if not user:\n if 'ZENOSS_USER' in os.environ:\n user = os.environ['ZENOSS_USER']\n if not password:\n if 'ZENOSS_PASSWD' in os.environ:\n password = os.environ['ZENOSS_PASSWD']\n\n if ssl_verify is None:\n if 'ZENOSS_SSL_VERIFY' in os.environ:\n ssl_verify = os.environ['ZENOSS_SSL_VERIFY']\n else:\n ssl_verify = True\n\n if isinstance(ssl_verify, str):\n if ssl_verify == \"False\":\n ssl_verify = False\n else:\n ssl_verify = True\n\n # Allow a http:// hostname, assume https if none provided\n self.api_host = host if '://' in host else 'https://'+host\n self.api_url = '{0}/zport/dmd'.format(host)\n self.api_user = user\n self.ssl_verify = ssl_verify\n self.api_headers = {\"Content-Type\": \"application/json\"}\n self.router_list = []\n self.routers = dict()\n\n for router in self.get_routers():\n self.router_list.append(router)\n self.routers[router] = __import__(\n 'zenossapi.routers.{0}'.format(router),\n fromlist=[router])\n\n if self.api_user and password:\n self.api_headers.update(urllib3.make_headers(\n basic_auth='{0}:{1}'.format(self.api_user, password)))",
"def startDevicesJson():\r\n deviceJson = getYaml()\r\n onLineDevices, statusList = getDevices()\r\n SDJ = {}\r\n if onLineDevices != []:\r\n for k, v in deviceJson.items():\r\n for i in onLineDevices:\r\n if k == i:\r\n SDJ.__setitem__(k, v)\r\n else:\r\n pass\r\n return SDJ, statusList",
"def do_service_list(cs, args):\r\n result = cs.services.list(host=args.host, binary=args.binary)\r\n columns = [\"Binary\", \"Host\", \"Zone\", \"Status\", \"State\", \"Updated_at\"]\r\n # NOTE(jay-lau-513): we check if the response has disabled_reason\r\n # so as not to add the column when the extended ext is not enabled.\r\n if result and hasattr(result[0], 'disabled_reason'):\r\n columns.append(\"Disabled Reason\")\r\n if result:\r\n print 'OKKKKKKKKK'\r\n utils.print_list(result, columns)",
"def _extract_data(self, parsed_scan, option='-sn'):\n now = datetime.datetime.now()\n data = {'scan-time': now.strftime(DATETIME_FORMAT)}\n ret = []\n for host in parsed_scan.hosts:\n if host.status in 'up':\n tmphost = {}\n if len(host.hostnames) > 0:\n tmphost['hostname'] = host.hostnames.pop()\n else:\n tmphost['hostname'] = None\n tmphost['address'] = host.address\n tmphost['mac'] = host.mac\n tmphost['mac-vendor'] = host.vendor\n # TODO other options may have more data, parse such data here\n ret.append(tmphost)\n data['hosts'] = ret\n print \"INFO: scan done at {0}\".format(now.strftime(DATETIME_FORMAT))\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the enforcing mode of SELinux
|
def setenforce(mode):
mode = mode.strip().title()
assert mode in ["Permissive", "Enforcing"]
assert Test.Run.command("/usr/sbin/setenforce %s" % mode)
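
# Minimal standalone sketch of the same idea, assuming subprocess in place of
# the Test.Run.command test-harness helper used above (that helper is not
# defined in this snippet). Requires root and an SELinux-enabled host.
import subprocess

def setenforce_sketch(mode):
    mode = mode.strip().title()
    assert mode in ["Permissive", "Enforcing"]
    subprocess.run(["/usr/sbin/setenforce", mode], check=True)

# setenforce_sketch("permissive")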
|
[
"def set_mode(self, nt):\n return _radio_astro_swig.detect_set_mode(self, nt)",
"def safe_mode(self):\n\n self.send_code(SAFE_MODE)",
"def _change_mode(self, attr, old, new):\n self.exg_mode = new",
"def _set_server_mode_faulty(server, mode):\n allowed_mode = ()\n _do_set_server_mode(server, mode, allowed_mode)",
"def dfs_set_tpm_mode(self, mode):\n status = 0x01\n if (mode == 'disable') or (mode == '0'):\n (status, null) = self.__tx_dev.set_tpm_mode(0)\n elif (mode == 'maxchannels') or (mode == '1'):\n (status, null) = self.__tx_dev.set_tpm_mode(1)\n elif (mode == 'maxdistance') or (mode == '2'):\n (status, null) = self.__tx_dev.set_tpm_mode(2)\n else:\n print(self.help('dfs_set_tpm_mode'))\n\n if(status != 0x01):\n print self.__device.decode_error_status(status)",
"def set_mode(self, nt):\n return _radio_astro_swig.detect_sptr_set_mode(self, nt)",
"def set_mode(self, val):\n # self.property_set(register_name, val)\n self.property_set(\"mode\", Sample(0, value=val, unit=\"dF\"))\n \n try:\n self.serial_send(\"A=1,Z=1,M=\" + str(self.modes[val.title()]) + \"\\x0D\")\n except:\n print \"error setting thermostat\"",
"def set_immersive_mode(self):\n if self.device.get_device_android_version().major >= 11:\n logw(\"immersive mode not available on Android 11+ devices\")\n return\n logi(\"setting immersive mode\")\n self.device.execute_command(f\"settings put global policy_control immersive.full={self.package_name}\", shell=True)\\\n .validate(Exception(\"error setting immersive mode\"))",
"def set_mode(mode):\n master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')\n master.wait_heartbeat()\n\n mode_id = master.mode_mapping()[mode]\n master.mav.set_mode_send(\n master.target_system,\n mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,\n mode_id)\n print(\"Mode \" + mode + \" successfully set.\")\n return True",
"def _mode_set(self, thermostat_mode: ThermostatMode):\n if thermostat_mode in [ThermostatMode.FAN_ALWAYS_ON, ThermostatMode.FAN_AUTO]:\n self._groups[GRP_FAN_MODE].set_value(thermostat_mode)\n else:\n self._groups[GRP_SYS_MODE].set_value(thermostat_mode)",
"def toSafeMode(self):\r\n self.start()\r\n time.sleep(0.03)\r\n # now we're in PASSIVE_MODE, so we repeat the above code...\r\n self.send( SAFE )\r\n # they recommend 20 ms between mode-changing commands\r\n time.sleep(0.03)\r\n # change the mode we think we're in...\r\n self.sciMode = SAFE_MODE\r\n # no response here, so we don't get any...\r\n return",
"def mode(ctx, mode, touch_eject, autoeject_timeout, chalresp_timeout, force):\n dev = ctx.obj['dev']\n if autoeject_timeout:\n touch_eject = True\n autoeject = autoeject_timeout if touch_eject else None\n\n if mode is not None:\n if mode.transports != TRANSPORT.CCID:\n autoeject = None\n if touch_eject:\n ctx.fail('--touch-eject can only be used when setting'\n ' CCID-only mode')\n\n if not force:\n if mode == dev.mode:\n click.echo('Mode is already {}, nothing to do...'.format(mode))\n ctx.exit()\n elif not dev.has_mode(mode):\n click.echo('Mode {} is not supported on this YubiKey!'\n .format(mode))\n ctx.fail('Use --force to attempt to set it anyway.')\n force or click.confirm('Set mode of YubiKey to {}?'.format(mode),\n abort=True, err=True)\n\n try:\n dev.set_mode(mode, chalresp_timeout, autoeject)\n if not dev.can_write_config:\n click.echo(\n 'Mode set! You must remove and re-insert your YubiKey '\n 'for this change to take effect.')\n except ModeSwitchError as e:\n logger.debug('Failed to switch mode', exc_info=e)\n click.echo('Failed to switch mode on the YubiKey. Make sure your '\n 'YubiKey does not have an access code set.')\n\n else:\n click.echo('Current connection mode is: {}'.format(dev.mode))\n supported = ', '.join(t.name for t in TRANSPORT\n .split(dev.config.usb_supported))\n click.echo('Supported USB interfaces are: {}'.format(supported))",
"async def hard_mode(self, ctx):\n self.hard_mode = not self.hard_mode\n #self.auto_hint = not self.hard_mode #disable auto hint in hard mode\n await ctx.channel.send(f\"Hard mode has been set to: {self.hard_mode}\")",
"def detected_mode_set(self, event):\n self.mode.set(2)\n self.change_mode()",
"def set_reserve_mode(self, mode):\n if mode in [0, 1, 2]:\n self.write(\"RMOD %d\"%mode)\n else:\n print(\"Specified mode is invalid! Specify mode as 0, 1 or 2, for high reserve, normal or low noise, resp.\")",
"def task_disable_selinux():\n return sequence([\n run(\"if selinuxenabled; then setenforce 0; fi\"),\n run(\"test -e /etc/selinux/config && \"\n \"sed --in-place='.preflocker' \"\n \"'s/^SELINUX=.*$/SELINUX=disabled/g' \"\n \"/etc/selinux/config\"),\n ])",
"def test_set_swmr_mode_raises(self):\n with self.assertRaises(RuntimeError):\n self.f.swmr_mode = True\n assert not self.f.swmr_mode",
"def set_nv_power_mode2(mode):\n mode_value = int(mode)\n if mode_value < 0 or mode_value > 4:\n print(\">>> Invalid value : \", mode_value , \"Valid range is between 0 to 4 - Mode not changed ! \")\n return\n power_mode = get_nv_power_mode()\n if int(power_mode.split(\"-\")[1]) == mode_value:\n print(\">>> Current mode is already: \", mode_value , \"Mode not changed ! \")\n return\n\n command = \"sudo nvpmodel -m \" + mode\n print(command)\n output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, universal_newlines=True)\n print(output.stdout)\n print(\" Change Mode Completed !\")",
"def change_mode(self):\n master.destroy()\n os.system(\"edit_mode_run.py\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a GeoJson plugin to append into a map with Map.add_plugin.
|
def __init__(self, data):
super(GeoJson, self).__init__()
self.plugin_name = 'GeoJson'
if 'read' in dir(data):
self.data = data.read()
elif type(data) is dict:
self.data = json.dumps(data)
else:
self.data = data
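
# Usage sketch of the three input forms the constructor normalises: a
# file-like object, a dict, or a raw GeoJSON string. A plain helper stands in
# for the plugin class here because its base class is not shown in this
# snippet (assumption).
import io
import json

def normalise_geojson_input(data):
    if 'read' in dir(data):      # file-like object: read its contents
        return data.read()
    elif type(data) is dict:     # plain dict: serialise to a JSON string
        return json.dumps(data)
    else:                        # otherwise assume it is already a JSON string
        return data

feature = {"type": "Feature", "geometry": {"type": "Point", "coordinates": [0.0, 0.0]}}
print(normalise_geojson_input(feature))                  # dict branch
print(normalise_geojson_input(json.dumps(feature)))      # string branch
print(normalise_geojson_input(io.StringIO('{"type": "FeatureCollection", "features": []}')))  # file-like branch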
|
[
"def _get_geojson(self):\n pass",
"def new_map(self, name):\n from . import packers\n map2 = HeteroMap()\n self.add(name, self._get_packer(name), map2, packers.BuiltinHeteroMapPacker)\n return map2",
"def __init__(self, data, transition_time=200, loop=True, auto_play=True):\n super(TimestampedGeoJson, self).__init__()\n self.plugin_name = 'TimestampedGeoJson'\n self.template = self.env.get_template('timestamped_geo_json.tpl')\n if 'read' in dir(data):\n self.data = data.read()\n elif type(data) is dict:\n self.data = json.dumps(data)\n else:\n self.data = data\n self.transition_time = int(transition_time)\n self.loop = bool(loop)\n self.auto_play = bool(auto_play)",
"def add_to_map(self):\n pass",
"def load_custom_plugin_mappings(self):\n self.update_plugin_list()\n if self.custom_plugins:\n self.plugins.update(self.custom_plugins)",
"def register_plugin(plugin):\n plugins.append(plugin)",
"def _add_plugin(self, plugin_dir):\n config = get_jigconfig(self.gitrepodir)\n pm = PluginManager(config)\n pm.add(plugin_dir)\n set_jigconfig(self.gitrepodir, pm.config)",
"def map_create_function(countries_list):\r\n\r\n my_map = folium.Map(location=None, tiles='Mapbox Bright', no_wrap=True, width=800, height=500)\r\n\r\n # creating and adding a features to the map\r\n\r\n gj = folium.GeoJson(\r\n data=open(os.path.join(BASE_DIR, \"my_map\", \"static\", \"my_map\", \"world.json\"), \"r\", encoding=\"utf-8-sig\").read(),\r\n style_function=lambda country_data: {\r\n 'fillColor': get_color(country_data, countries_list),\r\n 'fillOpacity': 0.5,\r\n 'color': 'black',\r\n 'line_opacity': 0.5,\r\n 'weight': 1\r\n })\r\n gj.add_to(my_map)\r\n return my_map.get_root().render()",
"def convert_json_to_geojson(input_json, output_geojson):\n \n with open(input_json) as f:\n gj = json.load(f)\n \n ### new geojson created from json\n gj1 = {}\n gj1['type'] = 'FeatureCollection'\n gj1['generator'] = gj['generator']\n gj1['copyright'] = gj['osm3s']['copyright']\n gj1['timestamp'] = gj['osm3s']['timestamp_osm_base']\n gj1['features'] = []\n\n for i, f in enumerate(gj['elements']):\n\n if f['type'] == 'way':\n pixels = []\n for p in f['geometry']:\n pixels.append([p['lon'], p['lat']])\n f1 = {}\n f1['type'] = 'Feature'\n f1['properties'] = f['tags']\n f1['bbox'] = [f['bounds']['minlon'], f['bounds']['minlat'], f['bounds']['maxlon'], f['bounds']['maxlat']]\n f1['geometry'] = {'type': 'LineString',\n 'coordinates': pixels}\n f1['id'] = f['type'] + '/' + str(f['id'])\n gj1['features'].append(f1)\n else: # f['type'] == 'node' or 'relation\n continue\n\n # write geojson to new file\n with open(output_geojson, 'w') as f:\n # method 1: f.write(json.dumps(gj))\n json.dump(gj1, f, indent = 2)",
"def from_dict(cls, the_dict):\n geom = geojson.loads(json.dumps(the_dict))\n result = MultiPolygon([\n [[[0, 0], [0, 0]]],\n [[[1, 1], [1, 1]]]\n ])\n result._geom = geom\n return result",
"def write_geojson(self, outfile):\n\n logging.info('Writing GeoJSON: %s' % outfile)\n\n with open(outfile, 'wb') as f:\n f.write(json.dumps(self.geojson, indent=4))",
"def register_plugin(plugin):\n if plugin.plugin_name not in PLUGINS:\n PLUGINS[plugin.plugin_name] = plugin",
"def create_map(dataf, geoj):\n fig = px.choropleth_mapbox(dataf,\n geojson=geoj,\n locations=\"buildings_id\",\n featureidkey=\"properties.buildings_id\",\n #color='district',\n opacity=0.5,\n )\n fig.update_layout(\n mapbox_style=\"carto-positron\",\n mapbox_zoom=8,\n mapbox_center = {\"lat\": 41.8, \"lon\": -87.8},\n margin={\"r\":15,\"t\":15,\"l\":15,\"b\":15},\n showlegend=False\n )\n fig.update_geos(fitbounds=\"locations\")\n return fig",
"def asPluginData(*args, **kwargs):\n \n pass",
"def get_geojson(geohash, properties={}):\n geohash = geohash.lower()\n\n feature = {'type': 'Feature',\n 'properties': {key: value for key, value in properties.items()},\n 'geometry': {'type': 'Polygon',\n 'coordinates': []}}\n\n feature['geometry']['coordinates'].append(get_polygon(geohash))\n\n return feature",
"def init_plugin(ext_registry: ExtensionRegistry):\n ext_registry.add_extension(component=object(), point='test.util.test_plugin', name='ext1')\n ext_registry.add_extension(component=object(), point='test.util.test_plugin', name='ext2')\n ext_registry.add_extension(component=object(), point='test.util.test_plugin', name='ext3')",
"def add_polygon(\n self,\n polygon_coordinates,\n popup_text: Optional[str] = None,\n popup_maxwith: Optional[int] = None,\n ) -> \"FoliumMapBuilder\":\n polygon = settings.DEFAULT_POLYGON_COORDINATES\n\n if polygon_coordinates:\n polygon = polygon_coordinates\n\n gj = GeoJson(\n data={\n \"type\": \"Feature\",\n \"geometry\": {\"type\": \"Polygon\", \"coordinates\": polygon},\n },\n name=\"Madrid Central\",\n )\n\n if popup_text:\n popup = self._create_popup(text=popup_text, max_with=popup_maxwith)\n gj.add_child(popup)\n\n gj.add_to(self._map)\n\n return self",
"async def geojson(db: Session = Depends(get_db)):\n geojson = FeatureCollection([climb.as_feature() for climb in crud.get_climbs(db)])\n return JSONResponse(content=geojson, media_type=\"application/geo+json\")",
"def from_dict(cls, the_dict):\n geom = geojson.loads(json.dumps(the_dict))\n result = Polygon([[[0, 0], [0, 0]]])\n result._geom = geom\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A function that takes the string value of an individual item of data that will become a tag value in FluidDB. The default version of this function in flimp will attempt to cast the value into something appropriate; see flimp.parser.csv_parser.clean_row_item for the source.
|
def clean_row_item(item):
# We just want to make sure we return None for empty values. By default
# flimp will ignore tags with None as a value (this can be overridden)
value = item.strip()
if value:
return value
else:
return None
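
# Usage sketch (relies on clean_row_item defined above): whitespace is
# stripped and empty cells become None so flimp can skip them.
print(clean_row_item("  hello world  "))   # -> 'hello world'
print(clean_row_item("   "))               # -> None
print(clean_row_item(""))                  # -> None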
|
[
"def _item_to_value(_, item: str) -> str:\n return item",
"def prep_value(self, db_field, value):\n\t\treturn force_unicode(value)",
"def _parse_value(self,value):\n value = value.strip()\n if not value:\n return None\n\n # assume that values containing spaces are lists of values\n if len(value.split()) > 1:\n return [self._parse_value(vv) for vv in value.split()]\n\n try:\n # see if it's an integer\n value = int(value)\n except ValueError:\n try:\n # see if it's a float\n value = float(value)\n except ValueError:\n # see if it's a bool\n if value[0] == 'T':\n value = True\n elif value[0] == 'F':\n value = False\n\n return value",
"def handle_parsed_input(self, value):\n\n return value",
"def _cast(value):\n if (not value) or isinstance(value, dict) or isinstance(value, list):\n return value\n if _is_multi_value(value):\n return value.split(',')\n elif _is_bool_true(value):\n return True\n elif _is_bool_false(value):\n return False\n return value",
"def type_cast(cls, item: Union[str, Kwarg]) -> Union[int, float, str]:\n if isinstance(item, Kwarg):\n item.value(cls._type_cast(item.value()))\n else:\n item = cls._type_cast(item)\n return item",
"def _escape_csv_cell(item, onError=None):\n if is_rawtype(item):\n return item\n elif onError:\n if isinstance(onError, basestring):\n return onError\n elif hasattr(onError,'__call__'):\n return onError(item)\n else:\n return ''\n else:\n raise CSVConvertError(str(item))",
"def normalizeAttributeValue (\n\n self,\n attribute = None,\n value = None\n ) :\n \n if ( ( utilities.isEmpty( attribute ) ) or ( utilities.isEmpty( value ) ) ) : return None, None\n\n attribute = utilities.string( attribute, format = \"identifier\" )\n\n if attribute == \"reference\" : pass\n\n elif attribute == \"bibtex\" : pass\n\n elif attribute in self.aliasDictionary : attribute = self.aliasDictionary[ attribute ]\n\n elif attribute in self.fieldList : pass\n\n else : return None, None\n\n # first normalization of value: removes external {}, quotes, and strips spaces\n\n value = value.strip( \";,: /\\\\\" )\n\n size = len( value )\n\n while True : \n\n if value.startswith( \"{\" ) and value.endswith( \"}\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( \"(\" ) and value.endswith( \")\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( \"[\" ) and value.endswith( \"]\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( '\"' ) and value.endswith( '\"' ) : value = value[ 1 : -1 ]\n\n if value.startswith( \"'\" ) and value.endswith( \"'\" ) : value = value[ 1 : -1 ]\n\n value = value.strip( \";,: /\\\\\" )\n\n if len( value ) == size : break\n\n size = len( value )\n\n # normalizes fields\n \n if attribute == \"author\" :\n\n value = self.normalizeAuthor( value )\n\n self.author = value\n\n elif ( ( attribute == \"reference\" ) or ( attribute == \"bibtex\" ) ) :\n\n attribute = \"bibtex\"\n\n value = utilities.string( value, format = \"identifier\" )\n \n self.bibtex = value\n\n elif attribute == \"booktitle\" : value = self.normalizeBookTitle( value )\n\n elif attribute == \"description\" :\n\n value = self.normalizeDescription( value )\n\n self.description = value\n\n elif attribute == \"editor\" : value = self.normalizeEditor( value )\n\n elif attribute == \"journal\" : value = self.normalizeJournal( value )\n\n elif attribute == \"month\" : value = self.normalizeMonth( value )\n\n elif attribute == \"pages\" : value = self.normalizePages( value )\n\n elif attribute == \"title\" :\n\n value = self.normalizeTitle( value )\n\n self.title = value\n\n elif attribute == \"year\" :\n\n value = self.normalizeYear( value )\n\n self.year = value\n\n## elif attribute == \"bib\" :\n##\n## value = self.normalizePath( value )\n##\n## self.bibPath = value\n\n elif attribute == \"file\" :\n\n value = self.normalizePath( value )\n\n self.filePath = value\n \n elif attribute == \"owner\" :\n\n value = utilities.string( value, format = \"title\" )\n\n self.owner = value\n\n # other values: strips delimiters\n \n else : value = str( value ).strip( \" ()[].;:,/\\\\{}-_\" )\n\n\n\n # cleans value\n\n## print \"normalize\", str( attribute), str( value )\n\n value = value.strip().replace( \"{\", \"\" ).replace( \"}\", \"\" )\n\n## # recodes attribute: reference becomes bibtex and the remainder has a prefix reference **RF\n##\n## if ( ( not attribute == \"bibtex\" ) and ( not attribute.startswith( \"reference\" ) ) ) :\n##\n## attribute = \"reference\" + utilities.string( attribute, format = \"class\" )\n\n return attribute, value",
"def convert(self, value):",
"def parse_value(cls, value, cast):\n if cast is None:\n return value\n if cast is bool:\n try:\n value = int(value) != 0\n except ValueError:\n value = value.lower().strip() in cls.BOOLEAN_TRUE_STRINGS\n elif isinstance(cast, list):\n value = list(map(cast[0], [x for x in value.split(',') if x]))\n elif isinstance(cast, tuple):\n val = value.strip('(').strip(')').split(',')\n value = tuple(map(cast[0], [x for x in val if x]))\n elif isinstance(cast, dict):\n key_cast = cast.get('key', str)\n value_cast = cast.get('value', str)\n value_cast_by_key = cast.get('cast', {})\n value = dict(map(\n lambda kv: (\n key_cast(kv[0]),\n cls.parse_value(\n kv[1],\n value_cast_by_key.get(kv[0], value_cast)\n )\n ),\n [val.split('=') for val in value.split(';') if val]\n ))\n elif cast is dict:\n value = dict([v.split('=', 1) for v in value.split(',') if v])\n elif cast is list:\n value = [x for x in value.split(',') if x]\n elif cast is tuple:\n val = value.strip('(').strip(')').split(',')\n # pylint: disable=consider-using-generator\n value = tuple([x for x in val if x])\n elif cast is float:\n # clean string\n float_str = re.sub(r'[^\\d,.-]', '', value)\n # split for avoid thousand separator and different\n # locale comma/dot symbol\n parts = re.split(r'[,.]', float_str)\n if len(parts) == 1:\n float_str = parts[0]\n else:\n float_str = f\"{''.join(parts[0:-1])}.{parts[-1]}\"\n value = float(float_str)\n else:\n value = cast(value)\n return value",
"def CoerceValue(value, value_type):\r\n if isinstance(value, tuple):\r\n # In case of a tuple, we run the same function on the value itself and\r\n # add the formatted value.\r\n if (len(value) not in [2, 3] or\r\n (len(value) == 3 and not isinstance(value[2], dict))):\r\n raise DataTableException(\"Wrong format for value and formatting - %s.\" %\r\n str(value))\r\n if not isinstance(value[1], types.StringTypes + (types.NoneType,)):\r\n raise DataTableException(\"Formatted value is not string, given %s.\" %\r\n type(value[1]))\r\n js_value = DataTable.CoerceValue(value[0], value_type)\r\n return (js_value,) + value[1:]\r\n\r\n t_value = type(value)\r\n if value is None:\r\n return value\r\n if value_type == \"boolean\":\r\n return bool(value)\r\n\r\n elif value_type == \"number\":\r\n if isinstance(value, (int, long, float)):\r\n return value\r\n raise DataTableException(\"Wrong type %s when expected number\" % t_value)\r\n\r\n elif value_type == \"string\":\r\n if isinstance(value, unicode):\r\n return value\r\n else:\r\n return str(value).decode(\"utf-8\")\r\n\r\n elif value_type == \"date\":\r\n if isinstance(value, datetime.datetime):\r\n return datetime.date(value.year, value.month, value.day)\r\n elif isinstance(value, datetime.date):\r\n return value\r\n else:\r\n raise DataTableException(\"Wrong type %s when expected date\" % t_value)\r\n\r\n elif value_type == \"timeofday\":\r\n if isinstance(value, datetime.datetime):\r\n return datetime.time(value.hour, value.minute, value.second)\r\n elif isinstance(value, datetime.time):\r\n return value\r\n else:\r\n raise DataTableException(\"Wrong type %s when expected time\" % t_value)\r\n\r\n elif value_type == \"datetime\":\r\n if isinstance(value, datetime.datetime):\r\n return value\r\n else:\r\n raise DataTableException(\"Wrong type %s when expected datetime\" %\r\n t_value)\r\n # If we got here, it means the given value_type was not one of the\r\n # supported types.\r\n raise DataTableException(\"Unsupported type %s\" % value_type)",
"def _capture_unprocessed_field(field_tag, field_value, genotype_info_to_fill):\n try:\n genotype_info_to_fill.unprocessed_info[field_tag] = float(field_value)\n except ValueError: # if the value can't be converted to a float\n split_list = field_value.split(\",\")\n cast_split_list = []\n\n for value in split_list:\n try:\n cast_split_list.append(float(value))\n except ValueError: # if the value can't be converted to a float\n cast_split_list.append(value)\n\n if len(cast_split_list) > 1:\n genotype_info_to_fill.unprocessed_info[field_tag] = cast_split_list\n else:\n genotype_info_to_fill.unprocessed_info[field_tag] = cast_split_list[0]\n\n return genotype_info_to_fill",
"def test_parse_string_value(self):\n self.f.parse_string(self.TEST_TAG_VALUE)\n self.assertIn('tag-value', self.f._filter)\n self.assertEqual([self.TEST_TAG_VALUE], self.f._filter['tag-value'])",
"def decode_row(row):\r\n if not self.encoding:\r\n return row\r\n for i, value in enumerate(row):\r\n if isinstance(value, str):\r\n row[i] = value.decode(self.encoding)\r\n return row",
"def normalize_value(cls, value: Any) -> str | Sequence[str]:\n\n # Treat `None` as empty string.\n if value is None:\n return ''\n\n # Pass through strings\n if (isinstance(value, str)):\n return value\n\n # If it's a byte string, convert it to Unicode, treating it as UTF-8.\n if isinstance(value, bytes):\n return value.decode(\"utf8\")\n\n # BeautifulSoup supports sequences of attribute values, so make sure the children are strings.\n if isinstance(value, Sequence):\n new_value = []\n for v in value:\n if not isinstance(v, (str, bytes)) and isinstance(v, Sequence):\n # This is most certainly a user error and will crash and burn later.\n # To keep things working, we'll do what we do with all objects,\n # And convert them to strings.\n new_value.append(str(v))\n else:\n # Convert the child to a string\n new_value.append(cast(str, cls.normalize_value(v)))\n return new_value\n\n # Try and make anything else a string\n return str(value)",
"def get_value(self, item, source_name):\n return force_unicode(smart_str(item.findtext(source_name))).strip()",
"def test_string_conversion(self, value):\n dset_dict = DatasetList(value)\n assert str(dset_dict) == str(value)",
"def _type_cast(self, item: str): # noqa: ANN\n for cast in self._casts:\n item = cast.type_cast(item)\n return item",
"def parse_value(value):\n if not isinstance(value, basestring):\n return value\n if is_long(value):\n return long(value)\n if is_float(value):\n return float(value)\n return get_nulls(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Timeout after a given duration.
|
def set_timeout(duration, callback=None):
# SIGALRM is only usable on a unix platform!
signal.signal(signal.SIGALRM, raise_signal)
signal.alarm(duration) # alarm after X seconds
if callback:
callback()
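
# Usage sketch on a Unix host, assuming raise_signal is a SIGALRM handler that
# raises an exception; that handler is not shown above, so a hypothetical one
# is defined here. Relies on set_timeout defined above.
import signal
import time

class OperationTimedOut(Exception):
    pass

def raise_signal(signum, frame):   # assumed shape of the missing handler
    raise OperationTimedOut("operation timed out")

try:
    set_timeout(1)     # arm a 1-second alarm
    time.sleep(5)      # stand-in for long-running work
except OperationTimedOut:
    print("timed out")
finally:
    signal.alarm(0)    # disarm the alarm once done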
|
[
"def set_timeout(cls, timeout):\n ...",
"def set_timeout(self, timeout):\r\n self.timeout = float(timeout)/1000.",
"def delay_timeout(self, delay_timeout):\n\n self._delay_timeout = delay_timeout",
"def set_timeout(self, timeout):\n self.m_timeout = timeout",
"def timeout(self):\n raise NotImplementedError",
"def process_timeout(self):\n self.timers.process_timeout()",
"def timeout(self, value):\n self.__timeout = value",
"def sliding_timeout(timeout):\n if timeout is None:\n return lambda: None\n deadline = time.time() + timeout\n return lambda: deadline - time.time()",
"def time_out():",
"def timeout(*args):\n assert len(args) == 1, \"Requires a single argument\"\n env.container_timeout = int(args[0])",
"def set_duration(self, duration):\n pass",
"def timeout(self, timeout):\n with self._lock:\n old, self._timeout = self._timeout, timeout\n if timeout < old:\n self._shrink(self.size)",
"def set_timeout(self, timeout: int) -> 'BaseMenu':\n self._timeout = timeout\n\n return self",
"def timeouted(self, t):\n self._timeouted = t",
"def set_execution_timeout(self, timeout: float) -> None:\n self.aea._execution_timeout = timeout",
"def timeout(self, value):\n self._serialport.timeout = value",
"def duration(self, duration):\n self._duration = duration",
"def deadlineTimer(deadline):",
"def set_command_timeout(self, timeout):\n self._pub_joint_cmd_timeout.publish(Float64(timeout))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the list of tuples sorted by the index passed as argument.
|
def sort_tuple_list(l, tup_idx=0):
return sorted(l, key=lambda tup: tup[tup_idx])
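
# Usage sketch (relies on sort_tuple_list defined above): sort
# (name, score) tuples by their second element.
records = [("alice", 3), ("bob", 1), ("carol", 2)]
print(sort_tuple_list(records, tup_idx=1))
# -> [('bob', 1), ('carol', 2), ('alice', 3)]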
|
[
"def sort_by_index(elements: Iterable, indexes: Iterable):\n\n return tuple(sorted(elements)[index] for index in indexes)",
"def sortedListOfIndexAssyRec(self):\n l = []\n for ar in self:\n l.append((ar.orgpos, ar))\n # sort\n l.sort(key=lambda k: k[0])\n # done\n return l",
"def test_sorting_indexes():\n data = ['a', 'b', 'c', 'd']\n indexes = (3, 1, 0, 2)\n assert indexes_if_sorted(sort_by_index(data, indexes)) == indexes",
"def indexesSortedByFitness(self):\n return sorted(range(len(self.fitness)), key=lambda k:self.fitness[k], reverse=True)",
"def _sort_indices(array: Iterable) -> List[int]:\r\n return [i[0] for i in sorted(enumerate(array), key=lambda x: x[1])]",
"def GenerateSortIndices(self, p_int, void, p_int_1, p_int_2, p_int_3, *int):\n ...",
"def sort_tuple( tup ):\n return tuple( sorted( list( tup ) ) )",
"def exec_sorted(statement, *args, **kw):\n\n return sorted([tuple(row)\n for row in statement.execute(*args, **kw).fetchall()])",
"def SortGeneration(self, generation, sortIndex):\r\n return generation[sortIndex, :];",
"def sorted_indices(full_list):\n \n return [i[0] for i in sorted(enumerate(full_list), key=lambda x:x[1])]",
"def get_sort(result: List[Tuple[str, int]]) -> None:\n end = len(result)-1\n while end != 0:\n for i in range(end):\n if result[i][1] < result[i+1][1]:\n result[i], result[i+1] = result[i+1], result[i]\n elif result[i][1] == result[i+1][1]:\n if result[i][0] > result[i+1][0]:\n result[i], result[i+1] = result[i+1], result[i]\n end = end - 1",
"def sort(index, reverse=False, limit=None, sort_type=None,\n raise_unsortable=True):",
"def indexes_if_sorted(elements: Iterable):\n\n return tuple(sorted(elements).index(el) for el in elements)",
"def getListOfTuple():\r\n num_of_tuples = input(\"write the number of items you want to sort: \")\r\n list_of_tuples = []\r\n for i in range(int(num_of_tuples)):\r\n print(\"Enter details for item number %d :\" % int(i+1))\r\n tupleFromUser = getTupleFromUser()\r\n list_of_tuples.append(tupleFromUser)\r\n return list_of_tuples",
"def getSortedArgs(self):\n if self.question_dist:\n # There's a question distribtuion - use it\n return self.sort_args_by_distribution()\n ls = []\n for q, args in self.questions.items():\n if (len(args) != 1):\n logging.debug(\"Not one argument: {}\".format(args))\n continue\n arg = args[0]\n indices = list(self.indsForQuestions[q].union(arg.indices))\n if not indices:\n logging.debug(\"Empty indexes for arg {} -- backing to zero\".format(arg))\n indices = [0]\n ls.append(((arg, q), indices))\n return [a for a, _ in sorted(ls,\n key = lambda __indices: min(__indices[1]))]",
"def rank_neighbours(self) -> list[WeightedVertex]:\r\n lst = [(x, self.neighbours[x]) for x in self.neighbours]\r\n lst.sort(key=lambda x: x[1], reverse=True)\r\n new_lst = [x[0] for x in lst]\r\n return new_lst",
"def top_values_indexes(a, n):\r\n return np.argsort(a)[::-1][:n]",
"def pyargsort(seq,cmp=cmp,key=lambda x:x):\n return sorted(range(len(seq)),key=lambda x:key(seq.__getitem__(x)),cmp=cmp)",
"def sorting_indices(a):\n import scipy as sp\n return sp.array( [ list(a).index(i) for i in sorted(a) ] )\n # The above is unstable even though the Kool-Aid(R)-drinkers say\n # stable."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Apply `fn` to consecutive pairs of elements of `l`.
|
def apply_consecutive_elements(l, fn):
return [fn(i, j) for i, j in zip(l[:-1], l[1:])]
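
# Usage sketch (relies on apply_consecutive_elements defined above):
# compute the differences between neighbouring values.
values = [1, 4, 9, 16]
print(apply_consecutive_elements(values, lambda a, b: b - a))
# -> [3, 5, 7]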
|
[
"def lmap(fn, *args):\n return list(map(fn, args))",
"def apply(func, iterable):\n for item in iterable:\n func(item)\n yield item",
"def apply_to_all_elements(lst, fct):\n return map(fct, lst)",
"def map(fn, lst):\n \"*** YOUR CODE HERE ***\"\n for i in range(len(lst)):\n lst[i] = fn(lst[i])",
"def _apply_function(func, list):\n while True:\n try:\n yield func(list)\n except Exception:\n break",
"def map_seq(func, seq):\n if isinstance(seq, pd.Series):\n return seq.apply(func)\n else:\n return [func(val) for val in seq]",
"def foldl2(link, fn, z):\n def step(x, g):\n \"*** YOUR CODE HERE ***\"\n return lambda z:g(fn(z, x))\n return foldr(link, step, identity)(z)",
"def loopIt(func: Callable, *inps: Iterable) -> List:\n\n return [func(*inp) for inp in zip(*inps)]",
"def mapPar(f, iterable, parallel=1):\n @defer.inlineCallbacks\n def _f(i, res, d, completed, errored):\n completed['running'] += 1\n try:\n if not errored:\n idx, item = i.next()\n res.append(None)\n try:\n res[idx] = yield f(item)\n completed['running'] -= 1\n _f(i, res, d, completed, errored)\n except Exception, err:\n if not errored:\n errored.append(True)\n d.errback(err)\n except StopIteration:\n if completed['running'] == 1:\n d.callback(res)\n\n i = iter(enumerate(iterable))\n res = []\n d = defer.Deferred()\n completed = {'running': 0}\n errored = []\n\n for _ in range(parallel):\n _f(i, res, d, completed, errored)\n\n return d",
"def pipeline_each(data, fns):\n\tfrom functools import reduce\n\treturn reduce(lambda a, x: list(map(x, a)), fns, data)",
"def splitlists(l, f):\n l1 = []\n l2 = []\n for el in l:\n if f(el):\n l1.append(el)\n else:\n l2.append(el)\n return l1, l2",
"def convert(func, seq):\n return [func(eachNum) for eachNum in seq]",
"def loop(elements, func, *args):\n for idx in xrange(elements.size - 1):\n yield func(elements[idx], elements[idx + 1], *args)",
"def ZipWith(iterable, f, *iterables):\n iterables = [iterable] + list(iterables)\n return itt.starmap(f, zip(*iterables))",
"def collect(sequence, function):\n for seq in __builtin__.map(function, sequence):\n for x in seq:\n yield x",
"def triple_map(func, iterable):\n # YOUR CODE GOES HERE #\n for i in iterable:\n yield func(func(func(i)))",
"def apply_do_fn(\n fn: Callable[[Any], Iterator[Any]],\n) -> ExampleTransformFn:\n return functools.partial(_apply_do_fn, fn=fn)",
"def async_apply(iterable, async_fn, unordered=False, concurrent_jobs=50):\n if unordered:\n return _async_apply_unordered(iterable, async_fn, concurrent_jobs)\n return _async_apply_ordered(iterable, async_fn, concurrent_jobs)",
"def apply_func_to_sequence(seq, func, tuple_of_conforming_types = (path_str_type,), tuple_of_sequences_types = (list, tuple,set)):\n if isinstance(seq, tuple_of_conforming_types):\n return func(seq)\n elif isinstance(seq, tuple_of_sequences_types):\n return type(seq)(apply_func_to_sequence(pp, func, tuple_of_conforming_types, tuple_of_sequences_types) for pp in seq)\n else:\n return seq"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Search the source for the original name of a module if it was aliased. If the module is instead simply found, return that.
|
def _module_from_alias(source, module_name):
regular_or_aliased = _aliased_module_regex(module_name)
_search = [regular_or_aliased(i) for i in source.split("\n")]
matches = [i for i in _search if i is not None]
    assert len(matches) == 1, ("only one module name "
"should match '{}', instead "
"found: {}".format(module_name, [i.string for i in matches])) # NOQA
return matches[0]
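
# Usage sketch: _aliased_module_regex is not shown in this record, so a
# hypothetical stand-in is defined here. It returns a matcher that recognises
# an Elm import line that either imports the module directly or imports some
# module under the given name as an alias. Relies on _module_from_alias
# defined above.
import re

def _aliased_module_regex(module_name):
    pattern = re.compile(
        r"^import\s+(?P<module>[\w.]+)(?P<alias>\s+as\s+" + re.escape(module_name) + r")?\s*$"
    )

    def matcher(line):
        m = pattern.match(line.strip())
        if m and (m.group("module") == module_name or m.group("alias")):
            return m
        return None

    return matcher

source = "import Json.Decode as JD\nimport Html\n"
match = _module_from_alias(source, "JD")
print(match.group("module"))   # -> 'Json.Decode'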
|
[
"def _ResolveUsingStarImport(self, module, name):\n wanted_name = self._ModulePrefix() + name\n for alias in module.aliases:\n type_name = alias.type.name\n if not type_name or not type_name.endswith(\".*\"):\n continue\n imported_module = type_name[:-2]\n # 'module' contains 'from imported_module import *'. If we can find an AST\n # for imported_module, check whether any of the imported names match the\n # one we want to resolve.\n if imported_module not in self._module_map:\n continue\n imported_aliases, _ = self._ImportAll(imported_module)\n for imported_alias in imported_aliases:\n if imported_alias.name == wanted_name:\n return imported_alias\n return None",
"def find_symbol(target, name, module=MACINTALK_MODULE):\n for mod in target.module_iter():\n if module and module != mod.GetFileSpec().GetFilename():\n continue\n for sym in mod:\n if sym.GetName() == name:\n return sym\n raise RuntimeError('symbol not found: ' + name)",
"def _modulenamemangle(self, modfilename):\n if not self.source:\n return modfilename\n return os.path.splitext(os.path.basename(modfilename))[0]",
"def resolve_alias(name: str) -> str:\n ...",
"def __call__(self, obj, name):\n module_name = self._old_which_module(obj, name)\n module_name_root = module_name.split(\".\", 1)[0]\n if module_name_root == \"dask_sql\":\n return None\n return module_name",
"def lookupmodule(name):\n if sys.modules.get(name):\n return (sys.modules[name], sys.modules[name].__file__)\n if os.path.isabs(name) and readable(name):\n return (None, name)\n f = os.path.join(sys.path[0], name)\n if readable(f):\n return (None, f)\n root, ext = os.path.splitext(name)\n if ext == '':\n name = name + '.py'\n pass\n if os.path.isabs(name):\n return (None, name)\n for dirname in sys.path:\n while os.path.islink(dirname):\n dirname = os.readlink(dirname)\n pass\n fullname = os.path.join(dirname, name)\n if readable(fullname):\n return (None, fullname)\n pass\n return (None, None)",
"def _sanitize_module(name):\n return _sanitize_identifier(name).lower()",
"def modules_to_search(source, line, col, identifier):\n\n # check if identifier is qualified, if it's\n # like \"String.join\" instead of just \"join\"\n qualified_module = _qualified_namespace(source, line, col, identifier)\n if qualified_module:\n return qualified_module\n # search for explicit import\n importers = [_imports_function(i, identifier) for i in source.split(\"\\n\")]\n modules = [i.groups()[0] for i in importers if i]\n if len(modules) > 0:\n log.debug(\"searching exposing imports\")\n log.debug(modules)\n return modules\n # if nothing obvious is left, do all wildcards\n wild = [_wildcard_import(i) for i in source.split(\"\\n\")]\n mods = [i.groups()[0] for i in wild if i]\n log.debug(\"searching wildcard imports\")\n log.debug(mods)\n return mods",
"def getResolvedFullName(*args, **kwargs):\n \n pass",
"def resolve_module(includes, file_module_map):\n for filename in reversed(includes):\n module = file_module_map.get(filename)\n if module:\n return module\n return None",
"def pick_from_module(module, name):\n for command in from_module(module):\n if command.NAME == name:\n return command\n raise NameError('command not found')",
"def _get_module(context: inspect.FrameInfo) -> str:\n global _module_index # pylint: disable=global-statement\n if context.filename not in _module_index:\n _module_index = {\n mod.__file__: mod.__name__\n for mod in sys.modules.values()\n if hasattr(mod, '__file__') and hasattr(mod, '__name__')\n }\n\n return _module_index[context.filename]",
"def _get_module_from_filename(file):\r\n\r\n file = file.lower()\r\n lowercase = methodcaller('lower')\r\n\r\n for name, module in dict_items(sys.modules):\r\n module_file = getattr(module, '__file__', NOTHING)\r\n if module_file is NOTHING:\r\n continue\r\n if file == lowercase(module_file):\r\n return module\r\n raise ValueError(\"couldn't load module file: '%s'\"%file)",
"def mangleModuleName(self, name, module):\n if self.__mangleModuleName is not None:\n return self.__mangleModuleName(self, name, module)\n return name",
"def from_alias_direct(name:str):\n super(Mutator, Mutator).from_alias_direct(name)\n for k,v in mutatorcache.items():\n if name in v.aliases:\n return v\n return None",
"def _saveMangledModuleName(self, name, module=None):\n memo = self.memo\n nid = id(name)\n x = memo.get(nid)\n\n # handle the case, that the name has been replaced before\n if x is not None and isinstance(x[1], tuple) and 2 == len(x[1]) and x[1][0] is name:\n # already replaced\n return x[1][1]\n\n mangled = self.mangleModuleName(name, module)\n if mangled is name:\n # no replacement required\n return mangled\n\n # use the object replacement system\n orc = self._ObjReplacementContainer(name, mangled)\n self.save(orc)\n # remove the replacement from the stack\n self.write(pickle.POP)\n\n # now we can get the replacement from the memo\n x = memo.get(nid)\n assert x is not None and isinstance(x[1], tuple) and 2 == len(x[1]) and x[1][0] is name\n return x[1][1]",
"def get_source_by_name(self, name):\r\n sources = self.call(GetSourcesList())\r\n for source in sources.getSources():\r\n if source[\"name\"] == name:\r\n return source\r\n return None",
"def _extract_def_source(source: str, name: str) -> str:\n match = re.search(\n r\"(<%def\\s+name\\s*=\\s*[\\\"']\" + name + r\"\\(.*?>.*?</%def>)\", source, flags=re.DOTALL\n )\n if not match:\n warnings.warn(f\"Could not find the template definition '{name}'\", SyntaxWarning)\n return source\n\n return match.group(1)",
"def get_module_name(self):\n return self.__mod_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If a given identifier is qualified, trace it to the module that was imported.
|
def _qualified_namespace(source, line, col, identifier):
lines = source.split("\n")
line_of_id = lines[line]
try:
just_before_id = line_of_id[col - 1]
except IndexError:
print({
"line_of_id": line_of_id,
"line": line,
"col": col,
"identifier": identifier
})
raise
if just_before_id == ".":
until = source.split("\n")[line][:col - 1]
module = _module_name_at_end_of(until)
imported_name = _module_from_alias(source, module)
log.debug("found qualified import {}".format(imported_name))
return [imported_name]
|
[
"def is_import(node):\r\n return node.type in (syms.import_name, syms.import_from)",
"def breakpoint_on_module(session_id, module_type, trace_bp=False):\n session = manager.DebugSessions.retrieve_session(session_id)\n if session is None:\n print(f\"\"\"session ${session_id} doesn't exist\"\"\")\n return\n hook_module = session.get_hook_module()\n hook_module.trace_module(module_type, trace_bp)",
"def _is_import_binding(node, name, package=None):\r\n\r\n if node.type == syms.import_name and not package:\r\n imp = node.children[1]\r\n if imp.type == syms.dotted_as_names:\r\n for child in imp.children:\r\n if child.type == syms.dotted_as_name:\r\n if child.children[2].value == name:\r\n return node\r\n elif child.type == token.NAME and child.value == name:\r\n return node\r\n elif imp.type == syms.dotted_as_name:\r\n last = imp.children[-1]\r\n if last.type == token.NAME and last.value == name:\r\n return node\r\n elif imp.type == token.NAME and imp.value == name:\r\n return node\r\n elif node.type == syms.import_from:\r\n # str(...) is used to make life easier here, because\r\n # from a.b import parses to ['import', ['a', '.', 'b'], ...]\r\n if package and str(node.children[1]).strip() != package:\r\n return None\r\n n = node.children[3]\r\n if package and _find(\"as\", n):\r\n # See test_from_import_as for explanation\r\n return None\r\n elif n.type == syms.import_as_names and _find(name, n):\r\n return node\r\n elif n.type == syms.import_as_name:\r\n child = n.children[2]\r\n if child.type == token.NAME and child.value == name:\r\n return node\r\n elif n.type == token.NAME and n.value == name:\r\n return node\r\n elif package and n.type == token.STAR:\r\n return node\r\n return None",
"def do_import(self, line):\n\n line.replace(';','')\n line = ' '.join(line.split())\n line = line.split()\n if len(line) == 3 and line[1] == 'as':\n mod = line[0]\n modname = line[-1]\n elif len(line) == 1:\n mod = line[0]\n modname = line[0]\n else:\n print(colored('Use: import module OR import module as name',\"red\"))\n return\n command = modname + ' = importlib.import_module(\\''+mod+'\\')'\n if modname in globals().keys():\n try:\n exec('reload('+modname+')', globals())\n except NameError:\n exec('importlib.reload('+modname+')', globals())\n try:\n exec(command ,globals())\n func_list = [f for f in getmembers(globals()[modname]) if (isfunction(f[1]) and hasattr(f[1], 'in_easyaccess'))]\n if len(func_list) > 0:\n print(colored(\"The following functions are accessible by easyaccess\", \"green\"))\n print(colored(\"i.e., they are wrapped with @toeasyaccess\", \"green\"))\n print('')\n for f in func_list:\n print(' '+modname+'.'+f[0]+'()')\n fun_utils.ea_func_dictionary[modname+'.'+f[0]] = f[1]\n else:\n print(colored(\"No function wrapped for easyaccess was found in \"+modname, \"red\"))\n print(colored(\"See documentation to see how to wrap functions\", \"red\"))\n except:\n print_exception()\n return",
"def func_ref_to_import(func):\n return f\"{getmodule(func).__name__}.{func.__name__}\"",
"def trace(self, location, fw):\n func_name = \"trace\" + location\n return (getattr(self, func_name)(fw))",
"def _get_qualified_function_name(method):\n return '{0}.{1}'.format(method.__module__, method.__name__)",
"def import_name(name: str, source: str, namespace: Dict[str, Any]) -> Any:\n level = 0\n while source[level] == \".\":\n level += 1\n assert level < len(source), \"importing from parent isn't supported\"\n module = __import__(source[level:], namespace, None, [name], level)\n return getattr(module, name)",
"def modules_to_search(source, line, col, identifier):\n\n # check if identifier is qualified, if it's\n # like \"String.join\" instead of just \"join\"\n qualified_module = _qualified_namespace(source, line, col, identifier)\n if qualified_module:\n return qualified_module\n # search for explicit import\n importers = [_imports_function(i, identifier) for i in source.split(\"\\n\")]\n modules = [i.groups()[0] for i in importers if i]\n if len(modules) > 0:\n log.debug(\"searching exposing imports\")\n log.debug(modules)\n return modules\n # if nothing obvious is left, do all wildcards\n wild = [_wildcard_import(i) for i in source.split(\"\\n\")]\n mods = [i.groups()[0] for i in wild if i]\n log.debug(\"searching wildcard imports\")\n log.debug(mods)\n return mods",
"def globaltrace_lt(self, frame, why, arg):\r\n if why == 'call':\r\n code = frame.f_code\r\n filename = frame.f_globals.get('__file__', None)\r\n if filename:\r\n # XXX modname() doesn't work right for packages, so\r\n # the ignore support won't work right for packages\r\n modulename = modname(filename)\r\n if modulename is not None:\r\n ignore_it = self.ignore.names(filename, modulename)\r\n if not ignore_it:\r\n if self.trace:\r\n print (\" --- modulename: %s, funcname: %s\"\r\n % (modulename, code.co_name))\r\n return self.localtrace\r\n else:\r\n return None",
"def pre_safe_import_module(api):\n # Dictionary from conventional module names to \"six.moves\" attribute names\n # (e.g., from `tkinter.tix` to `six.moves.tkinter_tix`).\n real_to_six_module_name = eval_statement(\n'''\nimport six\nprint('{')\n\n# Iterate over the \"six._moved_attributes\" list rather than the\n# \"six._importer.known_modules\" dictionary, as \"urllib\"-specific moved modules\n# are overwritten in the latter with unhelpful \"LazyModule\" objects.\nfor moved_module in six._moved_attributes:\n # If this is a moved module or attribute, map the corresponding module. In\n # the case of moved attributes, the attribute's module is mapped while the\n # attribute itself is mapped at runtime and hence ignored here.\n if isinstance(moved_module, (six.MovedModule, six.MovedAttribute)):\n print(' %r: %r,' % (\n moved_module.mod, 'six.moves.' + moved_module.name))\n\nprint('}')\n''')\n\n api.module_graph.add_module(RuntimeModule('six.moves'))\n for real_module_name, six_module_name in real_to_six_module_name.items():\n api.module_graph.alias_module(real_module_name, six_module_name)",
"def AssemblyQualifiedName(self) -> str:",
"def trace(self):\n\n branch = self.func.__module__.split('.')\n\n # Remove part of the stack above the root of the structured API endpoints\n for i in range(1, len(branch)):\n sub_branch = '.'.join(branch[:i])\n module = _get_module(sub_branch)\n\n if hasattr(module, Glossary.ROOT.value):\n return '%s.%s' % ('.'.join(branch[i:]), self.func.__name__)\n\n return '%s.%s' % (self.func.__module__, self.func.__name__)",
"def _get_calling_module(depth=0, *args, **kws):\r\n return _find_module(_get_frame(2+depth).f_globals.get('__name__'))",
"def _on_import_factory(module, raise_errors=True):\n def on_import(hook):\n # Import and patch module\n path = 'ddtrace.contrib.%s' % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import",
"def star_import(mod_or_name, **kws):\r\n \r\n module = validate_module(mod_or_name)\r\n\r\n imp_meta = kws.pop('import_metadata', False)\r\n ig_priv = kws.pop('ignore_private', False)\r\n ig_list = kws.pop('ignore_list', False) or set()\r\n overwrite = kws.pop('overwrite', False)\r\n r_module = kws.pop('module', False)\r\n prefix = kws.pop('prefix', '')\r\n _validate_kws(kws)\r\n \r\n import_list = true_star_imports(module, ig_priv, ig_list, imp_meta)\r\n rename_list = {}\r\n for name in import_list:\r\n if not prefix:\r\n rename_list[name] = name\r\n continue\r\n pname = '%s%s'%(prefix, name)\r\n if pname in _STAR_IMPORT_IGNORE:\r\n args = name, prefix, pname\r\n m = (\"imported name '%s' using the prefix '%s' will '\"\r\n \"overwrite the special module name: '%s'\") % args\r\n raise TypeError(m)\r\n rename_list[name] = pname\r\n \r\n caller = _get_calling_module()\r\n with Scope(caller) as context, Scope(module) as imported_context:\r\n\r\n imported = {k:imported_context[k] for k in import_list}\r\n \r\n if not overwrite:\r\n for name in import_list & context.keys():\r\n old, new = context[name], imported_context[name]\r\n if old is new:\r\n continue\r\n current_ob = generic_repr(context[name])\r\n imported_ob = generic_repr(imported_context[name])\r\n module_name = context.module_name\r\n imported_name = imported_context.module_name\r\n args = imported_name, name, imported_ob, name, current_ob\r\n error = ImportError('tried importing %s.%s as %s but '\r\n '%r already exists as %s'\r\n %args)\r\n raise error\r\n else:\r\n import_list -= context.keys()\r\n \r\n imported = {v:imported[k] for k, v in dict_items(rename_list)}\r\n context.namespace.update(imported)\r\n return module if r_module else imported",
"def this_module():\n return caller(exclude_first=False)._module",
"def symbolic_trace(\n root: Union[torch.nn.Module, Callable[..., Any]],\n concrete_args: Optional[Dict[str, Any]] = None,\n meta_args: Optional[Dict[str, Any]] = None,\n trace_act_ckpt=False,\n) -> ColoGraphModule:\n graph = ColoTracer(trace_act_ckpt=trace_act_ckpt).trace(root, concrete_args=concrete_args, meta_args=meta_args)\n name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__\n return ColoGraphModule(root, graph, name)",
"def _ResolveUsingStarImport(self, module, name):\n wanted_name = self._ModulePrefix() + name\n for alias in module.aliases:\n type_name = alias.type.name\n if not type_name or not type_name.endswith(\".*\"):\n continue\n imported_module = type_name[:-2]\n # 'module' contains 'from imported_module import *'. If we can find an AST\n # for imported_module, check whether any of the imported names match the\n # one we want to resolve.\n if imported_module not in self._module_map:\n continue\n imported_aliases, _ = self._ImportAll(imported_module)\n for imported_alias in imported_aliases:\n if imported_alias.name == wanted_name:\n return imported_alias\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the identifier, return the list of module names that should be searched for the symbol.
|
def modules_to_search(source, line, col, identifier):
# check if identifier is qualified, if it's
# like "String.join" instead of just "join"
qualified_module = _qualified_namespace(source, line, col, identifier)
if qualified_module:
return qualified_module
# search for explicit import
importers = [_imports_function(i, identifier) for i in source.split("\n")]
modules = [i.groups()[0] for i in importers if i]
if len(modules) > 0:
log.debug("searching exposing imports")
log.debug(modules)
return modules
# if nothing obvious is left, do all wildcards
wild = [_wildcard_import(i) for i in source.split("\n")]
mods = [i.groups()[0] for i in wild if i]
log.debug("searching wildcard imports")
log.debug(mods)
return mods
|
[
"def get_symbols_in_submodule(name):\n symbols = {}\n for k, v in _API_SYMBOLS.items():\n if k.startswith(name):\n symbols[k] = v\n return symbols",
"def find_symbol(target, name, module=MACINTALK_MODULE):\n for mod in target.module_iter():\n if module and module != mod.GetFileSpec().GetFilename():\n continue\n for sym in mod:\n if sym.GetName() == name:\n return sym\n raise RuntimeError('symbol not found: ' + name)",
"def _find_all_symbols(module):\n return [f.name for f in module.functions]",
"def getSymbol(id):",
"def _find_module(self, identifier: str, parts: list):\n\n parts = list(parts)\n (module, origin) = self._action_modules[identifier]\n\n if origin == 'module':\n (module, loaded_parts) = self._load_sub_module(module, parts)\n if isinstance(loaded_parts, str):\n loaded_parts = loaded_parts.split('.')\n parts = parts[len(loaded_parts) if loaded_parts else 0:]\n\n elif origin == 'path':\n # See how \"low\" we can go directory-wise first\n while parts:\n part = parts.pop(0)\n try_dir = os.path.join(base, part)\n if not os.path.isdir(try_dir):\n parts.insert(0, part)\n break\n base = try_dir\n\n # now that we know in which directory we'll need to find the logic:\n sys.path.append(os.path.abspath(base))\n file = parts.pop(0)\n module = import_module(file)\n\n return module, parts",
"def import_symbols(names):\n for addr, name in names.items():\n addr = int(addr)\n name = sanitize_name(name).encode(\"utf-8\")\n idc.MakeName(addr, name)",
"def _context_modules_from_prefix(maybe_fully_qualified_name: str) -> Sequence[str]:\n parts = maybe_fully_qualified_name.split(\".\")[:-1]\n return [\".\".join(parts[0 : i + 1]) for i in range(len(parts))]",
"def _module_name(*components):\r\n return '.'.join(components)",
"def genenames_from10x_mod(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[0]:])\n \n return genesymbol#,ensemblid",
"def symbols(self, symbol_list):\n return self.context.asset_finder.lookup_symbols(\n symbol_list,self.context.timestamp)",
"def _module_from_alias(source, module_name):\n regular_or_aliased = _aliased_module_regex(module_name)\n _search = [regular_or_aliased(i) for i in source.split(\"\\n\")]\n matches = [i for i in _search if i is not None]\n assert len(matches) == 1, (\"only mode module name \"\n \"should match '{}', instead \"\n \"found: {}\".format(module_name, [i.string for i in matches])) # NOQA\n return matches[0]",
"def _generate_module_lookup_table(self, model):\n self._modules_by_name = dict()\n for name, module in model.named_modules(prefix=self._model_name):\n self._modules_by_name[name] = module",
"def getList(name):",
"def search(self, key):\n\n symbols=[]\n with self._lock:\n for symbol in self.all():\n try:\n try:\n if key.match(symbol.tag):\n symbols.append(symbol)\n except:\n if key in symbol.tag:\n symbols.append(symbol)\n except:\n pass\n return symbols",
"def _get_imported_symbols(obj: Union[str, types.ModuleType]):\n\n class ImportNodeVisitor(ast.NodeVisitor):\n \"\"\"An `ast.Visitor` that collects the names of imported symbols.\"\"\"\n\n def __init__(self):\n self.imported_symbols = []\n\n def _add_imported_symbol(self, node):\n for alias in node.names:\n name = alias.asname or alias.name\n if name == '*':\n continue\n if '.' in name:\n continue\n self.imported_symbols.append(name)\n\n def visit_Import(self, node): # pylint: disable=invalid-name\n self._add_imported_symbol(node)\n\n def visit_ImportFrom(self, node): # pylint: disable=invalid-name\n self._add_imported_symbol(node)\n\n tree = get_source.get_ast(obj)\n if tree is None:\n return []\n\n visitor = ImportNodeVisitor()\n visitor.visit(tree)\n return visitor.imported_symbols",
"def import_star(modules:[str], ns:dict=None):\n global_imports([f\"from {m} import *\" for m in modules], ns)",
"def inspect_module_names(self) -> Set[str]:\n modules = []\n pattern_1 = r\"import\\s+(?P<module>\\w+)\"\n pattern_2 = r\"from\\s+(?P<module>\\w+)\"\n if not self._is_package:\n with open(str(self.root_filename), \"r\") as file:\n for line in file.readlines():\n m = re.match(pattern_1, line)\n if m:\n module = m.group(\"module\")\n modules.append(module)\n pass\n m = re.match(pattern_2, line)\n if m:\n module = m.group(\"module\")\n modules.append(module)\n pass\n pass\n pass\n pass\n else:\n # pattern = r\"import\\s+(?P<module>\\w+)\"\n for path, _, filenames in walk(str(self.root)):\n dir_path = self.root.joinpath(path)\n for filename in filenames:\n abs_path = dir_path.joinpath(filename)\n\n if not str(abs_path).endswith(\".py\"):\n continue\n pass\n modules.append(filename)\n pass\n pass\n return set(modules)",
"def getImportList(cls, str_):\n if str_.startswith(\"BZh9\"):\n str_ = decompress(str_)\n importModules = []\n opcodesIt = pickletools.genops(str_)\n for opcodes in opcodesIt:\n if opcodes[0].name == \"GLOBAL\":\n importModules.append(opcodes[1])\n return importModules",
"def get_symbol(com_name,i):\n symbol = str(i)\n for c in com_name:\n symbol = symbol + '_' + c[0]\n return symbol"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get elm-package.json as a dict
|
def get_package_json(path):
with open(os.path.join(path, "elm-package.json")) as p:
return json.loads(p.read())
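A short usage sketch built on the function above; the "source-directories" and "dependencies" keys are the usual elm-package.json fields, assumed here for illustration:

import json  # get_package_json above also relies on json and os being imported
import os

pkg = get_package_json(os.getcwd())
for src_dir in pkg.get("source-directories", []):
    print(os.path.join(os.getcwd(), src_dir))
for name, version_range in pkg.get("dependencies", {}).items():
    print(name, version_range)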
|
[
"def get_package_data(self) -> dict:\n return self.pack_data",
"def load(self):\n if not self.exists():\n return {}\n with open(self.filepath) as f:\n j = json.load(f)\n try:\n self.pkgname = j['name']\n self.packages = set(j['packages'])\n self.depends = set(j['depends'])\n self.makedepends = set(j['makedepends'])\n except KeyError as e:\n log.warning('Found malformed manifest: {}'.format(e))\n return {}\n\n return j",
"def _load_data(self):\n return self.s3Helper.read_json_object(bucket_name=self.bucket_name,\n obj_key=NPM_PACKAGE_FILE_PATH) or {}",
"def get_package_info(pkg_name):\n global package_info\n if pkg_name in package_info:\n return package_info.get(pkg_name)\n else:\n try:\n yaml_stream = check_output(['apt-cache','show',pkg_name])\n except:\n print \"Unable to find info for package: '%s'\" % pkg_name\n package_info[pkg_name] = {}\n return {}\n d = Deb822(yaml_stream)\n package_info[pkg_name] = d\n return d",
"def pd(self):\n d = dict()\n d[\"descriptor_extension\"] = \"yml\"\n d[\"version\"] = \"0.5\"\n p = dict()\n p[\"description\"] = self.manifest.get(\"description\")\n p[\"maintainer\"] = self.manifest.get(\"maintainer\")\n p[\"name\"] = self.manifest.get(\"name\")\n p[\"vendor\"] = self.manifest.get(\"vendor\")\n p[\"version\"] = self.manifest.get(\"version\")\n d[\"package\"] = p\n return d",
"def json(self):\n with open(self.manifest_path, encoding='utf-8') as f:\n manifest = Manifest(f)\n job_json = manifest.json\n\n # Insert git branch information\n for pkg_doc in job_json['packages']:\n if self._probe_git:\n pkg_doc['git_branch'] = self.package_branch(pkg_doc['name'])\n else:\n pkg_doc['git_branch'] = 'unknown'\n\n # Insert git repo URLs\n for pkg_doc in job_json['packages']:\n pkg_doc['git_url'] = self.package_repo_url(pkg_doc['name'])\n\n return job_json",
"def pkg_info_json(folder=None):\r\n # ---- Checks\r\n if not folder:\r\n folder = sys.prefix + \"\\\\conda-meta\"\r\n folder = Path(folder)\r\n if not folder.is_dir():\r\n print(\"\\nInvalid path... {}\".format(folder))\r\n return\r\n files = list(folder.glob(\"*.json\"))\r\n if not files:\r\n print(\"{} doesn't have any json files\".format(folder))\r\n return\r\n #\r\n # --- Package, Filename, Dependencies\r\n packages = []\r\n m0 = m1 = m2 = 0\r\n for f in files:\r\n ret = parse_json(f, key=\"depends\") # ---- look at dependencies only\r\n nme = str(f.name).rsplit(\"-\", 2)[0] # ---- split off the last two\r\n if len(ret) == 1:\r\n ret = ret[0]\r\n elif len(ret) > 1:\r\n srted = sorted(ret)\r\n ret = \"; \".join([i for i in srted if \"py\" not in i]) # `; ` used\r\n else:\r\n ret = \"None\"\r\n m0 = max(m0, len(nme))\r\n m1 = max(m1, len(str(f.name)))\r\n m2 = max(m2, len(ret))\r\n packages.append((nme, f.name, ret))\r\n dt1 = [(\"Package\", \"<U{}\".format(m0)), (\"Filename\", \"<U{}\".format(m1)),\r\n (\"Dependencies\", \"<U{}\".format(m2))]\r\n packages = np.asarray(packages, dtype=dt1)\r\n #\r\n # ---- Dependency, Counts\r\n z = []\r\n for dep in packages['Dependencies']:\r\n if dep not in (\"\", \" \"):\r\n z += dep.split(\"; \") # split on `; ` delimiter\r\n z = np.asarray(z)\r\n uniq, idx, cnts = np.unique(z, return_index=True, return_counts=True)\r\n uniq2 = [[u, u.split(\" \")[0]][\" \" in u] for u in uniq if u != \"\"]\r\n m0 = max(np.char.str_len(uniq2))\r\n m1 = np.max(np.char.str_len(uniq2)) + 5\r\n dt2 = [(\"Full_name\", \"<U{}\".format(m0)), (\"Counts\", \"i8\"),\r\n (\"Simple_name\", \"<U{}\".format(m1))]\r\n dep_counts = np.asarray(list(zip(uniq, cnts, uniq2)), dtype=dt2)\r\n #\r\n # ---- Package, Required_by\r\n required_by = []\r\n names = packages['Package']\r\n depends = packages['Dependencies']\r\n max_len = 0\r\n for nme in names:\r\n if nme in ('py', 'python'):\r\n required_by.append([nme, \"many\"])\r\n continue\r\n w = names[[nme in i for i in depends]]\r\n if np.size(w) > 0:\r\n v = w.tolist()\r\n v0 = \"; \".join([i.split(\"; \")[0] for i in v])\r\n max_len = max(max_len, len(v0))\r\n required_by.append([nme, v0])\r\n else:\r\n required_by.append([nme, \"None\"])\r\n r_dt = \"<U{}\".format(max_len)\r\n dt = np.dtype([('Package', '<U30'), ('Required_by', r_dt)])\r\n required_by = uts(np.asarray(required_by), dtype=dt)\r\n return packages, dep_counts, required_by",
"def get_setup_json():\n with open(FILEPATH_SETUP_JSON, \"r\") as handle:\n setup_json = json.load(handle) # , object_pairs_hook=OrderedDict)\n\n return setup_json",
"def package_json(context: Context):\n context.write_template('package.json')",
"def get_language_pack(locale: str) -> dict:\n if check_locale(locale):\n for entry_point in entry_points(group=JUPYTERLAB_LANGUAGEPACK_ENTRY):\n if locale == entry_point.name:\n return entry_point.load()\n else:\n return {}\n else:\n print(\"Locale '{locale}' not valid!\".format(locale=locale))\n return {}",
"def get_package_metadata(dependency):\n\n version_symbol_index = dependency.rfind('@')\n name_index = dependency.find('/') + 1\n dependency_name = dependency[name_index:version_symbol_index]\n\n entry = dict()\n\n entry['name'] = dependency\n\n result = json.loads(pypistats.recent(dependency_name, \"month\", format=\"json\"))\n print(result)\n entry['downloads_last_month'] = result['data']['last_month']\n request_url = f'{PYPI_DEPENDENCY_META_URL}{dependency_name}/{dependency[version_symbol_index+1:]}/json'\n json_result = requests.get(request_url)\n print(request_url)\n print(json_result)\n return entry",
"def schema() -> Dict:\n from pkg_resources import resource_string\n import json\n\n data = resource_string(\"ceeder.schemas\", \"cdr-v5.json\")\n return json.loads(data)",
"def getinfo(self, packname: str, complete: bool=False) -> dict:\n\t\tinfo = None\n\n\t\tif packname not in self.__root['packs']:\n\t\t\tinfo = {\n\t\t\t\t\"NOT INSTALLED\": \"PACKAGE NOT INSTALLED\"\n\t\t\t}\n\t\t\tinfo['available-versions'] = dmutils.getversions(packname)\n\t\telif complete:\n\t\t\tinfo = dmutils.getpackinfo(packname)\n\t\t\tinfo['head'] = self.__root['packs'][packname]['head']\n\t\t\tinfo['dev'] = self.__root['packs'][packname]['dev']\n\t\t\tinfo['available-versions'] = dmutils.getversions(packname)\n\t\telse:\n\t\t\tinfo = self.__root['packs'][packname].copy()\n\n\t\treturn info",
"def comparable_representation(top_node, node_pkg):\n d = {\"packages\": {}, \"extensions\": {}}\n for k, v in node_pkg.iter_category(\"extensions\"):\n d[\"extensions\"][k] = v.extension_from(top_node)\n for k, v in node_pkg.iter_category(\"packages\"):\n d[\"packages\"][k] = (v.full_name, v.nodes, v.top_node, v.top_or_lib_node)\n return d",
"def get_opencue_metadata():\n version = get_metadata('attributes/opencue_version', '0.3.6')\n return dict(\n temp_dir=TEMP_DIR + '/opencue',\n install_dir='/opt/opencue',\n version=version,\n )",
"def get_manifest(self):\n logger.debug(\"Getting manifest {}\".format(self))\n text = self.get_text(self.get_manifest_key())\n return json.loads(text)",
"def GetJson(self):\n pretty_string = json.dumps(self.GetManifest(), indent=2)\n # json.dumps sometimes returns trailing whitespace and does not put\n # a newline at the end. This code fixes these problems.\n pretty_lines = pretty_string.split('\\n')\n return '\\n'.join([line.rstrip() for line in pretty_lines]) + '\\n'",
"def _from_npm_registry(self, package_name=str):\n data_dict = None\n api_url = \"https://registry.npmjs.org/\" + str(package_name)\n try:\n response = requests.get(api_url)\n json_data = response.json()\n latest_version = json_data.get(\"dist-tags\", {}).get(\"latest\", None)\n if latest_version:\n latest_version_data = json_data.get(\"versions\", {}).get(latest_version, {})\n data_dict = {\n \"name\": json_data.get(\"name\", \"\"),\n \"description\": json_data.get(\"description\", \"\"),\n \"version\": latest_version,\n \"keywords\": latest_version_data.get(\"keywords\", []),\n \"dependencies\":\n list(latest_version_data.get(\"dependencies\", {}).keys()),\n \"homepage\": json_data.get(\"homepage\", \"\"),\n \"repositoryurl\": json_data.get(\"repository\", {}).get(\"url\", \"\"),\n \"updated_timestamp\": int(datetime.datetime.now().timestamp()),\n }\n # Other fields that were present in past, but not used for training model are\n # below. Removing this fields saves lot of space while storing pacakge data in\n # S3.\n # \"devDependencies\":\n # list(latest_version_data.get(\"devDependencies\", {}).keys()),\n # \"peerDependencies\":\n # list(latest_version_data.get(\"peerDependencies\", {}).keys()),\n # \"readme\": json_data.get(\"readme\", \"\"),\n\n self._track_stats('fetched_from_npm', 1)\n except Exception as e:\n self._track_stats('npm_fetch_errors', 1)\n logger.error(\"Can't fetch the keywords for %s from NPM Registry, it throws %s\",\n package_name, e)\n\n return data_dict",
"def pkg_info():\n try:\n doc = __doc__.decode(\"UTF-8\")\n except (AttributeError, UnicodeError):\n doc = __doc__ # Python3, or some strangeness\n\n return dict(\n # project data & layout\n name = __name__.split('.')[0],\n ## TODO: version = re.search(r\"(?<=\\()[^)]+(?=\\))\", changelog).group(),\n package_dir = {\"\": \"src\"},\n ## TODO: packages = find_packages(projectdir / \"src\", exclude=[\"tests\"]),\n test_suite = \"nose.collector\",\n zip_safe = True,\n include_package_data = True,\n data_files = [\n (\"EGG-INFO\", [\n \"README.md\", \"LICENSE\", \"debian/changelog\",\n ]),\n ],\n entry_points = {\n \"console_scripts\": [\n \"wand = neutrino_wand.cli:run\",\n ],\n },\n\n # dependency management\n install_requires = [\n ],\n setup_requires = [\n \"docutils\",\n \"Sphinx\",\n ],\n extras_require = {\n },\n\n # PyPI\n url = \"https://github.com/jhermann/neutrino-wand\",\n license = \"Apache License Version 2.0\",\n keywords = \"python tool monitoring influxdb devops reporting visualops\",\n author = u\"Jürgen Hermann\",\n author_email = \"jh@web.de\",\n description = doc.split('.')[0].strip(),\n long_description = doc.split('.', 1)[1].strip(),\n classifiers = [\n # values at http://pypi.python.org/pypi?:action=list_classifiers\n \"Development Status :: 3 - Alpha\",\n #\"Development Status :: 4 - Beta\",\n #\"Development Status :: 5 - Production/Stable\",\n \"Operating System :: OS Independent\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Documentation\",\n \"Topic :: Utilities\",\n ],\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get the path to the elm-package.json for a given file
|
def _elm_package_for(file_path):
    # walk up the file tree looking for elm-package.json
parts = file_path.split(os.path.sep)
for i in list(reversed(range(len(parts))))[:-1]:
guess_parts = parts[:i] + ["elm-package.json"]
current_guess = "/" + os.path.join(*guess_parts)
if os.path.exists(current_guess):
return current_guess
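An equivalent sketch that walks up the directory tree with os.path.dirname instead of re-joining path parts, and avoids hard-coding the leading "/"; offered as an illustration, not the project's actual code:

import os

def _elm_package_for_alt(file_path):
    # Walk upward from the file's directory until elm-package.json is found
    # or the filesystem root is reached.
    directory = os.path.dirname(os.path.abspath(file_path))
    while True:
        candidate = os.path.join(directory, "elm-package.json")
        if os.path.exists(candidate):
            return candidate
        parent = os.path.dirname(directory)
        if parent == directory:  # reached the root
            return None
        directory = parent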
|
[
"def get_package_json(path):\n with open(os.path.join(path, \"elm-package.json\")) as p:\n return json.loads(p.read())",
"def example_of_how_to_refer_to_a_file_in_the_package(self):\n file_name = pkg_resources.resource_string(\"{{library_name}}\", \"module_data/file_required_by_module.json\")\n logger.info(\"file name is %s\", file_name)",
"def get_blueprint_json_path(file_name):\n return os.path.join(get_soong_out_path(), file_name)",
"def package_path(filename, quoted = 1):\n name = os.path.join(os.path.dirname(__file__),filename)\n if quoted:\n return \"\\\"\" + name + \"\\\"\"\n return name",
"def get_specification_file_location(name: typing.AnyStr, specification: typing.Dict) -> typing.AnyStr:\n return os.path.join(get_save_file_directory(name, specification), \"specification.json\")",
"def find_extension_file() -> Path:\n paths = list(Path(\"web-ext-artifacts/\").glob(\"*.zip\"))\n if len(paths) < 1:\n raise ValueError(\"Can't find extension file. Run `yarn build`.\")\n elif len(paths) > 1:\n raise ValueError(\"Found multiple extension files. Run `yarn rebuild`.\")\n else:\n return paths[0].resolve()",
"def get_config_json_file_path(id):\n return os.path.join(DOCKER_VOLUMES_DIR, id, 'config.json')",
"def version_filesystem_location():\n return os.path.join(source_filesystem_location(), 'version.json')",
"def resource_file(self) -> Path:\n files = _package_directory_types(self._path)\n if not files:\n raise wn.Error(f'no resource found in package: {self._path!s}')\n elif len(files) > 1:\n raise wn.Error(f'multiple resource found in package: {self._path!s}')\n return files[0][0]",
"def get_package_filename(pkg):\n return '%s-%s-%s.%s.rpm' % (pkg.name, pkg.version, pkg.release, pkg.arch)",
"def package_repo_path(self, package_name):\n return os.path.join(self._dirname, 'build', package_name)",
"def jsonFile(self):\n return self.json_file",
"def file_path(self) -> str:\n return self.files[self.__main['location']['file']]",
"def _find_metadata(file_alf):\n ns, obj = file_alf.name.split('.')[:2]\n meta_data_file = list(file_alf.parent.glob(f'{ns}.{obj}*.metadata*.json'))\n if meta_data_file:\n return meta_data_file[0]",
"def get_alembic_file_from_json(self, path):\n \n #initialize class\n json_file = file(path)\n \n data = json.loads(json_file.read())\n \n return data['reference'][0]['animation_cache_file_path']",
"def get_path() -> str:\n config_dir: str = appdirs.user_config_dir(\"plotman\")\n return config_dir + \"/plotman.yaml\"",
"def get_file_path(self, model):\n options = model.git_options()\n return '{}/{}.json'.format(options.get('folder'), model.id)",
"def get_key_file_path(file_name):\n here = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(here, \"..\", \"keys\", file_name)",
"def get_filepath_from_pythonpath(filename):\n for path in sys.path:\n real_path = os.path.join(path, filename)\n if exists(real_path):\n return real_path\n \n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return an HTML chunk with a table that lists all current users (i.e. participants), their names, and their user IDs.
|
def users_table(cls, with_buttons=None):
to_render = "Users table:<br /><table><tr><td>User ID</td><td>Last name</td><td>First name</td>"
    if with_buttons is not None:
to_render = to_render + "<td>In XP?</td>"
to_render = to_render + "</tr>"
cur_session = xp_management.ExpSession.get_by_id(xp_management.ExpSession.get_open_session())
for user_rec in cls.all():
to_render = to_render + "<tr><td>" + str(user_rec.public_id) + "</td><td>" + getattr(user_rec, "lastname",
"") + "</td><td>" + getattr(
user_rec, "firstname", "") + "</td>"
        if with_buttons is not None and cur_session is not None:
user_id = user_rec.key().id()
if user_id not in cur_session.participants:
to_render = to_render + "<td><a href=\"" + with_buttons + "?i=" + str(user_id) + "\">add</a></td>"
else:
to_render = to_render + "<td>yes <a href=\"" + with_buttons + "?remove=" + str(
user_id) + "\">remove</a></td>"
to_render = to_render + "</tr>"
to_render = to_render + "</table>"
return to_render
|
[
"def show_all_users():\n users = User.query.all()\n\n return render_template('user_list.html', users=users)",
"def list_members():\n\tcheck_admin()\n\tmembers = db.engine.execute('select s.id as id, s.name as name, t.title as title, p.task as task,\\\n \t\tp.progress as progress, s.email as email\\\n \t\tfrom (participations p left join students s on p.stu_id = s.id\\\n \t\tleft join teams t on p.pro_id = t.pro_id)\\\n\t\twhere p.pro_id in (select pro_id from participations as p where p.stu_id = 1)')\n\treturn render_template('admin/members/members.html', members=members, title='Team Members')",
"def profiles():\n users = UserProfile.query.all()\n return render_template(\"profiles.html\",users = users)",
"def displayUsers(userList):\n\n print(\"---------------------------------\")\n for id, u in enumerate(userList):\n print(\"User ID =\", id)\n print(\"Name =\", u.getName())\n print(\"email =\", u.getEmail())\n print(\"Nickname =\", u.getNickname())\n print(\"---------------------------------\")",
"def people():\n\n # this will get all person data with id if user registered in session\n if session.get('user_id'):\n response = nb_session.get(\n f'https://{nation_slug}.nationbuilder.com/api/v1/people/{session[\"user_id\"]}',\n params={'format': 'json'},\n headers={'content-type': 'application/json'}\n )\n person = json.loads(response.text)\n if \"person\" in person:\n answer = person[\"person\"][\"first_name\"] + ' ' + person[\"person\"][\"last_name\"]\n else:\n answer = 0\n else:\n answer = 0 # zero means no user registered in session for frontend\n\n # this will get every person data from API\n response_all = nb_session.get(\n f'https://{nation_slug}.nationbuilder.com/api/v1/people/',\n params={'format': 'json'},\n headers={'content-type': 'application/json'}\n )\n userlist = json.loads(response_all.text)\n\n return render_template('people.html', answer=answer, userlist=userlist)",
"def view_users():\n\n users = []\n for user in crud.get_users():\n print(user.user_id)\n lessons = []\n for lesson in user.lessons:\n lessons.append(lesson.as_dict()) # lessons = dictionary of each lesson\n user_lessons = user.as_dict()\n user_lessons['lessons'] = lessons\n users.append(user_lessons)\n print(f'{users} from server.py /api/users endpoint')\n return {'users': users}",
"def index():\n\n users = User.query.all()\n\n return render_template('index.html', nuser='active',users=users)",
"def exam_scores_all_users_table(request, exam_id):\n exam = get_object_or_404(Exam, pk=exam_id)\n tasks = exam.task_set.all().prefetch_related('scenario_set')\n users = LatestResult.users_for_exam(exam)\n results = []\n for username in users:\n results.append({\n 'user': username,\n 'task_results': LatestResult.get_results_for_user(exam, username)\n })\n return render(request, 'frontend/exams/scores/table_results/index.html', {\n 'exam': exam,\n 'results': results,\n 'tasks': tasks\n })",
"def sessionList(self):\n l_conn = self.m_connectionPool.getconn('sessionList()')\n l_cursor = l_conn.cursor()\n try:\n l_cursor.execute(\"\"\"\n select\n A.\"ST_SESSION_ID\"\n , A.\"DT_CRE\"\n , B.\"N_STORY_COUNT\"\n , C.\"ST_NAME\"\n , C.\"ST_USER_ID\"\n from \"TB_SESSION\" A\n join (\n select \"ST_SESSION_ID\", count(1) as \"N_STORY_COUNT\"\n from \"TB_STORY\"\n group by \"ST_SESSION_ID\"\n ) B on A.\"ST_SESSION_ID\" = B.\"ST_SESSION_ID\"\n join \"TB_USER\" C on C.\"ID_INTERNAL\" = A.\"ID_INTERNAL\"\n order by \"DT_CRE\" desc\n limit {0};\n \"\"\".format(EcAppParam.gcm_sessionDisplayCount))\n\n l_response = ''\n for l_sessionId, l_dtCre, l_count, l_userName, l_userId in l_cursor:\n l_response += \"\"\"\n <tr>\n <td>{0}</td>\n <td>{1}</td>\n <td><a href=\"/session/{2}\">{2}</a></td>\n <td>{3}</td>\n <td style=\"text-align: center;\">{4}</td>\n <tr/>\n \"\"\".format(\n l_userName, l_userId, l_sessionId, l_dtCre.strftime('%d/%m/%Y %H:%M'), l_count)\n except Exception as e:\n self.m_logger.warning('TB_SESSION query failure: {0}'.format(repr(e)))\n raise\n\n l_cursor.close()\n self.m_connectionPool.putconn(l_conn)\n return \"\"\"\n <html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\" >\n <head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\" />\n </head>\n <body>\n <table>\n <tr>\n <td style=\"font-weight: bold;\">ST_NAME</td>\n <td style=\"font-weight: bold;\">ST_USER_ID</td>\n <td style=\"font-weight: bold;\">ST_SESSION_ID</td>\n <td style=\"font-weight: bold;\">DT_CRE</td>\n <td style=\"font-weight: bold;\">N_STORY_COUNT</td>\n <tr/>\n {0}\n </table>\n </body>\n </html>\n \"\"\".format(l_response)",
"def get_userlist(self, room):\n users = \"\"\n with Chat.lock:\n for user in room.users:\n users += \" * {}\".format(user)\n if user == self.name:\n users += \" (** this is you)\\n\"\n else:\n users += \"\\n\"\n users += \"end of list.\"\n return users",
"def followers():\n userid = session[\"user_id\"]\n\n following_user = following_users(userid)\n\n # check if you are going to look at another profile's list of followers or your own list\n username = request.args.get('username')\n\n # if you are going to watch another profile's list get the data of that profile\n if username:\n id_username = get_id(username)\n followers = db.execute(\"SELECT own_username, own_full_name FROM volgend WHERE following_id = :following_id\",\n following_id = id_username)\n\n # get the data of your own profile\n else:\n followers = db.execute(\"SELECT own_username, own_full_name FROM volgend WHERE following_id = :userid\", userid = userid)\n\n # print screen on page\n return render_template(\"followers.html\", users = followers, following_user=following_user)",
"def profiles():\n profs = UserProfile.query.order_by(UserProfile.lastname).all()\n return render_template('profiles.html', users=profs)",
"def admin_users():\n users = User.select()\n return render_template('users.html', users=users)",
"def clm_ajax_get_table_users(request):\n if request.method == 'GET':\n users = prep_data('admin_clm/user/get_list/', request.session)\n\n for item in users:\n item['is_activeName'] = unicode(user_states[item['is_active']])\n\n return messages_ajax.success(users)",
"def update_html():\n user_list = db.select_all_m_qiita_users()\n html.update_page(user_list)",
"def listActiveUsers(request):\n reverseUrl = 'api-datatables-user-list-active-users'\n ### get URL prefix\n prefix = getPrefix(request)\n ### get aoColumns pre-config\n aoColumns = []\n aoColumns += getAoColumnsDictWithTitles(COL_TITLES[reverseUrl])\n ### get filter fields\n filterFields = getFilterFieldIDs(FILTERS[reverseUrl])\n ### get indices of columns to refer by name in render javascript function\n fieldIndices = {}\n for col in ORDER_COLUMNS[reverseUrl]:\n i = None\n try:\n i = ORDER_COLUMNS[reverseUrl].index(col)\n except:\n pass\n fieldIndices[col] = i\n ### get reverse url of the data view\n dataUrl = reverse(reverseUrl)\n ### set request response data\n data = { \\\n 'prefix': prefix, \\\n 'datasrc': str(dataUrl + \"?format=json\"), \\\n 'columns': json_dumps(aoColumns), \\\n 'tableid': 'listactiveusers', \\\n 'caption': 'users', \\\n 'fieldIndices': json_dumps(fieldIndices), \\\n 'filterFields': filterFields, \\\n }\n data.update(getContextVariables(request))\n return render_to_response('pandajob/users/listusers.html', data, RequestContext(request))",
"def allteammember_view(request):\n\tatm = User.objects.filter(profile__is_team_member = True, is_active=True)\n\treturn render(request, 'core_app/team.html', {'teammember': atm} )",
"def user_profile():\n user_id = session[\"user_id\"]\n picks = Pick.query.filter_by(author=user_id).all()\n return render_template(\n \"profile.html\",\n picks=picks\n )",
"def see_inactive_users(self):\n # table result class merely expects an iterator of iterators\n return [('Mike', 'Dan', 'Gabe')]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a number/item, checks whether it is present in the range
|
def contains(self, item: int) -> bool:
    return self.start <= item < self.end
|
[
"def _in_range(value, range):\n # TODO: Implement this\n return True",
"def hasRange(*args, **kwargs):\n \n pass",
"def if_numbers_within_bounds(result):\n for number in result:\n if number < 1 or number > 20:\n return False\n return True",
"def hasValidRange(*args, **kwargs):\n \n pass",
"def validate_in_range(x, a, b):\n return a < x < b",
"def number_within_range(number, min, max):\n try:\n if min <= number <= max:\n return True\n return False\n except Exception:\n return False",
"def ran_check(num, low, high):\n if num in range(low, high + 1):\n return '{} is in the range between {} and {}'.format(num, low, high)\n else:\n return 'The number is outside the range'",
"def key_in_range(self, key):\n if isinstance(key, guid.GUIDMixin):\n key = key.guid\n if isinstance(key, basestring):\n key = int(key, base=16)\n return self.range_min <= key < self.range_max",
"def in_price_range(self, upper: int, lower: int=0) -> bool:\n return lower <= self.price <= upper",
"def _verify_valid_range(self, aid):\n\n ranges = None\n\n partitions = list(self._ranges.keys())\n partitions.sort(key=len, reverse=True)\n for partition in partitions:\n if aid.friendly.startswith(partition):\n ranges = self._ranges[partition]\n break\n\n if ranges is None:\n sys.exit('AID \"%s\" must be prefixed with a partition name' %\n aid.friendly)\n\n if not Utils.in_any_range(int(aid.value, 0), ranges):\n emsg = '\"value\" for aid \"%s\" not in valid range %s, got: %s'\n emsg = emsg % (aid.friendly, str(ranges), aid.value)\n sys.exit(emsg)",
"def in_interval(number, start, stop):\r\n ok = False\r\n if number >= start and number <= stop:\r\n ok = True\r\n return ok",
"def find_range(input_list,input_number):\n first_occurrence = input_list.index(input_number)\n last_occurrence = first_occurrence + input_list.count(input_number) - 1\n return Range(first_occurrence, last_occurrence)",
"def checkBucket(self,value,buckets):\n for ind,uBound in enumerate(buckets):\n if value < uBound: return ind\n return False",
"def is_index_available(self, value: int, lst: list) -> bool:\n return value >= 0 and value < len(lst)",
"def is_between(i, lower, upper):\n pass",
"def get_items_in_range(location: tuple, items: list) -> list:\n items_in_range = []\n for item in items:\n distance = manhattan_distance(location, item)\n if distance <= MIN_DISTANCE:\n items_in_range.append(item)\n return items_in_range",
"def __contains__(self, item):\r\n if self == item:\r\n return True\r\n if isinstance(item, RangeSet):\r\n return all(rng in self for rng in item.ranges())\r\n else:\r\n try:\r\n return self._above_start(item) and self._below_end(item)\r\n except TypeError:\r\n try:\r\n rng_item = Range(item)\r\n return rng_item.start in self and rng_item.end in self\r\n except ValueError:\r\n pass\r\n raise TypeError(f\"'{item}' is not comparable with this Range's start and end\")",
"def range_test(self, *args, **kwargs):\n import sys\n print('sys.maxsize:',sys.maxsize)\n # empty range\n print('list(range(0)):',list(range(0)))\n # using range(stop)\n print('list(range(10)):',list(range(10)))\n # using range(start, stop)\n print('list(range(1, 10)):',list(range(1, 10)))\n start = 2\n stop = 14\n step = 2\n print('list(range(start, stop, step)):',list(range(start, stop, step)))\n start = 2\n stop = -14\n step = -2\n print('list(range(start, stop, step)):',list(range(start, stop, step)))\n # value constraint not met\n print('list(range(start, 14, step)):',list(range(start, 14, step)))\n r = range(0, 20, 2)\n print('r:',r)\n print('11 in r:',11 in r)\n print('10 in r:',10 in r)\n print('r.index(10):',r.index(10))\n print('r[5]:',r[5])\n print('r[:5]:',r[:5])\n print('r[-1]:',r[-1])\n return",
"def in_range(self, value):\n return ((self.lower_bound is None or value >= self.lower_bound) and\n (self.upper_bound is None or value <= self.upper_bound))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks whether this range object is a subrange of the given superrange
|
def is_sub_range(self, range_obj) -> bool:
return self.start >= range_obj.start and self.end <= range_obj.end
|
[
"def hasRange(*args, **kwargs):\n \n pass",
"def hasValidRange(*args, **kwargs):\n \n pass",
"def _in_range(value, range):\n # TODO: Implement this\n return True",
"def is_range(self):\n return True",
"def is_in_boundary(x, start, end):\n return x >= start and x <= end",
"def __validation_property_range(self, p):\n for y in p.included_ranges():\n superclasses = y.super_classes_closure()\n for q in p.super_properties():\n if not any(r == y or r in superclasses for r in q.included_ranges()) and not any(\n r == y or r in superclasses for s in q.super_properties_closure() for r in\n s.included_ranges()):\n raise ValidationError(\"Range {} of property {} isn't a subclass of any range of\"\n \" superproperty {}\".format(y.name(), p.name(), q.name()))",
"def overlaps(self, bookmark):\n begin1, end1 = self.range\n begin2, end2 = bookmark.range\n return begin1 <= end2 and end1 >= begin2",
"def _is_segment_in_block_range(segment, blocks):\n for block in blocks:\n if block.start <= segment.start and segment.end <= block.end:\n return True\n\n return False",
"def _fail_outside_range(self, p: Position) -> bool: # pragma: no cover\n c = self.c\n if not p:\n return True\n if self.node_only:\n return True\n if self.suboutline_only or self.file_only:\n if self.root and p != self.root and not self.root.isAncestorOf(p):\n return True\n if c.hoistStack:\n bunch = c.hoistStack[-1]\n if not bunch.p.isAncestorOf(p):\n g.trace('outside hoist', p.h)\n g.warning('found match outside of hoisted outline')\n return True\n return False # Within range.",
"def is_in_range(position: int, ord_letter: int, boundary: int, offset: int) -> bool:\n lower_bound = (ord_letter * boundary + 1) + offset\n upper_bound = ((ord_letter+1) * boundary) + offset\n return lower_bound <= position <= upper_bound",
"def in_range(self, value):\n return ((self.lower_bound is None or value >= self.lower_bound) and\n (self.upper_bound is None or value <= self.upper_bound))",
"def is_between_inclusive(x, begin, end):\n return begin <= x <= end or end < begin <= x or x <= end < begin",
"def is_subset(subsampling, reference):\n if reference is None:\n return True\n elif subsampling is None and reference is not None:\n return False\n else:\n return set(subsampling).issubset(set(reference))",
"def out_of_bounds(position, bounds):\n return (position[0] < 0 or position[0] >= bounds[0] \n or position[1] < 0 or position[1] >= bounds[1])",
"def contains(self, other):\n if not isinstance(other, ChromosomeInterval):\n raise RuntimeError('ChromosomeInterval:contains expects '\n 'ChromosomeInterval, not %s' % other.__class__)\n # print 'testing contains! me:[%d,%d), them:[%d,%d)' % (self.start, self.stop, other.start, other.stop)\n if self.chromosome != other.chromosome:\n # print 'nope'\n return False\n # self |----*\n # other *----|\n if self.stop <= other.start:\n # print 'nope'\n return False\n # self *----|\n # other |----*\n if self.start >= other.stop:\n # print 'nope'\n return False\n # self *------|\n # other *----|\n if self.start > other.start:\n # print 'nope'\n return False\n # self |-----*\n # other |----*\n if self.stop < other.stop:\n #print 'nope'\n return False\n # print 'yup!'\n return True",
"def is_single_range(self):\n return len(self.range_specs) == 1",
"def validate_in_range(x, a, b):\n return a < x < b",
"def IsInBounds( value, min_, max_ ):\n \n return min_ <= value <= max_",
"def __contains__(self, item):\r\n if self == item:\r\n return True\r\n if isinstance(item, RangeSet):\r\n return all(rng in self for rng in item.ranges())\r\n else:\r\n try:\r\n return self._above_start(item) and self._below_end(item)\r\n except TypeError:\r\n try:\r\n rng_item = Range(item)\r\n return rng_item.start in self and rng_item.end in self\r\n except ValueError:\r\n pass\r\n raise TypeError(f\"'{item}' is not comparable with this Range's start and end\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a new Range object that combines the two ranges if they overlap; otherwise returns an empty Range
|
def combine(self, range_obj) -> "Range":
if self.is_disjoint(range_obj):
return Range(0)
new_start = min(self.start, range_obj.start)
new_end = max(self.end, range_obj.end)
return Range(new_start, new_end)
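The Range snippets in this file rely on a constructor, reset, and is_disjoint that are not shown. A minimal sketch of such a class under those assumptions (hypothetical, not the original implementation), tying contains, is_sub_range, and combine together:

class Range:
    # Hypothetical minimal Range; Range(0) stands in for the empty range.
    def __init__(self, start, end=None):
        if end is None:  # Range(n) behaves like range(n): start defaults to 0
            start, end = 0, start
        self.start, self.end = start, end

    def reset(self):
        # Collapse to the empty range.
        self.start = self.end = 0

    def contains(self, item: int) -> bool:
        return self.start <= item < self.end

    def is_disjoint(self, other: "Range") -> bool:
        return self.end <= other.start or other.end <= self.start

    def is_sub_range(self, other: "Range") -> bool:
        return self.start >= other.start and self.end <= other.end

    def combine(self, other: "Range") -> "Range":
        if self.is_disjoint(other):
            return Range(0)
        return Range(min(self.start, other.start), max(self.end, other.end))

For example, Range(1, 4).combine(Range(3, 8)) yields a Range covering [1, 8), while Range(1, 2).combine(Range(5, 8)) yields the empty Range(0).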
|
[
"def union(self, other: \"Interval\") -> \"Interval\":\n return Interval(min(self.start, other.start), max(self.end, other.end))",
"def union(self, rng):\r\n # if RangeSet, return union of that instead\r\n if isinstance(rng, RangeSet):\r\n return rng.union(self)\r\n # convert other range to a format we can really work with\r\n try:\r\n if not isinstance(rng, Range):\r\n rng = Range(rng)\r\n except ValueError:\r\n raise TypeError(\"Cannot merge a Range with a non-Range\")\r\n # do the ranges overlap?\r\n rng_a, rng_b = (self, rng) if self < rng else (rng, self)\r\n if rng_a.isdisjoint(rng_b) and not (rng_a.end == rng_b.start and rng_a.include_end != rng_b.include_start):\r\n return None\r\n # merge 'em\r\n new_start = min((rng_a.start, rng_a.include_start), (rng_b.start, rng_b.include_start),\r\n key=lambda x: (x[0], not x[1]))\r\n new_end = max((rng_a.end, rng_a.include_end), (rng_b.end, rng_b.include_end))\r\n return Range(start=new_start[0], end=new_end[0], include_start=new_start[1], include_end=new_end[1])",
"def overlap(start_1, end_1, start_2, end_2):\n return range(max(start_1, start_2),\n min(end_1, end_2) + 1)",
"def union(self, other):\n\n if not self.is_valid_range(other):\n msg = \"Unsupported type to test for union '{.__class__.__name__}'\"\n raise TypeError(msg.format(other))\n\n # Optimize empty ranges\n if not self:\n return other\n elif not other:\n return self\n\n if not self.overlap(other) and not self.adjacent(other):\n raise ValueError(\"Ranges must be either adjacent or overlapping\")\n\n lower_bound = min(self._lower_bound, other._lower_bound)\n upper_bound = max(self._upper_bound, other._upper_bound)\n return self.__class__(\n lower_bound.value,\n upper_bound.value,\n lower_bound.inc,\n upper_bound.inc,\n )",
"def _make_compatible(self, other):\n a1, b1 = self.domain()\n a2, b2 = other.domain()\n a = min(a1, a2)\n b = max(b1, b2)\n F = self.extend_by_zero_to(a,b)\n G = other.extend_by_zero_to(a,b)\n endpts = list(set(F.end_points()).union(set(G.end_points())))\n endpts.sort()\n return F, G, zip(endpts, endpts[1:])",
"def union_range(self, attrname, r1, r2):\n if not r1 or not r2:\n return self.bounds[attrname]\n\n isd1, isd2 = r1[0], r2[0]\n vals1, vals2 = r1[1], r2[1]\n isd = isd1 and isd2\n\n if isd:\n vals = set(vals1).union(set(vals2))\n else:\n if isd1:\n vals1 = [vals1[0], vals1[1]]\n if isd2:\n vals2 = [vals2[0], vals2[1]]\n vals = min(vals1[0], vals2[0]), max(vals1[1], vals2[1])\n\n return isd, vals",
"def intersection(self, rng):\r\n # if a RangeSet, then return the intersection of that with this instead.\r\n if isinstance(rng, RangeSet):\r\n return rng.intersection(self)\r\n # convert other range to a format we can work with\r\n try:\r\n if not isinstance(rng, Range):\r\n rng = Range(rng)\r\n except ValueError:\r\n raise TypeError(\"Cannot overlap a Range with a non-Range\")\r\n # do the ranges overlap?\r\n rng_a, rng_b = (self, rng) if self < rng else (rng, self)\r\n if rng_a.isdisjoint(rng_b):\r\n return None\r\n # compute parameters for new intersecting range\r\n # new_start = rng_b.start\r\n # new_include_start = new_start in rng_a\r\n # if rng_a.end < rng_b.end:\r\n # new_end = rng_a.end\r\n # new_include_end = new_end in rng_b\r\n # else:\r\n # new_end = rng_b.end\r\n # new_include_end = new_end in rng_a\r\n new_start = max((rng_a.start, rng_a.include_start), (rng_b.start, rng_b.include_start),\r\n key=lambda x: (x[0], not x[1]))\r\n new_end = min((rng_a.end, rng_a.include_end), (rng_b.end, rng_b.include_end))\r\n # create and return new range\r\n return Range(start=new_start[0], end=new_end[0], include_start=new_start[1], include_end=new_end[1])",
"def range_merge(ranges, dist=0):\n if not ranges:\n return []\n\n ranges.sort()\n\n cur_range = list(ranges[0])\n merged_ranges = []\n for r in ranges[1:]:\n # open new range if start > cur_end or seqid != cur_seqid\n if r[1] - cur_range[2] > dist or r[0] != cur_range[0]:\n merged_ranges.append(tuple(cur_range))\n cur_range = list(r)\n else:\n cur_range[2] = max(cur_range[2], r[2])\n merged_ranges.append(tuple(cur_range))\n\n return merged_ranges",
"def join_trixel_bound_sets(b1, b2):\n b1_sorted = HalfSpace.merge_trixel_bounds(b1)\n b2_sorted = HalfSpace.merge_trixel_bounds(b2)\n\n # maximum/minimum trixel bounds outside of which trixel ranges\n # will be considered invalid\n global_t_min = max(b1_sorted[0][0], b2_sorted[0][0])\n global_t_max = min(b1_sorted[-1][1], b2_sorted[-1][1])\n\n b1_keep = [r for r in b1_sorted if r[0]<=global_t_max and r[1]>=global_t_min]\n b2_keep = [r for r in b2_sorted if r[0]<=global_t_max and r[1]>=global_t_min]\n\n dex1 = 0\n dex2 = 0\n n_b1 = len(b1_keep)\n n_b2 = len(b2_keep)\n joint_bounds = []\n\n if n_b1==0 or n_b2==0:\n return joint_bounds\n\n while True:\n r1 = b1_keep[dex1]\n r2 = b2_keep[dex2]\n if r1[0]<=r2[0] and r1[1]>=r2[1]:\n # r2 is completely inside r1;\n # keep r2 and advance dex2\n joint_bounds.append(r2)\n dex2 += 1\n elif r2[0]<=r1[0] and r2[1]>=r1[1]:\n # r1 is completely inside r2;\n # keep r1 and advance dex1\n joint_bounds.append(r1)\n dex1 += 1\n else:\n # The two bounds are either disjoint, or they overlap;\n # find the intersection\n local_min = max(r1[0], r2[0])\n local_max = min(r1[1], r2[1])\n if local_min<=local_max:\n # if we have a valid range, keep it\n joint_bounds.append((local_min, local_max))\n\n # advance the bound that is lowest\n if r1[1] < r2[1]:\n dex1 += 1\n else:\n dex2 += 1\n\n # if we have finished scanning one or another of the\n # bounds, leave the loop\n if dex1 >= n_b1 or dex2 >= n_b2:\n break\n\n return HalfSpace.merge_trixel_bounds(joint_bounds)",
"def rangeset_intersect(ranges0, ranges1, presorted=False):\n\n if len(ranges0) == 0 or len(ranges1) == 0:\n return _np.empty([0, 2])\n rng0, rng1 = list(map(_np.asarray, [ranges0, ranges1]))\n\n if not presorted:\n rng0, rng1 = [r[_np.argsort(r[:,0])] for r in [rng0, rng1]]\n for rng in [rng0, rng1]:\n assert _np.all(rng[:,1] > rng[:,0])\n\n l0, r0 = rng0.T\n l1, r1 = rng1.T\n f0, f1 = [rng.flatten() for rng in [rng0, rng1]]\n\n lin0 = inranges(l0, f1, [1, 0])\n rin0 = inranges(r0, f1, [0, 1])\n lin1 = inranges(l1, f0, [0, 0])\n rin1 = inranges(r1, f0, [0, 0])\n\n #keep only those edges that are within a good area of the other range\n l = weave(l0[lin0], l1[lin1])\n r = weave(r0[rin0], r1[rin1])\n return _np.array([l, r]).T",
"def overlap(a, b):\n return not set(a).isdisjoint(b)",
"def intersect(self, other):\n if not self.overlaps(other):\n return None\n return Interval(\n self._max(self.begin, other.begin),\n self._min(self.end, other.end)\n )",
"def range_interleave(ranges, sizes={}, empty=False):\n ranges = range_merge(ranges)\n interleaved_ranges = []\n\n for ch, cranges in groupby(ranges, key=lambda x: x[0]):\n cranges = list(cranges)\n size = sizes.get(ch, None)\n if size:\n ch, astart, aend = cranges[0]\n if astart > 1:\n interleaved_ranges.append((ch, 1, astart - 1))\n elif empty:\n interleaved_ranges.append(None)\n\n for a, b in pairwise(cranges):\n ch, astart, aend = a\n ch, bstart, bend = b\n istart, iend = aend + 1, bstart - 1\n if istart <= iend:\n interleaved_ranges.append((ch, istart, iend))\n elif empty:\n interleaved_ranges.append(None)\n\n if size:\n ch, astart, aend = cranges[-1]\n if aend < size:\n interleaved_ranges.append((ch, aend + 1, size))\n elif empty:\n interleaved_ranges.append(None)\n\n return interleaved_ranges",
"def merge_ranges(inranges,already_sorted=False):\n if not already_sorted: inranges = sort_ranges(inranges)\n prev = None\n outputs = []\n merged = False\n for rng in inranges:\n #nrng = rng.copy()\n #nrng.set_payload([])\n #nrng.get_payload().append(rng)\n merged = False\n if len(outputs) > 0:\n if rng.overlaps(outputs[-1]) or rng.adjacent(outputs[-1]):\n nrng = rng.merge(outputs[-1])\n #nrng.set_payload(prev.get_payload())\n #nrng.get_payload().append(rng)\n outputs[-1] = nrng\n merged = True\n if not merged:\n outputs.append(rng.copy())\n #prev = nrng\n #if not merged: outputs.append(prev)\n return sort_ranges(outputs)",
"def difference(self, rng):\r\n # if a RangeSet, then return the intersection of one of those with this instead.\r\n if isinstance(rng, RangeSet):\r\n return RangeSet(self).difference(rng)\r\n # convert other range to a workable format\r\n try:\r\n if not isinstance(rng, Range):\r\n rng = Range(rng)\r\n except ValueError:\r\n raise TypeError(\"Cannot diff a Range with a non-Range\")\r\n # completely disjoint\r\n if rng.isempty():\r\n return self\r\n elif self.isdisjoint(rng):\r\n return self\r\n # fully contained\r\n elif self in rng or self == rng:\r\n return None\r\n # fully contained (in the other direction)\r\n elif rng in self:\r\n lower = Range(start=self.start, end=rng.start,\r\n include_start=self.include_start, include_end=not rng.include_start)\r\n upper = Range(start=rng.end, end=self.end,\r\n include_start=not rng.include_end, include_end=self.include_end)\r\n # exclude empty ranges\r\n if lower.isempty():\r\n return upper\r\n elif upper.isempty():\r\n return lower\r\n else:\r\n return RangeSet(lower, upper)\r\n # lower portion of this range\r\n elif self < rng:\r\n new_rng = Range(start=self.start, end=rng.start,\r\n include_start=self.include_start, include_end=not rng.include_start)\r\n return None if new_rng.isempty() else new_rng\r\n # higher portion of this range\r\n else: # self > rng:\r\n new_rng = Range(start=rng.end, end=self.end,\r\n include_start=not rng.include_end, include_end=self.include_end)\r\n return None if new_rng.isempty() else new_rng",
"def self_merge_plus_minus_ranges_w_duplicates (self):\n self.total_unique = None\n self.total = 0\n for chrom in self.__ranges.keys():\n (plus_tags,minus_tags) = self.__ranges[chrom]\n new_plus_tags = array(BYTE4,[])\n #reset counts\n self.__counts[chrom][0] = array(BYTE2,[])\n self.__counts[chrom][1] = array(BYTE2,[])\n ip = 0\n im = 0\n lenp = len(plus_tags)\n lenm = len(minus_tags)\n while ip < lenp and im < lenm:\n if plus_tags[ip] < minus_tags[im]:\n new_plus_tags.append(plus_tags[ip])\n ip += 1\n else:\n new_plus_tags.append(minus_tags[im])\n im += 1\n if im < lenm:\n # add rest of minus tags\n new_plus_tags.extend(minus_tags[im:])\n if ip < lenp:\n # add rest of plus tags\n new_plus_tags.extend(plus_tags[ip:])\n\n self.__ranges[chrom] = [new_plus_tags,[]]\n self.total += len(new_plus_tags)",
"def shpBBoxMerge(a, b):\n return (\n min(a[0], b[0]),\n min(a[1], b[1]),\n max(a[2], b[2]),\n max(a[3], b[3])\n )",
"def union(self, rng_set):\r\n # convert to RangeSet\r\n rng_set = RangeSet._to_rangeset(rng_set)\r\n # simply merge lists\r\n return RangeSet(self._ranges + rng_set._ranges)",
"def range_conflict(ranges, depth=1):\n overlap = set()\n active = set()\n endpoints = _make_endpoints(ranges)\n\n for seqid, ends in groupby(endpoints, lambda x: x[0]):\n active.clear()\n for seqid, pos, leftright, i, score in ends:\n if leftright == LEFT:\n active.add(i)\n else:\n active.remove(i)\n\n if len(active) > depth:\n overlap.add(tuple(sorted(active)))\n\n for ov in overlap:\n yield ov"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the length of the Range
|
def length(self) -> int:
return self.end - self.start
|
[
"def _range_len_ ( self ) :\n return self.size()",
"def __len__(self):\n return sum(len(r) for r in self.ranges)",
"def length(self):\r\n # try normally\r\n try:\r\n return self.end - self.start\r\n except (TypeError, ArithmeticError, ValueError) as _:\r\n pass\r\n if not isinstance(self.start, self.end.__class__):\r\n # try one-way conversion\r\n try:\r\n return self.end - self.end.__class__(self.start)\r\n except (TypeError, ArithmeticError, ValueError) as _:\r\n pass\r\n # try the other-way conversion\r\n try:\r\n return self.start.__class__(self.end) - self.start\r\n except (TypeError, ArithmeticError, ValueError) as _:\r\n pass\r\n raise TypeError(f\"Range of {self.start.__class__} to {self.end.__class__} has no defined length\")",
"def range_length(self):\n if self._range_length is None:\n self._range_length = int(np.prod([len(x) for x in self.space_map.values()]))\n return self._range_length",
"def subrange_length(self, start, end):\n assert 0 <= start <= end <= self.range_length\n if self._shared_filter is None:\n return end - start\n if self._shared_filter_cache is None:\n self._make_shared_filter_cache()\n return self._shared_filter_cache[start:end].count(True)",
"def get_path_length(self, start, end):",
"def length (self):\n chrs = self.regions.keys()\n chrs.sort()\n l = 0\n for chrom in chrs:\n for region in self.regions[chrom]:\n l+= region[1]-region[0]\n return l",
"def length(self, selection=False):\n\t\tif selection:\n\t\t\treturn 0 if self.mask is None else np.sum(self.mask)\n\t\telse:\n\t\t\treturn len(self)",
"def get_length(self):\n\t\treturn self.z[1] - self.z[0]",
"def getLength(self):\n return HopperLowLevel.getSegmentLength(self.__internal_segment_addr__)",
"def length_in(self):\n return self._length_in",
"def getLength(self):\n return HopperLowLevel.getSectionLength(self.__internal_section_addr__)",
"def __get_range(self):\n return self.high - self.low",
"def get_range_ncolumns(self, rangename, wsid=None):\n self.activate(wsid)\n _range = self._range(rangename)\n data = self._get_range_data(_range, False)\n if type(data[0]) is tuple:\n return len(data[0])\n return 1",
"def getLength(self) -> \"int\":\n return _coin.SoPath_getLength(self)",
"def getLength(self) -> \"int\":\n return _coin.SoAuditorList_getLength(self)",
"def range(self):\n return self.hi - self.lo",
"def _get_length(self):\n return self.Data.Length",
"def length(self) -> \"double\":\n return _coin.SbVec4d_length(self)",
"def range_size(self, a, b):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Shifts the Range by n by adding n to both limits
|
def shift(self, n: int) -> None:
self.start += n
self.end += n
if self.start > self.end:
self.reset()
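A standalone sketch of the shift family described here and in the rshift/lshift snippets below, with reset() assumed to simply empty the range:

class ShiftableRange:
    # Hypothetical minimal container for demonstrating shift semantics.
    def __init__(self, start: int, end: int) -> None:
        self.start, self.end = start, end

    def reset(self) -> None:
        self.start = self.end = 0

    def shift(self, n: int) -> None:
        # Move both limits by n.
        self.start += n
        self.end += n
        if self.start > self.end:
            self.reset()

    def rshift(self, n: int) -> None:
        # Move only the ending point by n.
        self.end += n
        if self.start > self.end:
            self.reset()

    def lshift(self, n: int) -> None:
        # Move only the starting point by n.
        self.start += n
        if self.start > self.end:
            self.reset()

r = ShiftableRange(2, 5)
r.shift(3); r.rshift(2); r.lshift(4)  # [2, 5) -> [5, 8) -> [5, 10) -> [9, 10)
r.lshift(5)                           # start would pass end, so the range resets
print(r.start, r.end)                 # prints: 0 0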
|
[
"def rshift(self, n: int) -> None:\n self.end += n\n if self.start > self.end:\n self.reset()",
"def lshift(self, n: int) -> None:\n self.start += n\n if self.start > self.end:\n self.reset()",
"def shift(x, n):\n if n > 0:\n return np.pad(x, (n, 0), mode='constant')[:len(x)]\n else:\n return np.pad(x, (0, -n), mode='constant')[-len(x):]",
"def __extend_past(self, n):\n\t\tif n > self.__min_index:\n\t\t\tself.__points.extend([self.__iter.next() for i in range(self.__min_index + len(self.__points), n)])",
"def shift(list, n=0):\n return list[-n:]+list[:-n]",
"def wrapped_range(self, range_size, start, traversal_length):\n if start > range_size: start %= range_size\n for i in range(traversal_length):\n yield (start + i) % range_size",
"def shift(xs, n):\n if n == 0:\n return xs\n e = np.empty_like(xs)\n if n >= 0:\n e[:n] = np.nan\n e[n:] = xs[:-n]\n else:\n e[n:] = np.nan\n e[:n] = xs[-n:]\n return e",
"def expand_by(self, n: int):\n self.x -= n\n self.y -= n\n self.w += n\n self.h += n",
"def __rshift__(self: bitlist, n: Union[int, Set[int]]) -> bitlist:\n if isinstance(n, set) and len(n) == 1 and isinstance(list(n)[0], int):\n n = list(n)[0] % len(self) # Allow rotations to wrap around.\n return bitlist(list(self.bits[-n:]) + list(self.bits[:-n]))\n\n return bitlist(list(reversed(self.bits[n:])))",
"def shift_vec_to_range(v, old_range_lower_bound=None, new_range_lower_bound=0,v_range=None,inclusive_upper_bound=True):\n # Deal w/ v_range default if not specified\n if v_range is None:\n v_range = np.ptp(v) # set to range of input vector (basically will become a translation\n if old_range_lower_bound is None:\n old_range_lower_bound = np.min(v) # set to range of input vector (basically will become a translation\n \n # overload greater and less than functions (gf and lf) to deal with upper bound inclusivity\n if inclusive_upper_bound:\n gf = np.greater # must actually violate bound\n lf = np.less\n else:\n gf = np.greater_equal # matching bound is a violation\n lf = np.less_equal\n \n # Ensure values of v conform to prescribed range\n if ((np.min(v) < old_range_lower_bound) or (gf(np.max(v) , (old_range_lower_bound + v_range)))):\n raise ValueError ('Values of v violate range prescribed by lower bound ' + str(old_range_lower_bound) + ' and ' + 'inclusive'*inclusive_upper_bound + ' width ' + str(v_range) + ' (saw range of ' + str([np.min(v), np.max(v)]) + ')')\n \n \n shift_direction = np.sign(new_range_lower_bound-old_range_lower_bound) # 1 (+, shift up), (0, none/return), (-, shift down)\n \n if shift_direction == 0: # shift none\n return v\n \n elif shift_direction == 1: # shift up\n # if shifting up, need (lower_bound + range) + n*range to end up in range (first n such that we land >= new_lower_bound)\n n_shift = np.ceil((new_range_lower_bound - (old_range_lower_bound + v_range)) / v_range)\n v = v + n_shift*v_range # apply \n v[v < new_range_lower_bound] += v_range # should only need to clean up once now thanks to our math! (always use normal less than bc you dont wanna add +range to an exact match lower bound with a noninclusive upper bound). Matching lower bound is fine\n \n elif shift_direction == -1: # shift down\n # if shifting down, need lower_bound to end up in range (first n such that we land <= (new_lower_bound + range)\n # this is actually symmetric (as shifting is) so we just have to switch variables... but then we go the other direction so neg\n n_shift = np.ceil((old_range_lower_bound - (new_range_lower_bound + v_range)) / v_range)\n v = v - n_shift*v_range\n v[gf(v,(new_range_lower_bound+v_range))] -= v_range # here we wanna use the over loaded class since matching the uppper bound is problematic if not inclusive\n \n return v",
"def rec_range(self, n):\n \tif n == 0:\n \treturn \n \telif n == 1:\n \treturn (0,)\n \telse:\n \treturn self.rec_range(n-1) + (n-1,)",
"def __lshift__(self: bitlist, n: Union[int, Set[int]]) -> bitlist:\n if isinstance(n, set) and len(n) == 1 and isinstance(list(n)[0], int):\n n = list(n)[0] % len(self) # Allow rotations to wrap around.\n return bitlist(list(self.bits[n:]) + list(self.bits[:n]))\n\n return bitlist(list(reversed(list([0] * n) + list(self.bits))))",
"def sum_range(n, total=0):\n if not n:\n return total\n else:\n raise TailCall(sum_range, n - 1, n + total)",
"def _extend_per_slice(values, limits):\n extended_values = []\n for value, low, high in izip(values, limits[: -1], limits[1:]):\n extended_values += [value] * (high - low)\n return extended_values",
"def make_range(self, first, current, last,\n extra=1, before=5, after=5, max_no=20):\n if last < max_no:\n return range(first, last+extra)\n else:\n if current-first < before:\n return range(first, current+after+extra)\n elif last-current < after:\n return range(current-before, last+extra)\n else:\n return range(current-before, current+after+extra)",
"def newRange(pStart, pSteps):\n\treturn range(pStart, pStart + pSteps)",
"def giveRange(n):\n return [ (n)**2 *2*np.pi , (n+1)**2 *2*np.pi ]",
"def unroll(x, lim = 3.14159265359):\n from numpy import diff\n d = diff(x)\n for i in range(0,len(x)-1):\n if abs(d[i]) > lim: x[i+1:] = x[i+1:] - d[i]\n return x",
"def shift(self, n, fill_value=np.nan):\n if self.singular:\n return # Can't roll for singular coordinates\n elif n == 0:\n return\n\n if (self.unit is not None\n and not isinstance(fill_value, units.Quantity)):\n fill_value = fill_value * self.unit\n\n if self.coordinates.ndim == 1:\n self.coordinates = np.roll(self.coordinates, n)\n else:\n self.coordinates = np.roll(self.coordinates, n, axis=1)\n\n blank = slice(0, n) if n > 0 else slice(n, None)\n if self.coordinates.ndim > 1:\n blank = slice(None), blank\n self.coordinates[blank] = fill_value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Shifts the ending point of the Range by n
|
def rshift(self, n: int) -> None:
self.end += n
if self.start > self.end:
self.reset()
|
[
"def shift(self, n: int) -> None:\n self.start += n\n self.end += n\n\n if self.start > self.end:\n self.reset()",
"def lshift(self, n: int) -> None:\n self.start += n\n if self.start > self.end:\n self.reset()",
"def shift(x, n):\n if n > 0:\n return np.pad(x, (n, 0), mode='constant')[:len(x)]\n else:\n return np.pad(x, (0, -n), mode='constant')[-len(x):]",
"def __extend_past(self, n):\n\t\tif n > self.__min_index:\n\t\t\tself.__points.extend([self.__iter.next() for i in range(self.__min_index + len(self.__points), n)])",
"def expand_by(self, n: int):\n self.x -= n\n self.y -= n\n self.w += n\n self.h += n",
"def shift(list, n=0):\n return list[-n:]+list[:-n]",
"def shift(self, n, fill_value=np.nan):\n if self.singular:\n return # Can't roll for singular coordinates\n elif n == 0:\n return\n\n if (self.unit is not None\n and not isinstance(fill_value, units.Quantity)):\n fill_value = fill_value * self.unit\n\n if self.coordinates.ndim == 1:\n self.coordinates = np.roll(self.coordinates, n)\n else:\n self.coordinates = np.roll(self.coordinates, n, axis=1)\n\n blank = slice(0, n) if n > 0 else slice(n, None)\n if self.coordinates.ndim > 1:\n blank = slice(None), blank\n self.coordinates[blank] = fill_value",
"def rec_range(self, n):\n \tif n == 0:\n \treturn \n \telif n == 1:\n \treturn (0,)\n \telse:\n \treturn self.rec_range(n-1) + (n-1,)",
"def shift(xs, n):\n if n == 0:\n return xs\n e = np.empty_like(xs)\n if n >= 0:\n e[:n] = np.nan\n e[n:] = xs[:-n]\n else:\n e[n:] = np.nan\n e[:n] = xs[-n:]\n return e",
"def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n if n == 0:\n print_move(start, end)\n else:\n remainder = 2 * 3 - start - end\n print_move(start, remainder)\n move_stack(n - 1, start, remainder)\n print_move(remainder, end)",
"def step_int(start: int, end: int, t: float) -> int:\n return int(start + (end - start) * t)",
"def giveRange(n):\n return [ (n)**2 *2*np.pi , (n+1)**2 *2*np.pi ]",
"def wrapped_range(self, range_size, start, traversal_length):\n if start > range_size: start %= range_size\n for i in range(traversal_length):\n yield (start + i) % range_size",
"def up_to(n, iterable):\n return itertools.takewhile(lambda i: i <= n, iterable)",
"def distribute(n, end_value_range=None, dist=1, sampled_range_of_dist=(0, 1)):\n if isinstance(dist, float) or isinstance(dist, int):\n distribution = lambda x: np.exp(dist * x)\n else:\n distribution = dist\n\n x_increment = np.abs(max(sampled_range_of_dist) - min(sampled_range_of_dist)) / n\n pts = np.array([distribution(x_increment*i) for i in range(n)])\n pts /= abs(max(pts) - min(pts))\n\n if end_value_range is not None:\n pts = pts*(max(end_value_range) - min(end_value_range)) + min(end_value_range)\n return pts",
"def everyNthTile(tiling, n, start=0, end=100000):\n\treturn semstg.AdapterTiling(tiling, start, end, n)",
"def sum_range(n, total=0):\n if not n:\n return total\n else:\n raise TailCall(sum_range, n - 1, n + total)",
"def value_n_from_end(self, n):\n index_from_start = self.size - n - 1\n if index_from_start < 0:\n raise Exception(\"Index out of bound!!\")\n if n == 0:\n node = self.tail\n else:\n i = 0\n node = self.head\n while i == index_from_start:\n node = node.next_node\n i += 1\n return node.data",
"def towers_of_hanoi(n, start, end):\n assert 0 < start <= 3 and 0 < end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"\n move = lambda n, a, b: print(\"Move the top disk from rod \"+str(a)+\" to rod \"+str(b))\n def other():\n all = [1, 2, 3]\n for i in all:\n if i != start and i != end:\n return i\n if n == 1:\n move(n, start, end)\n else:\n towers_of_hanoi(n - 1, start, other())\n move(n, start, end)\n towers_of_hanoi(n - 1, other(), end)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Shifts starting point of Range by n
|
def lshift(self, n: int) -> None:
self.start += n
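    # Reset the range if the shifted start has moved past the end.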
if self.start > self.end:
self.reset()
|
[
"def shift(self, n: int) -> None:\n self.start += n\n self.end += n\n\n if self.start > self.end:\n self.reset()",
"def rshift(self, n: int) -> None:\n self.end += n\n if self.start > self.end:\n self.reset()",
"def shift(x, n):\n if n > 0:\n return np.pad(x, (n, 0), mode='constant')[:len(x)]\n else:\n return np.pad(x, (0, -n), mode='constant')[-len(x):]",
"def step_int(start: int, end: int, t: float) -> int:\n return int(start + (end - start) * t)",
"def shift(list, n=0):\n return list[-n:]+list[:-n]",
"def wrapped_range(self, range_size, start, traversal_length):\n if start > range_size: start %= range_size\n for i in range(traversal_length):\n yield (start + i) % range_size",
"def __extend_past(self, n):\n\t\tif n > self.__min_index:\n\t\t\tself.__points.extend([self.__iter.next() for i in range(self.__min_index + len(self.__points), n)])",
"def shift(self, n, fill_value=np.nan):\n if self.singular:\n return # Can't roll for singular coordinates\n elif n == 0:\n return\n\n if (self.unit is not None\n and not isinstance(fill_value, units.Quantity)):\n fill_value = fill_value * self.unit\n\n if self.coordinates.ndim == 1:\n self.coordinates = np.roll(self.coordinates, n)\n else:\n self.coordinates = np.roll(self.coordinates, n, axis=1)\n\n blank = slice(0, n) if n > 0 else slice(n, None)\n if self.coordinates.ndim > 1:\n blank = slice(None), blank\n self.coordinates[blank] = fill_value",
"def expand_by(self, n: int):\n self.x -= n\n self.y -= n\n self.w += n\n self.h += n",
"def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n if n == 0:\n print_move(start, end)\n else:\n remainder = 2 * 3 - start - end\n print_move(start, remainder)\n move_stack(n - 1, start, remainder)\n print_move(remainder, end)",
"def newRange(pStart, pSteps):\n\treturn range(pStart, pStart + pSteps)",
"def RangeTemplate(n, start=32, branch=4, shared=False):\n rows = []\n width = start\n idx = 1\n while width <= n:\n for i in range(0, n-width//2, width//2):\n row = np.zeros(n, dtype=int)\n row[i:i+width] = np.arange(width) + idx\n if not shared: idx += width\n rows.append(row)\n if shared: idx += width\n width *= branch\n return AugmentedIdentity(np.vstack(rows))",
"def shift(xs, n):\n if n == 0:\n return xs\n e = np.empty_like(xs)\n if n >= 0:\n e[:n] = np.nan\n e[n:] = xs[:-n]\n else:\n e[n:] = np.nan\n e[:n] = xs[-n:]\n return e",
"def everyNthTile(tiling, n, start=0, end=100000):\n\treturn semstg.AdapterTiling(tiling, start, end, n)",
"def extend_indeces(start, n, iInc, jInc):\n return [ (start[0]+k*iInc, start[1]+k*jInc) for k in xrange(0, n) ]",
"def _rangeify(self, slice):\n start, stop, step = slice.start, slice.stop, slice.step\n if step is None:\n step = 1\n if start is None and step >= 0:\n start = 0\n elif start is None and step < 0:\n start = -1\n if stop is None and step >= 0:\n stop = self.__len__()\n elif stop is None and step < 0:\n stop = -self.__len__() - 1\n return range(start, stop, step)",
"def giveRange(n):\n return [ (n)**2 *2*np.pi , (n+1)**2 *2*np.pi ]",
"def smoothstep(min, max, x):\n\n pass",
"def rec_range(self, n):\n \tif n == 0:\n \treturn \n \telif n == 1:\n \treturn (0,)\n \telse:\n \treturn self.rec_range(n-1) + (n-1,)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mark the given address as warm if it was not previously.
|
def _mark_address_warm(computation: ComputationAPI, address: Address) -> bool:
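    # Returns True only when this call transitions the address from cold to warm.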
if computation.state.is_address_warm(address):
return False
else:
computation.state.mark_address_warm(address)
return True
|
[
"def freeze(self, address: str):\n address = Address.from_string(address)\n return self.wallet.set_frozen_state([address], True)",
"def setAddress(self, address: ghidra.program.model.address.Address) -> None:\n ...",
"def new_address(self, name, address):\n if address not in self.ip_addresses:\n if any([regex.findall(name) for regex in self.regex_set]):\n self.ip_addresses.update([address])",
"def _autoscan_address(address):\n\n try:\n ipaddress = IPAddress.objects.get(address=address)\n except IPAddress.DoesNotExist:\n ipaddress = None\n if ipaddress and ipaddress.is_buried:\n return\n pinged = ping(address)\n if pinged:\n if not ipaddress:\n ipaddress, created = IPAddress.objects.get_or_create(\n address=address,\n )\n ipaddress.http_family = get_http_family(ipaddress.address)\n (\n ipaddress.snmp_name,\n ipaddress.snmp_community,\n ipaddress.snmp_version,\n ) = get_snmp(ipaddress)\n ipaddress.dead_ping_count = 0\n ipaddress.save(update_last_seen=True)\n else:\n if ipaddress:\n ipaddress.http_family = None\n ipaddress.snmp_name = None\n ipaddress.snmp_community = None\n ipaddress.snmp_version = None\n ipaddress.dead_ping_count += 1\n ipaddress.save(update_last_seen=False)",
"def invalidate_address(self, address, is_master):\n self.logger.debug('{} invalidateAddress {} / {}'.format(\n self.namespec(), self.addresses, address))\n # reassign the difference between current set and parameter\n if address in self.addresses:\n self.addresses.remove(address)\n if address in self.infos:\n # force process info to UNKNOWN at address\n self.infos[address]['state'] = ProcessStates.UNKNOWN\n # check if conflict still applicable\n if not self.evaluate_conflict():\n if len(self.addresses) == 1:\n # if process is running on only one address,\n # the global state is the state of this process\n self.state = next(self.infos[address]['state']\n for address in self.addresses)\n elif self.running():\n # addresses is empty for a running process\n # action expected to fix the inconsistency\n self.logger.warn('no more address for running process '\\\n '{}'.format(self.namespec()))\n self.state = ProcessStates.FATAL\n # notify the failure to dedicated handler, only if local\n # address is master\n if is_master:\n self.supvisors.failure_handler.add_default_job(self)\n elif self.state == ProcessStates.STOPPING:\n # STOPPING is the last state received before the address is lost\n # consider STOPPED now\n self.state = ProcessStates.STOPPED\n else:\n self.logger.debug('process {} still in conflict after address '\\\n 'invalidation'.format(self.namespec()))",
"def update_address(self, address_details):\n pass",
"def move_address(self, address):\n to_change = {}\n to_move = {}\n to_insert = {}\n to_clean = []\n mp = 0\n oa = 0\n (buildings, parts) = self.index_of_building_and_parts()\n exp = \"NOT(localId ~ '_')\"\n ppv, geometries = self.get_parents_per_vertex_and_geometries(exp)\n pbar = self.get_progressbar(_(\"Move addresses\"), address.featureCount())\n for ad in address.getFeatures():\n refcat = self.get_id(ad)\n building_count = len(buildings.get(refcat, []))\n ad_buildings = buildings[refcat]\n ad_parts = parts[refcat]\n if building_count == 0:\n to_clean.append(ad.id())\n oa += 1\n else:\n if ad[\"spec\"] == \"Entrance\":\n self.move_entrance(\n ad,\n ad_buildings,\n ad_parts,\n to_move,\n to_insert,\n ppv,\n )\n if ad[\"spec\"] != \"Entrance\" and building_count > 1:\n to_clean.append(ad.id())\n mp += 1\n if ad[\"spec\"] != \"Parcel\" and building_count == 1:\n to_change[ad.id()] = get_attributes(ad)\n if len(to_insert) > BUFFER_SIZE:\n self.writer.changeGeometryValues(to_insert)\n to_insert = {}\n pbar.update()\n pbar.close()\n address.writer.changeAttributeValues(to_change)\n address.writer.changeGeometryValues(to_move)\n if len(to_insert) > 0:\n self.writer.changeGeometryValues(to_insert)\n msg = _(\"Moved %d addresses to entrance, %d specification changed\")\n log.debug(msg, len(to_move), len(to_change))\n if len(to_clean) > 0:\n address.writer.deleteFeatures(to_clean)\n if oa > 0:\n msg = _(\"Deleted %d addresses without associated building\")\n log.debug(msg, oa)\n report.pool_addresses = oa\n if mp > 0:\n msg = _(\"Refused %d addresses belonging to multiple buildings\")\n log.debug(msg, mp)\n report.multiple_addresses = mp",
"def markAsUndefined(self,addr):\n return HopperLowLevel.markAsUndefined(self.__internal_segment_addr__,addr)",
"def ping_and_warm(\n self,\n ) -> Callable[\n [bigtable.PingAndWarmRequest], Awaitable[bigtable.PingAndWarmResponse]\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"ping_and_warm\" not in self._stubs:\n self._stubs[\"ping_and_warm\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.v2.Bigtable/PingAndWarm\",\n request_serializer=bigtable.PingAndWarmRequest.serialize,\n response_deserializer=bigtable.PingAndWarmResponse.deserialize,\n )\n return self._stubs[\"ping_and_warm\"]",
"def setBumpmapEnabled(state: 'SoState', value: 'SbBool const') -> \"void\":\n return _coin.SoShapeStyleElement_setBumpmapEnabled(state, value)",
"def unfreeze(self, address: str):\n address = Address.from_string(address)\n return self.wallet.set_frozen_state([address], False)",
"def setBumpMapOverride(state: 'SoState', value: 'SbBool const') -> \"void\":\n return _coin.SoTextureOverrideElement_setBumpMapOverride(state, value)",
"def set_address(self, address):\n self._fields['address'] = address",
"def __enter_sequence_main_region_warmup_default(self):\n\t\tself.__entry_action_main_region_warmup()\n\t\tself.__state_vector[0] = self.State.main_region_warmup\n\t\tself.__state_conf_vector_changed = True",
"def set_standby (self):\n log.debug(\"Put request: %s in standby mode\" % self.__id)\n self.__standby = True",
"def jump(self, address):\n self.PC = address",
"def poke(address):\n c = peek(address)\n if c is None: return False\n try: write(address, c)\n except: return False\n return True",
"def jump_and_run(self, address: int) -> bool:\n logger.info(f\"TX-CMD: Jump To Address: 0x{address:08X}\")\n cmd_packet = CmdPacket(CommandTag.JUMP_ADDRESS, address, 0, 0)\n return self._process_cmd(cmd_packet)",
"def set_breakpoint_address(self, break_addr):\n #break_addr = int(break_addr, 16)\n #if not self.start_addr <= break_addr <= self.end_addr:\n # raise AddressOutOfRange(\"Address is out of .text memory range!\")\n #else:\n bp_id = self._next_bp_id()\n self.instruction_breakpoints[break_addr] = bp_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a list of PII of interest from the document. Return a list of the document's PII entity types, keeping only the entities of interest whose score is above the confidence threshold.
|
def get_interested_pii(document: Document, classification_config: PiiConfig):
pii_entities = []
for name, score in document.pii_classification.items():
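        # Keep entity types that are configured (or covered by the ALL wildcard) and meet the confidence threshold.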
if name in classification_config.pii_entity_types or ALL in classification_config.pii_entity_types:
if score >= classification_config.confidence_threshold:
pii_entities.append(name)
return pii_entities
|
[
"def prob_classify(self, document):\n features = document.get_features()\n probs = self.classifier.prob_classify(features)\n return probs",
"def extract_skills_in_document(document_id) -> List[SkillExtract]:\n\n skills_resource_dir = os.path.join(app.root_path, \"resources/ontologies\")\n skill_nodes = load_skill_nodes_from_rdf_resources(skills_resource_dir)\n\n if len(skill_nodes) == 0:\n app.logger.debug(\"There is no skill to query\")\n return []\n\n result = set()\n es_index = app.config[\"ELASTICSEARCH_INDEX\"]\n\n skill_nodes = list(skill_nodes) # set to list\n skill_nodes_len = len(skill_nodes)\n skill_nodes_dict = dict() # dict by skill name/label to skill_node\n for skill_node in skill_nodes:\n skill_nodes_dict[skill_node.name] = skill_node\n if skill_node.labels is not None:\n for label in skill_node.labels:\n skill_nodes_dict[label] = skill_node\n\n page_index = 0\n page_size = 100\n\n while page_size * page_index < skill_nodes_len:\n page_from = page_index * page_size\n page_to = page_index * page_size + page_size\n page_to = min(page_to, skill_nodes_len)\n page_index = page_index + 1\n\n skill_nodes_page = skill_nodes[page_from:page_to]\n skills_page = []\n for skill_node in skill_nodes_page:\n skills_page.append(skill_node.name)\n skills_page.extend(skill_node.labels)\n\n res = search_skills(skills_page, index=es_index,\n document_ids=[document_id])\n\n for doc in res['hits']['hits']:\n content_lower = doc['_source']['content'].lower()\n for skill in skills_page:\n skill_node = skill_nodes_dict.get(skill)\n regex = re.compile(r\"\\b{}\\b\".format(re.escape(skill.lower())))\n n_match = len(regex.findall(content_lower))\n\n if n_match > 0:\n if skill_node is not None and skill_node.type == \"NamedIndividual\":\n skill_extracts = [SkillExtract(\n name=parent, match_str=skill, n_match=n_match)\n for parent in skill_node.parents]\n result.update(skill_extracts)\n else:\n skill_extract = SkillExtract(name=skill, match_str=skill, n_match=n_match)\n result.add(skill_extract)\n\n result = sorted(result, key=lambda item: item.n_match, reverse=True)\n\n skills_names = set(item.name for item in result)\n app.logger.debug(\"Extract {} skills on document id {}. Skills: {}\".format(\n len(skills_names), document_id, skills_names))\n\n return result",
"def all_poiss(self, event_type=None, tol=0.1, debug=False):\r\n pp = []\r\n for i,e in enumerate(self.energies):\r\n if debug: print ('%3i %8.0f' % (i,e),)\r\n try:\r\n pf = self.select(i, event_type=event_type,poisson_tolerance=tol)\r\n pp.append(pf.poiss)\r\n if debug: print (pf)\r\n except Exception as msg:\r\n print ('Fail poiss fit for %.0f MeV: %s ' % (e,msg))\r\n pp.append(None)\r\n \r\n self.restore()\r\n return np.array(pp)",
"async def get_evidence_types(request: Request):\n attribute_mapping = evidence.map_evidence_attributes()\n return JSONResponse(content=attribute_mapping, status_code=200)",
"def _get_evidence_mask(evidence: List[str], paragraphs: List[str]) -> List[int]:\n evidence_mask = []\n for paragraph in paragraphs:\n for evidence_str in evidence:\n if evidence_str in paragraph:\n evidence_mask.append(1)\n break\n else:\n evidence_mask.append(0)\n return evidence_mask",
"def find_container_p_tags(document):\n ret = []\n\n for p_tag in document.findAll('p'):\n if has_img_tag(p_tag):\n ret.append(p_tag)\n return ret",
"def identify_column_infotypes(\n data_series,\n engine_backend,\n sample_size: Union[int, float] = _SAMPLE_SIZE,\n score_threshold=_DEFAULT_SCORE_THRESHOLD,\n):\n if isinstance(sample_size, int):\n sampled_data = data_series.sample(n=sample_size, random_state=1)\n elif isinstance(sample_size, float):\n sampled_data = data_series.sample(frac=sample_size, random_state=1)\n results = list(\n sampled_data.map(\n lambda x: identify_pii(\n text=x, engine_backend=engine_backend, score_threshold=score_threshold\n )\n )\n )\n if results:\n return sorted(list(set([i.entity_type for obj in results for i in obj])))",
"def get_annot_detect_confidence(ibs, aid_list):\n annotation_detect_confidence_list = ibs.db.get(ANNOTATION_TABLE, ('annot_detect_confidence',), aid_list)\n return annotation_detect_confidence_list",
"def extract_entities(self, pages):\n\n selected_entity_types = [\"ORGANIZATION\", \"PERSON\", \"LOCATION\", \"DATE\"]\n\n final_entities = []\n for page in pages:\n #text = self.__get_clean_text_in_supported_language(page['Content'])\n\n text = page.get('Content')\n\n final_entities = self._call_comprehend(text)\n # detected_entities = comprehend.detect_entities(\n # Text=text,\n # LanguageCode=\"en\"\n # )\n\n # uncomment to see output of comprehend\n # print(detected_entities)\n\n # selected_entities = [x for x in detected_entities['Entities']\n # if x['Score'] > 0.9 and\n # x['Type'] in selected_entity_types]\n\n # for selected_entity in selected_entities:\n # clean_entity = {key: selected_entity[key]\n # for key in [\"Text\", \"Type\"]}\n # if clean_entity not in final_entities:\n # final_entities.append(clean_entity)\n\n return final_entities",
"def classify(self, document):\n probs = self.prob_classify(document)\n return self.c_map(probs)",
"def get_traits(self) -> list:",
"def particle_types(self):\n return self._particle_type_mapping",
"def get_types():\n try:\n return list(mongo.db.documents.distinct(\"dataType\"))\n except:\n abort(500)",
"def opinion_paragraph_sents():\n sid = SentimentIntensityAnalyzer()\n\n sentlist = []\n for op in article.opinions():\n for paragraph in op.fulltext.splitlines():\n sentlist.append([round(sid.polarity_scores(paragraph)['compound'] *100, 2), op.source, paragraph])\n\n df = pd.DataFrame(sentlist, columns=['score', 'source', 'paragraph'])\n df.to_csv('./dataframes/sents_opinion_paragraphs.csv')\n\n return sentlist",
"def ListEntityTypes(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def classify_request(self):\n\n # Detects the response of the text\n try:\n response = self.client.analyze_entities(self.document, encoding_type='UTF32', )\n\n \"\"\"\n 0 = 'UNKNOWN'\n 1 = 'PERSON'\n 2 = 'LOCATION'\n 3 = 'ORGANIZATION'\n 4 = 'EVENT'\n 5 = 'WORK_OF_ART'\n 6 = 'CONSUMER_GOOD'\n 7 = 'OTHER'\n \"\"\"\n\n classified_text = [{}]\n\n for entity in response.entities:\n classified_text.append(entity)\n classified_text.pop(0)\n return classified_text\n except:\n print(\"Classification error\")",
"def generate_candidates_for_doc(self, doc: ConllDocument) -> List[Dict]:\n self.get_kb()\n # The return variable. Stores the list of entities.\n entities = []\n\n # Inner function to append a label_dict to the entities list\n def add_entity(entity_span_s, entity_span_e, entity_tokens, entity_gt):\n entity_text = ' '.join(entity_tokens)\n entity_candidates = [\n c.entity_ for c in self.kb.get_candidates(entity_text)\n ]\n entity_span = [entity_span_s, entity_span_e]\n\n entities.append(\n {'Position': entity_span,\n 'GroundTruth': entity_gt,\n 'Candidates': entity_candidates}\n )\n\n # Helper variables for the iteration:\n # Tokens belonging to current entity\n collected_tokens = []\n # Tag of the current entity (the ground truth)\n current_entity_tag = None\n # Position of the first entity token in the document tokens list\n span_start = None\n\n # Enumerate the document's list of tokens\n for i_token, token in enumerate(doc.tokens):\n\n # If we are looking at the beginning of a named entity\n if token.true_label.startswith(\"Q\") or token.true_label == \"B\":\n\n # Check if we already have collected a named entity\n # This is the case when two named entities follow each other\n if len(collected_tokens) > 0:\n add_entity(span_start, i_token-1,\n collected_tokens, current_entity_tag)\n\n span_start = i_token\n collected_tokens = [token.text]\n current_entity_tag = token.true_label\n\n # If we are looking at the continuation of a named entity\n elif token.true_label == 'I':\n collected_tokens.append(token.text)\n\n # If we're not looking at a token in a named entity\n else:\n # If we have passed the end of a named entity\n if len(collected_tokens) > 0:\n add_entity(span_start, i_token-1,\n collected_tokens, current_entity_tag)\n\n collected_tokens = []\n\n # If the last tokens were a named entity\n if len(collected_tokens) > 0:\n add_entity(span_start, len(doc.tokens)-1,\n collected_tokens, current_entity_tag)\n\n return entities",
"def get_subjectivity(text):\n res = []\n blob = TextBlob(text)\n\n def get_passive_count(text):\n nlp = spacy.load(\"en_core_web_sm\")\n doc = nlp(text)\n count = 0\n for tok in doc:\n if tok.dep_.find(\"subjpass\") == True:\n count += 1\n return count\n\n def get_count(pronouns):\n count = 0\n for pronoun in pronouns:\n count += blob.words.count(pronoun)\n return count\n\n length = len(blob.words)\n\n res.append(round(get_passive_count(text) / length, 2))\n res.append(round(get_count(HEDGE_WORDS) / length, 2))\n res.append(round(get_count(FIRST_PERSON_PRONOUNS) / length, 2))\n res.append(round(get_count(THIRD_PERSON_PRONOUNS) / length, 2))\n res.append([0, 1][blob.sentiment.polarity >= 0])\n res.append(round(blob.sentiment.subjectivity, 2))\n\n return res",
"def getPolymerEntityFilteredTypes(self, dataContainer):\n if not dataContainer or not dataContainer.getName():\n return {}\n wD = self.__fetchEntityAndInstanceTypes(dataContainer)\n return wD[\"epTypeFilteredD\"] if \"epTypeFilteredD\" in wD else {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The function finds all dates
|
def findall_date(f_date):
    # Collect every date written as dd-mm-yyyy, dd.mm.yyyy, dd.mm.yy, "dd mm yy" or "dd mm yyyy".
    find_date = re.findall(r'\d{2}-\d{2}-\d{4}|\d{2}\.\d{2}\.\d{4}|'
                           r'\d{2}\.\d{2}\.\d{2}|\d{2} \d{2} \d{2}|'
                           r'\d{2} \d{2} \d{4}', str(f_date))
    return find_date
|
[
"def search_date(self):",
"def dates(self):\n drs = self._data_record_class.objects.filter(**self._kwargs()).values('date').distinct()\n return [d['date'] for d in drs]",
"def available_dates(self):\n output = []\n\n for row in self.rows[:]:\n if row['Date'] not in output:\n output.append(row['Date'])\n else:\n continue\n\n return output",
"def get_dates(url, start_year, end_year):\n # all URLs of `url`\n dates = []\n\n try:\n for year in range(start_year, end_year + 1):\n # domain name of the URL without the protocol\n # print(\"url \", url)\n content = url + str(year) + \"/contents.html\"\n # print(\"content \",content)\n days = get_href(content, \"contents.html\")\n # print(\"days \",days)\n for day in days:\n dates.append(day)\n except Exception as e:\n raise e\n\n return dates",
"def get_dates(self):\n\t\tdates = []\n\t\tif self.end_date==self.start_date:\n\t\t\tdates.append(self.start_date)\n\t\telse:\n\t\t\tdelta = self.end_date - self.start_date\n\t\t\tfor day in range(0, delta.days+1):\n\t\t\t\tdates.append(self.start_date + timedelta(days=day))\n\t\treturn dates",
"def dates(self) -> list:\n return list(self.__dates__)",
"def dates(html):\n dates = []\n try:\n infobox = get_infobox(html)\n except ValueError as e:\n print('dates: ', e)\n return\n try:\n # Splits result at a keyword\n spot = infobox.get_text().split('Syntynyt')[1]\n for date in re.findall('[0-9]{1,2}\\.\\ [äöÄÖA-Za-z]{3,12}\\ [0-9]{4}', spot):\n print('dates: \"{}\"'.format(date))\n newform = convertDate(date)\n print('dates: newform = {}'.format(newform))\n new = datetime.datetime.strptime(newform, '%d.%m.%Y').date()\n dates.append(new)\n if len(dates) == 2:\n break\n return dates\n except IndexError as e:\n print('dates: something went wrong when parsing dates from infobox...')\n print('dates: ', e)\n return",
"def get_dates(start, end):\n\n files = []\n\n while start <= end:\n p = start\n start += timedelta(days=1)\n files.append(p)\n\n return sorted(files)",
"def oldtest_epoFacade_SearchByDate(self):\n\n from .epofacade import SearchByDate\n\n response = SearchByDate(datetime.date(2000, 1, 1), datetime.date(2000, 1, 2), 1)\n assert response == [\"T 0597/97\"]\n \n response = SearchByDate(datetime.date(2001, 10, 1), datetime.date(2001, 10, 5), 1)\n assert response == [\"T 0610/98\"]\n \n response = SearchByDate(datetime.date(2010, 12, 1), datetime.date(2010, 12, 31))\n assert response[:7] == [\"T 1854/07\", \"T 0832/07\", \"T 1962/08\", \"T 0189/06\", \"T 0528/08\", \"T 0113/10\", \"T 0568/05\" ]",
"def grab_dates(self, soup_object):\n date_rex = re.compile('[JFMASOND][aepuco][nbrynlgptvc]\\.{0,1} [0-3][0-9], 20[0-1][0-6]')\n return [re.match(date_rex, ele.text).group(0) for ele in soup_object.findAll('td') if re.match(date_rex, ele.text)]",
"def get_dates(decks: QuerySet) -> List[date]:\n return list(decks.values_list(\"date_created\", flat=True).distinct())",
"def all_available_dates(reference_stock=\"ANZ\"):\n # use reference_stock to quickly search the db by limiting the stocks searched\n dates = Quotation.objects.mongo_distinct(\n \"fetch_date\", {\"asx_code\": reference_stock}\n )\n ret = sorted(dates, key=lambda k: datetime.strptime(k, \"%Y-%m-%d\"))\n return ret",
"def __dates2days(self):\n\n days = []\n holidays = self.holidays.copy()\n for date_ in self.dates:\n holiday = None\n if len(holidays) > 0 and holidays[0].day == str(date_.day):\n holiday = holidays[0]\n del holidays[0]\n\n # weekday 5 means Saturday\n dayoff = holiday is not None or date_.weekday() >= 5\n days.append(Day(date_, holiday, dayoff))\n return days",
"def list_dates(product=None):\n\n date_folders = _get_links(_URI_ROOT)\n if product:\n dates = []\n for date_folder in date_folders:\n uri = '{0}/{1}'.format(_URI_ROOT, date_folder)\n products = [p[:-1] for p in _get_links(uri)] # remove slash\n for available_product in products:\n if product in available_product:\n dates.append(re.findall('\\d{8}', date_folder)[0])\n dates = list(set(dates))\n else:\n dates = [re.findall('\\d{8}', d)[0] for d in date_folders]\n return sorted(dates)",
"def get_available_dates():\n available_dates = []\n for values in Measurement.objects.annotate(date=TruncDate(\"timestamp\")).values(\"date\").distinct():\n if values[\"date\"] != pendulum.today().date():\n available_dates.append(values[\"date\"])\n return available_dates",
"def getDatesList():\n lstDates = []\n curD.execute(\"SELECT Date FROM DataDates ORDER BY Date;\")\n recs = curD.fetchall()\n for rec in recs:\n lstDates.append(rec[\"Date\"])\n return lstDates",
"def list_dates(self,entries):\n print(\"Search gives following results: \\n\")\n counter = 1\n for entry in entries:\n print(\"[{}] - {}\".format(counter, entry[\"Date\"]))\n counter +=1",
"def create_date_list(f):\n\n city_list = create_city_list(f)\n\n list_of_dates = []\n for orbit in city_list:\n date = orbit[53:61]\n if date not in list_of_dates:\n list_of_dates.append(date)\n\n # Sort list of dates if not already sorted\n list_of_dates = sorted(list_of_dates)\n\n return(list_of_dates)",
"def __iter__(self):\n return iter(self.__dates__)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The function splits dates into 3 elements (day, month, year)
|
def split_date(dmy):
    # Split each date string into its [day, month, year] parts on '.', '-', or space.
    for i in range(len(dmy)):
        if '.' in dmy[i]:
            dmy[i] = dmy[i].split('.')
        elif '-' in dmy[i]:
            dmy[i] = dmy[i].split('-')
        else:
            dmy[i] = dmy[i].split(' ')
    return dmy
|
[
"def breakdate(date):\n day=int(date[6:8])\n month=int(date[4:6])\n year=int(date[0:4])\n return day, month, year",
"def split_date(df, date_col):\n list_date = df[date_col].tolist()\n list_year = [int(date[:4]) for date in list_date]\n list_month = [int(date[5:7]) for date in list_date]\n list_day = [int(date[8:]) for date in list_date]\n df[\"year\"] = list_year\n df[\"month\"] = list_month\n df[\"day\"] = list_day\n return df",
"def hanukkah_dates_split(self, hanukkah_dates: List[str]) -> None:\n for date in hanukkah_dates:\n self.hanukkah_days.append(date[8:10])\n self.hanukkah_months.append(date[5:7])\n self.hanukkah_years.append(date[0:4])",
"def extract_dates(input_date, in_sep='/', out_sep='/'):\n old_date = input_date.strip()\n cleaned_date = clean_date(old_date, in_sep, out_sep, '')\n # Catch returns of False from clean_date()\n if cleaned_date:\n day = cleaned_date[:2]\n month = cleaned_date[3:5]\n year = cleaned_date[6:] \n return day, month, year\n else:\n return False, False, False",
"def split_dates(self):\n # split dates\n for new_date_feature in self.SPLIT_DATES:\n self.df[new_date_feature] = getattr(self.df.index, self.SPLIT_DATES[new_date_feature])",
"def delimited_extract(text):\n dates = []\n for date in DELIM_RE.finditer(text):\n vals = date.groupdict()\n dates.append(\n circadate(\n int(vals['YEAR']),\n int(vals['MONTHNUM']),\n int(vals['DAY'])))\n text = text.replace(date[0], '')\n return dates, text",
"def xr_split_by_strftime(ds,fmt_string='%m-%d-%Y',return_datestrings=False, return_data=True):\n split_data = list(ds.groupby(ds.time.dt.strftime(fmt_string))) # returns list of tuples w/ (time, data)\n if return_data:\n if return_datestrings:\n return split_data\n else:\n return [x[1] for x in split_data]\n else:\n if return_datestrings:\n return [x[0] for x in split_data]\n else:\n None",
"def date_converter(string):\n results = []\n day = '01'\n month = '01'\n year = '1900'\n\n # This is in the form of DD-MM-YYYY or DD.MM.YYYY or DD/MM/YYYY\n date = re.search('(0?[1-9]|[12][0-9]|3[0-1])(\\.|-|/)(0?[1-9]|1[0-2])(\\.|-|/)(20[01][0-9]|\\d\\d)', string)\n \n # This is in the form of MM-DD-YYYY or MM.DD.YYYY or MM/DD/YYYY\n date1 = re.search('(0?[1-9]|1[0-2])(\\.|-|/)(0?[1-9]|[12][0-9]|3[0-1]|[00])(\\.|-|/)(20[01][0-9]|\\d\\d)', string)\n\n # Removes Single quotes from string and creates spaces\n string = string.replace(\"'\", ' ').replace(\"Jan\", \" Jan \").replace(\"JAN\", \" Jan \").replace(\"Feb\", \" Feb \").replace(\"FEB\", \n \" Feb \").replace(\"Mar\", \" Mar \").replace(\"MAR\", \" Mar \").replace(\"Apr\", \" Apr \").replace(\"APR\", \" Apr \").replace(\"May\",\n \" May \").replace(\"MAY\", \" May \").replace(\"Jun\", \" Jun \").replace(\"JUN\", \" Jun \").replace(\"Jul\", \" Jul \").replace(\"JUL\", \n \" Jul \").replace(\"Aug\", \" Aug \").replace(\"AUG\", \" Aug \").replace(\"Sep\", \" Sep \").replace(\"SEP\", \" Sep \").replace(\"Oct\", \n \" Oct \").replace(\"OCT\", \" Oct \").replace(\"Nov\", \" Nov \").replace(\"NOV\", \" Nov \").replace(\"Dec\", \" Dec \").replace(\"DEC\", \n \" Dec \")\n \n # This is in the form of DD-Month-YYYY or DD.Month.YYYY or DD/Month/YYYY\n month1 = re.search(\n '(0?[1-9]|[12][0-9]|3[0-1])(?:st|nd|rd|th)?\\s*[-|/|.\\s]\\s*(Jan(?:uary)?|JAN(?:UARY)?|Feb(?:ruary)?|FEB(?:RUARY)?|Mar(?:ch)'\n '?|MAR(?:CH)?|Apr(?:il)?|APR(?:IL)?|May|MAY|June?|JUNE?|July?|JULY?|Aug(?:ust)?|AUG(?:UST)?|Sept(?:ember)?|SEPT'\n '(?:EMBER)?|Sep(?:tember)?|SEP(?:TEMBER)?|Oct(?:ober)?|OCT(?:OBER)?|Nov(?:ember)?|NOV(?:EMBER)?|Dec(?:ember)?|DEC(?:EMB'\n 'ER)?).?\\s*[-|/|.\\s]\\s*(20[01][0-9]|\\d\\d)', string)\n \n # This is in the form of Month-DD-YYYY or Month.DD.YYYY or Month/DD/YYYY\n month2= re.search(\n '(Jan(?:uary)?|JAN(?:UARY)?|Feb(?:ruary)?|FEB(?:RUARY)?|Mar(?:ch)?|MAR(?:CH)?|Apr(?:il)?|APR(?:IL)?|May|June?|JUNE?|'\n 'July?|JULY?|Aug(?:ust)?|AUG(?:UST)?|Sept(?:ember)?|SEPT(?:EMBER)?|Sep(?:tember)?|SEP(?:TEMBER)?|Oct(?:ober)?|OCT(?:OBER)?|Nov(?:ember)?|NOV(?:EM'\n 'BER)?|Dec(?:ember)?|DEC(?:EMBER)?).?\\s*[-|/|.\\s]\\s*(0?[1-9]|[12][0-9]|3[0-1])(?:st|nd|rd|th)?\\s*[-|/|.,\\s]\\s*(20[01][0-9]|\\d\\d)'\n , string)\n \n if date:\n day = date.group(1)\n month = date.group(3)\n year = date.group(5)\n elif date1:\n day = date1.group(3)\n month = date1.group(1)\n year = date1.group(5)\n elif month1:\n day = month1.group(1)\n month = word_to_num(month1.group(2))\n year = month1.group(3)\n elif month2:\n day = month2.group(2)\n month = word_to_num(month2.group(1))\n year = month2.group(3)\n else:\n return \"Not Found\"\n \n # Make sure all variables have correct number, add zeros if necessary\n month = month.zfill(2)\n day = day.zfill(2)\n if day == '00':\n day = '01'\n if year is not None and len(year) == 2:\n year = '20' + year\n\n # Day-Month-Year \n results.append(day + \"-\" + month + \"-\" + year)\n return results",
"def get_start_end_dates(data, year=None):\n event_edition_start = None\n event_edition_end = None\n\n if not data:\n return event_edition_start, event_edition_end\n\n # Match patterns like \"October 24, 2014\"\n regexp = \"(?P<month>[a-zA-Z]+)\\s(?P<day>[0-9]{2}),\\s(?P<year>[0-9]{4})\"\n m = re.findall(regexp, data)\n if len(m) > 0:\n month, day, year = m[0]\n date_string = '{:s}/{:s}/{:s}'.format(day, month, year)\n event_edition_start = datetime.strptime(date_string, '%d/%B/%Y')\n event_edition_end = datetime.strptime(date_string, '%d/%B/%Y')\n\n # Match patterns like \"October 24-25, 2014\"\n regexp = \"(?P<month>[a-zA-Z]+)\\s(?P<day_start>[0-9]{2})-(?P<day_end>[0-9]{2}),\\s(?P<year>[0-9]{4})\"\n m = re.findall(regexp, data)\n if len(m) > 0:\n month, day_start, day_end, year = m[0]\n date_string = '{:s}/{:s}/{:s}'.format(day_start, month, year)\n event_edition_start = datetime.strptime(date_string, '%d/%B/%Y')\n date_string = '{:s}/{:s}/{:s}'.format(day_end, month, year)\n event_edition_end = datetime.strptime(date_string, '%d/%B/%Y')\n\n # Match patterns like \"Feb 17–19\" and \"February 17-19\"\n regexp = \"(?P<month>[a-zA-Z]+)\\s(?P<day_start>[0-9]{2})-(?P<day_end>[0-9]{2})\"\n m = re.findall(regexp, data)\n if len(m) > 0:\n month, day_start, day_end = m[0]\n if month == \"Sept\":\n month = \"September\"\n date_string = '{:s}/{:s}/{:s}'.format(day_start, month, year)\n try:\n event_edition_start = datetime.strptime(date_string, '%d/%b/%Y')\n except ValueError:\n event_edition_start = datetime.strptime(date_string, '%d/%B/%Y')\n date_string = '{:s}/{:s}/{:s}'.format(day_end, month, year)\n try:\n event_edition_end = datetime.strptime(date_string, '%d/%b/%Y')\n except ValueError:\n event_edition_end = datetime.strptime(date_string, '%d/%B/%Y')\n\n return event_edition_start, event_edition_end",
"def getDateListByMonth(year_month):\n\n if len(year_month) == 6:\n year = year_month[:4]\n month = year_month[4:]\n elif len(year_month) == 4:\n year = \"20\" + year_month[:2]\n month = year_month[2:]\n first_day = year + month + \"01\"\n next_month_first_day = getNextMonthFirstDay(first_day)\n dates = getDateList(first_day, next_month_first_day)\n return dates",
"def split_day_of_month(dom_str):\n digits = []\n for b in dom_str:\n if grammar.is_digit(b):\n digits.append(b)\n if len(digits) > 2:\n raise ValueError(\"Can't read day-of-month from %s\" % dom_str)\n else:\n break\n if len(digits) < 1:\n raise ValueError(\"Can't read day-of-month from %s\" % dom_str)\n return int(join_bytes(digits))",
"def date_parser():",
"def get_date_inf(date_str):\n\t\tyear = date_str.split(\"-\")[0]\n\t\tmonth = date_str.split(\"-\")[1]\n\t\tday = date_str.split(\"-\")[2]\n\t\treturn year, month, day",
"def format_dates(dates):\n\treturn [\"%d/%02d\" % (d.month, d.day) for d in dates]",
"def add_dates(self, split_words_list):\n if self.curr_id in self.individualdata:\n self.individualdata[self.curr_id][self.tempdata + split_words_list[1]] = split_words_list[2]\n elif split_words_list[1] == \"DATE\":\n husband = self.familydata[self.curr_id][\"HUSB\"]\n wife = self.familydata[self.curr_id][\"WIFE\"]\n self.individualdata[husband][self.tempdata + split_words_list[1]] = split_words_list[2]\n self.individualdata[wife][self.tempdata + split_words_list[1]] = split_words_list[2]",
"def date_patterns():\n\tfor year in [' %Y',' %y']:\n\t\tfor mon in ['%b','%B','%m']:\n\t\t\tyield ['%%d %s%s'%(mon, year), DAY, []]\n\t\t\tyield ['%s %%d%s'%(mon, year), DAY, []]\n\tfor mon in ['%b','%B']: # Year empty\n\t\tyield ['%%d %s'%(mon), DAY, [YEAR]]\n\t\tyield ['%s %%d'%(mon), DAY, [YEAR]]\n\tyield ['%%Y %%d %s'%(mon), DAY, []]\n\tyield ['%%Y %s %%d'%(mon), DAY, []]\n\tyield ['%Y %m %d', DAY, []]",
"def combine_date_parts(year: int = 0, month: int = 0, day: int = 1):\n date_kwargs = {'day': day}\n\n if year:\n date_kwargs['year'] = year\n if month:\n date_kwargs['month'] = month\n\n try:\n return datetime.date(**date_kwargs)\n except (OverflowError, ValueError, TypeError):\n return None",
"def split_date():\n basic = []\n dateFormatedList = []\n text = open('files/date_concat_providus.txt', \"r\")\n basic = text.readlines()\n length = len(basic)\n for i in range(length):\n temp = str(basic[i]) \n if temp != \"NULL\\n\":\n temp = temp[0:2] + \"-\" + temp[2:4] + \"-\" + temp[4:]\n dateFormatedList.append(temp)\n #print(tempList)\n temp=\"\"\n else:\n dateFormatedList.append(temp)\n temp=\"\"\n date_formatted = open(\"date_formatted_providus_2021.txt\", \"w\")\n for i in range(length):\n date_formatted.write(dateFormatedList[i])\n \n\n #return basicInt",
"def parse_standard_date(date):\n return [ int(i) for i in date.split(\"/\") ]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the given GW shows up in the routes for both modes.
|
def test_gateway(self):
anIP = "192.168.1.100"
for aMode in trans.mode_list:
tup = trans.transform_to_routes("sampleStatFile.txt", anIP, aMode)
for line in tup[1]:
if anIP in line:
break
else:
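                # Fail if the gateway IP never appeared in the generated route commands.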
print(f"The GW of '{anIP}' is not in the '{aMode}' route commands")
self.assertTrue(False)
self.assertEqual(tup[0], 0)
|
[
"def test_is_hallway(self):\n self.assertFalse(self.gamerules.is_hallway(\"Dinning Room\")) #Not hallway\n self.assertTrue(self.gamerules.is_hallway(\"Hall-Lounge\")) #Is a hallway",
"def check_test_route(self):\n if self.target_ip:\n (retcode,route) = run('/sbin/ip route show {target_ip}'.format(target_ip=self.target_ip))\n if self.gateway:\n if not \"{target_ip} via {gateway}\".format(target_ip=self.target_ip,gateway=self.gateway) in route:\n logger.debug(run('/sbin/ip route del {target_ip}'.format(target_ip=self.target_ip),dry_run=self.dry_run)[1])\n logger.warning('No route for {target_ip} via {gateway}, adding one'.format(target_ip=self.target_ip,gateway=self.gateway))\n logger.debug(run('/sbin/ip route add {target_ip} via {gateway}'.format(target_ip=self.target_ip,gateway=self.gateway),dry_run=self.dry_run)[1])\n elif self.device:\n if not \" {} \".format(self.device) in route:\n logger.warning('No route for {target_ip} through {device}, adding one'.format(target_ip=self.target_ip,device=self.device))\n logger.debug(run('/sbin/ip route add {target_ip} dev {device}'.format(target_ip=self.target_ip,device=self.device),dry_run=self.dry_run)[1])\n else:\n logger.critical('No gateway for {target_ip}'.format(target_ip=self.target_ip))",
"def test_1_gateway_actions(self):\n # create local gateway\n name = os.getenv(\"TG_SERVICES_GW_NAME\")\n location = os.getenv(\"TG_SERVICES_LOCATION\")\n response = self.tg.create_transit_gateway(\n name=name, location=location)\n assert response is not None\n assert response.get_status_code() == 201\n gateway_id = response.get_result().get(\"id\")\n\n # check gateway status until available using get api\n count = 0\n while count < 24:\n response = self.tg.get_transit_gateway(id=gateway_id)\n status = response.get_result().get(\"status\")\n ret_id = response.get_result().get(\"id\")\n assert ret_id == gateway_id\n assert response.get_status_code() == 200\n if status == \"available\":\n break\n else:\n time.sleep(5)\n count += 1\n\n # list gateways\n response = self.tg.list_transit_gateways()\n assert response is not None\n assert response.get_status_code() == 200\n gateways = response.get_result().get(\"transit_gateways\")\n list_result = False\n for gateway in gateways:\n if gateway[\"id\"] == gateway_id:\n list_result = True\n break\n assert list_result\n\n # update gateway name\n update_name = \"update\"+os.getenv(\"TG_SERVICES_GW_NAME\")\n response = self.tg.update_transit_gateway(id=gateway_id,\n name=update_name)\n assert response is not None\n assert response.get_status_code() == 200\n assert response.get_result()[\"name\"] == update_name\n\n # delete gateway\n self.delete_gateway(gateway_id)",
"def test_get_mft_gateways(self):\n pass",
"def test_bgp_routes(self):\n # Get expected advertised routes\n private_cidr = self.project_subnet['cidr']\n floating_ip_cidr = \"{}/32\".format(\n self.neutron_client.list_floatingips(name=NDR_TEST_FIP)\n [\"floatingips\"][0][\"floating_ip_address\"])\n\n # This test may run immediately after configuration.\n # It may take time for routes to propagate via BGP. Do a\n # binary backoff.\n self._assert_cidr_in_peer_routing_table(self._peer_unit, private_cidr)\n logging.info(\"Private subnet CIDR, {}, found in routing table\"\n .format(private_cidr))\n self._assert_cidr_in_peer_routing_table(self._peer_unit,\n floating_ip_cidr)\n logging.info(\"Floating IP CIDR, {}, found in routing table\"\n .format(floating_ip_cidr))",
"def test_ospf(sw):\n cmd = cmd = sw.show('show ip ospf')\n resp = xmltodict.parse(cmd[1])['ins_api']['outputs']['output']\n\n try:\n if resp[\"code\"] == \"400\":\n #most likely feature ospf is not in the configuration.\n return False\n elif resp[\"code\"] == \"501\" and resp[\"clierror\"] == \"Note: process currently not running\\n\":\n #feature ospf is enabled but not configured.\n return False\n elif resp[\"code\"] == \"200\":\n #ospf appears to be configured\n contexts = resp[\"body\"][\"TABLE_ctx\"][\"ROW_ctx\"]\n if len(contexts) > 0:\n return True\n except Exception as oops:\n print type(oops)\n print oops.args\n print oops\n return False",
"def match_w_req(self, req, Zones, WARMUP_PHASE):\n # try:\n # assert self._state == VehState.IDLE\n # except:\n # print(self.is_AV)\n # print(self._state)\n # print(self.time_to_be_available)\n # raise AssertionError \n assert self._state == VehState.IDLE\n self.time_idled = 0\n dest = req.dzone\n matched = False\n for z in Zones:\n if z.id == dest:\n self._state = VehState.SERVING\n self.state_hist.append(self._state)\n self.time_to_be_available = self._get_time_to_destination(self.ozone, dest)\n dist = self._get_distance_to_destination(self.ozone, dest)\n\n self.ozone = dest\n self.zone = z\n\n matched = True\n # don't match incoming, rather join the undecided list. \n # actually, don't join any list because in the next step, \"act\" will take care of it\n # z.join_incoming_vehicles(self)\n # z.join_undecided_vehicles(self)\n #\n # if not WARMUP_PHASE:\n self.req = req\n return True\n\n if not matched:\n print(\"zone {} does not exist \".format(dest))\n # why and when would it return False?\n return False",
"async def test_multiple_gateways(caplog):\n async with Context() as context:\n await Py4JComponent(gateways={\n 'java1': {},\n 'java2': {}\n }).start(context)\n assert isinstance(context.java1, JavaGateway)\n assert isinstance(context.java2, JavaGateway)\n\n records = [record for record in caplog.records if record.name == 'asphalt.py4j.component']\n records.sort(key=lambda r: r.message)\n assert len(records) == 4\n assert records[0].message.startswith(\"Configured Py4J gateway \"\n \"(java1 / ctx.java1; address=127.0.0.1, port=\")\n assert records[1].message.startswith(\"Configured Py4J gateway \"\n \"(java2 / ctx.java2; address=127.0.0.1, port=\")\n assert records[2].message == 'Py4J gateway (java1) shut down'\n assert records[3].message == 'Py4J gateway (java2) shut down'",
"def test_view_own_game_details(self):\n # Alice is team1, not-Bob is team2\n games = self.space.game_set.filter(teams=self.alice_team)\n games = games.filter(teams=self.other_team)\n kwds = {'comp_slug': self.space.slug, 'pk': games[0].pk}\n games_url = reverse(\"game_detail\", kwargs=kwds)\n with self.loggedInAs(\"alice\", \"123\"):\n response = self.client.get(games_url)\n self.assertEqual(200, response.status_code)\n\n with self.loggedInAs(\"bob\", \"123\"):\n response = self.client.get(games_url)\n self.assertEqual(404, response.status_code)",
"def test_both_teams_view_details(self):\n # Alice is team1, Bob is team2\n games = self.space.game_set.filter(teams=self.alice_team)\n games = games.filter(teams=self.bob_team)\n kwds = {'comp_slug': self.space.slug, 'pk': games[0].pk}\n games_url = reverse(\"game_detail\", kwargs=kwds)\n with self.loggedInAs(\"alice\", \"123\"):\n response = self.client.get(games_url)\n self.assertEqual(200, response.status_code)\n\n with self.loggedInAs(\"bob\", \"123\"):\n response = self.client.get(games_url)\n self.assertEqual(200, response.status_code)",
"def test_show_your_opportunity(self):\n response = self.client.get(reverse('your-opportunity'))\n self.common_asserts(response)\n self.assert_good_enrollment_link(response)",
"async def test_plot_waypoint_route(galaxy_fx):\n route = await galaxy_fx.plot_waypoint_route(\"Fuelum\", \"Beagle Point\")\n assert route[0] == 'FUELUM'\n assert route[1] == 'EORLD PRI QI-Z D1-4302'\n assert route[2] == 'PRAE FLYI RO-I B29-113'\n assert route[3] == 'CHUA EOHN CT-F D12-2'\n assert route[4] == 'BEAGLE POINT'",
"def test_bgp(sw):\n cmd = cmd = sw.show('show ip bgp')\n resp = xmltodict.parse(cmd[1])['ins_api']['outputs']['output']\n\n try:\n if resp[\"code\"] == \"400\":\n #most likely feature bgp is not in the configuration.\n return False\n elif resp[\"code\"] == \"501\" and resp[\"clierror\"] == \"Note: process currently not running\\n\":\n #feature bgp is enabled but not configured.\n return False\n elif resp[\"code\"] == \"501\" and resp[\"msg\"] == \"Structured output unsupported\":\n #bgp appears to be configured\n return True\n except Exception as oops:\n print type(oops)\n print oops.args\n print oops\n return False",
"def test_village_creation(self):\n village = self.do_enter_game()\n\n response = self.client.get(reverse(\"hq\", kwargs={\"village_id\": village.id}))\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(reverse(\"rally\", kwargs={\"village_id\": village.id}))\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(reverse(\"barracks\", kwargs={\"village_id\": village.id}))\n self.assertRedirects(response, reverse(\"village\", kwargs={\"village_id\": village.id}))",
"def test_standings(self):\n pass",
"def test_gateway_params():\n params = GatewayParameters()\n launch_jvm, gw_params, *rest = Py4JComponent.configure_gateway(False, params)\n assert gw_params is params",
"def test_landing(self):\n i = random.randrange(0, 20)\n odds_to_land_arrival = 0.75\n random_gen = random.uniform(0, odds_to_land_arrival)\n\n if random_gen < odds_to_land_arrival:\n status = \"to_land\"\n new_plane = Plane.generate_Plane(status,i)\n\n self.assertEqual(new_plane.status, \"to_land\")\n self.assertEqual(type(new_plane.arrival_time), int)\n self.assertEqual(type(new_plane.transaction_time), int)\n self.assertEqual(type(new_plane.fuel), int)",
"def test_lowercaseroute(self):\n result = {\n 'basis': 'def2-TZVP'\n }\n self.assertEqual(check_route(self.lowercase), result)",
"def test_no_other_routes_available(self):\n difficulty = create_difficulty(name='New', cols=5, rows=5)\n game = create_game(difficulty=difficulty)\n\n client = APIClient()\n response = client.put('/api/v1/tile/')\n self.assertEquals(response.status_code, 405)\n\n client = APIClient()\n response = client.delete('/api/v1/tile/')\n self.assertEquals(response.status_code, 405)\n\n client = APIClient()\n response = client.delete('/api/v1/tile/1')\n self.assertEquals(response.status_code, 301)\n\n client = APIClient()\n response = client.get('/api/v1/tile/1')\n self.assertEquals(response.status_code, 301)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that we handle 32-bit networks using the correct syntax.
|
def test_32_bit_macOS(self):
tup = trans.transform_to_routes("sampleStatFile.txt", "192.168.1.131", "macOS")
        host_lines = [line for line in tup[1] if re.search(r"route -n add \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/32", line)]
self.assertEqual(tup[0], 0)
self.assertEqual(len(host_lines), 2)
|
[
"def test_unsigned_integer_32(self):\n self.assertIsInstance(self.dataset.structure.ui32, BaseType)\n self.assertEqual(self.dataset.structure.ui32.dtype, np.dtype(\">I\"))\n self.assertEqual(self.dataset.structure.ui32.shape, ())",
"def test_isnetid():\n print('Testing isnetid()')\n\n result = funcs.isnetid('wmw2')\n introcs.assert_true(result)\n\n result = funcs.isnetid('jrs1234')\n introcs.assert_true(result)\n\n result = funcs.isnetid('ww9999')\n introcs.assert_true(result)\n\n result = funcs.isnetid('Wmw2')\n introcs.assert_false(result)\n\n result = funcs.isnetid('wMw2')\n introcs.assert_false(result)\n\n result = funcs.isnetid('wmW2')\n introcs.assert_false(result)\n\n result = funcs.isnetid('ww99a99')\n introcs.assert_false(result)\n\n result = funcs.isnetid('#w999')\n introcs.assert_false(result)\n\n result = funcs.isnetid('w#w999')\n introcs.assert_false(result)\n\n result = funcs.isnetid('ww#999')\n introcs.assert_false(result)",
"def validateNetworkAddress(netAddress):\n\t\n\t# from the link here: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml\n\t # 0.0.0.0/8 - This host on this network\n\t # 100.64.0.0/10 - Shared address space\n\t # 127.0.0.0/8 - Loopback\n\t # 169.254.0.0/16 - Link Local\n\t # 192.0.0.0/24 - IETF protocol assignments\n\t # 192.0.2.0/24 - Documentation (test-net-1)\n\t # 192.31.196.0/24 - AS112-v4\n\t # 192.52.193.0/24 - AMT\n\t # 192.175.48.0/24 - Direct delegation AS112 service\n\t # 192.18.0.0/15 - Benchmarking\n\t # 192.51.100.0/24 - Documentation (test-net-2)\n\t# 203.0.113.0/24 - Documentation (test-net-3)\n\t\n\t# the subnet mask lengths to consider here are 8 10 15 16 24\n\t# goal is to take the network address calculated, bit-and it with the subnet mask to consider, and make sure that the resultant network address is\n\t# not one of the reserved ones\n\t\n\t# make a dictionary of the reserved spaces\n\treservedSpaces = {8:['0.0.0.0','127.0.0.0'], \\\n\t\t\t\t\t 10:['100.64.0.0'], \\\n\t\t\t\t\t 15:['192.18.0.0'], \\\n\t\t\t\t\t 16:['169.254.0.0'], \\\n\t\t\t\t\t 24:['192.0.0.0', '192.0.2.0', '192.31.196.0', '192.52.193.0', '192.175.48.0', '192.51.100.0', '203.0.113.0'] }\n\t\t\t\t\t \n\tfor smLength in reservedSpaces.keys():\n\t\t\n\t\t# first calculate the network address anded with the subnet mask\n\t\tif calculateNetworkAddress(dottedToBinary(netAddress),'1'*smLength+'0'*(32 - smLength)) in reservedSpaces[smLength]:\n\t\t\tprint(\"This fits within one of the IANA reserved IPv4 Spaces: \" + calculateNetworkAddress(dottedToBinary(netAddress),'1'*smLength+'0'*(32 - smLength)) + \\\n\t\t\t\"\\nNo further processing required.\\nSee https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml for further details.\")\n\t\t\tsys.exit()",
"def generate32BitTests(self):\n if not self.run_32bit:\n logging.info('User specified not to run 32 bit version LTP tests.')\n return\n if self.abi_bitness != None and self.abi_bitness != '32':\n logging.info('Skipped 32 bit tests on %s bit ABI.',\n self.abi_bitness)\n return\n\n self.TestNBits(self._32BIT)",
"def test_int16(structure_app):\n dataset = open_url(\"http://localhost:8001/\", structure_app)\n assert (dataset.types.i16.dtype == np.dtype(\">i2\"))",
"def test_correct_neuron_number(self):\n net = PyOpenWorm.Worm().get_neuron_network()\n self.assertEqual(302, len(set(net.neurons())))",
"def test_fld32(self):\n self.feed('flds [rcx]')\n self.check('40d901')",
"def networks_number(mask):\n return int(2**(32-int(mask)-2))",
"def test_little_endian(self):\n # VM 1\n bytestring = b'\\x10\\x00\\x20\\x00'\n assert convert_ATvalue(bytestring, True) == Tag(0x0010, 0x0020)\n\n # VM 3\n bytestring += b'\\x10\\x00\\x30\\x00\\x10\\x00\\x40\\x00'\n out = convert_ATvalue(bytestring, True)\n assert Tag(0x0010, 0x0020) in out\n assert Tag(0x0010, 0x0030) in out\n assert Tag(0x0010, 0x0040) in out",
"def scaleio_network_test(config):",
"def test_valid_ipv4(self):\n self.assertEqual(is_valid_ip_address(\"192.168.0.55\"), True)",
"def _validate_network(cls, network, prefix):\n try:\n value = netaddr.IPNetwork(network + \"/\" + str(prefix))\n except netaddr.core.AddrFormatError:\n raise ValueError(_(\"Invalid IP address and prefix\"))\n mask = value.hostmask\n host = value.ip & mask\n if host.value != 0:\n raise ValueError(_(\"Host bits must be zero\"))",
"def test_adler32(self):\n self.assertEqual(\"081e0256\", self.file_path.adler32)",
"def test_shapelayer_conversion(self):\n\t\tshlx = v2lt.shl2shlx(self.shlt.clone().permute(2,0,1).reshape(1,6,128,128))\n\t\tshl = v2lt.shlx2shl(shlx).reshape(6,128,128).permute(1,2,0)\n\t\tself.assertTrue((shl == self.shlt).all())\n\t\tpass",
"def test_ip_network(self):\n n = 10**4\n data = [\n ('1.2.3.4/30'),\n ('1:2:3:4:5:6::/112'),\n ]\n fns = ip.ip_network, eip.ip_network\n for args in data:\n generic_test(self.report_u, fn_name(), n, fns, args)",
"def testPortComparisonValidation(self):\n bytecode = sock_diag.InetDiagBcOp((sock_diag.INET_DIAG_BC_D_GE, 4, 8))\n self.assertEquals(\"???\",\n self.sock_diag.DecodeBytecode(bytecode))\n self.assertRaisesErrno(\n EINVAL,\n self.sock_diag.DumpAllInetSockets, IPPROTO_TCP, bytecode.Pack())",
"def s32(addr):\n return readtype(pwndbg.typeinfo.int32, addr)",
"def test_ipv4network_init(self):\n n = 10**5\n data = [\n '1.2.3.0/24',\n '1.2.3.4',\n 16384,\n ('10.1.0.0', 16),\n (65536, 16),\n (int(64).to_bytes(4, 'big'), 28),\n ]\n fns = ip.IPv4Network, eip.IPv4Network\n for args in data:\n generic_test(self.report_4n, fn_name(), n, fns, args)",
"def test_float_32(self):\n self.assertIsInstance(self.dataset.structure.f32, BaseType)\n self.assertEqual(self.dataset.structure.f32.dtype, np.dtype(\">f\"))\n self.assertEqual(self.dataset.structure.f32.shape, ())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the serializer for a device.
|
def __getitem__(self, device):
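    # Serializer lookup is only valid once initialize() has populated the registry.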
if not self.initialized:
raise RuntimeError("The registry isn't initialized yet")
return self._serializers[device]
|
[
"def get_serializer(self, format):\n serializer = self._serializers.get(format)\n if not serializer:\n raise ValueError(format)\n return serializer()",
"def _get_serializer(self, model, serializer):\n app_lbl = getattr(model, \"_meta\").app_label\n package = apps.get_app_config(app_lbl).module\n\n if \".\" in serializer: # pragma: no cover\n module, serializer = serializer.split(\".\", 1)\n\n else:\n module = \"serializers\"\n\n module = import_module(\".\".join((package.__name__, module)))\n return getattr(module, serializer)",
"def get_serializer(self, format):\n creator = self.serializer_format_dict.get(format.upper())\n if not creator:\n raise ValueError(format)\n\n return creator()",
"def _get_serializer(output):\n serializers = salt.loader.serializers(__opts__)\n try:\n return getattr(serializers, output)\n except AttributeError:\n raise CommandExecutionError(\n \"Unknown serializer `{}` found for output option\".format(output)\n )",
"def get_model_serializer(model_class):\n serializer = {\n DiscoveredPackage: DiscoveredPackageSerializer,\n CodebaseResource: CodebaseResourceSerializer,\n }.get(model_class, None)\n\n if not serializer:\n raise LookupError(f\"No Serializer found for {model_class}\")\n\n return serializer",
"def get_user_serializer():\n\n return import_string(drfr_settings.USER_SERIALIZER)",
"def get_serializer_class(self):\n\n if self.request.version == 'v6':\n return RecipeSerializerV6\n elif self.request.version == 'v7':\n return RecipeSerializerV6",
"def get_serializer_class(self):\n\n if self.request.version == 'v6':\n return RecipeTypeListSerializerV6\n elif self.request.version == 'v7':\n return RecipeTypeListSerializerV6",
"def get_serializer_class(self):\n \n if self.action == 'list':\n return FooSerializer\n elif self.action == 'retrieve':\n return FooSerializer\n elif self.action == 'create':\n return FooSerializer\n return FooSerializer",
"def get_instance(self):\n\t\tif not SetupSerializer.__instance:\n\t\t\tSetupSerializer.__instance = SetupSerializer()\n\t\t\n\t\treturn SetupSerializer.__instance",
"def get_serializer_class(self):\n if self.action == \"retrieve\":\n return VideoAnalyticsSerializer\n return super().get_serializer_class()",
"def get_serializer_class(self):\n serializer_class = WellExportSerializerV2\n if (self.request.user and self.request.user.is_authenticated and\n self.request.user.groups.filter(name=WELLS_VIEWER_ROLE).exists()):\n serializer_class = WellExportAdminSerializerV2\n\n return serializer_class",
"def get_instance(self):\n\t\tif not RobotSerializer.__instance:\n\t\t\tRobotSerializer.__instance = RobotSerializer()\n\t\t\n\t\treturn RobotSerializer.__instance",
"def get_device(self, id: uuid) -> Device:\n raise NotImplementedError()",
"def get_instance(self):\n\t\tif not ColorSerializer.__instance:\n\t\t\tColorSerializer.__instance = ColorSerializer()\n\t\t\n\t\treturn ColorSerializer.__instance",
"def get_serialization_data(self, serializer: 'Serializer') -> Dict[str, Any]:",
"def get_serializer_class(self):\n renderer_class = getattr(\n getattr(getattr(\n self, 'request', None), 'accepted_renderer', None),\n 'serializer_class', None)\n if renderer_class is not None:\n return renderer_class\n\n return super(FormatAPIView, self).get_serializer_class()",
"def get_serializer_class(self):\n if self.request.auth and self.request.user.is_active:\n serializer = self.serializer_class\n else:\n serializer = UserPartialSerializer\n\n return serializer",
"def get_device_from_tag(id: str):\n # todo this could be more efficient by Device.query... join with tag\n device = Tag.query.filter_by(id=id).one().device\n if device is None:\n raise TagNotLinked(id)\n return app.resources[Device.t].schema.jsonify(device)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the registry. This method will import all the registered devices and serializers and put them into a mapping.
|
def initialize(self):
if self.initialized:
raise RuntimeError("The registry is already initialized")
for specifier, serializer in self._prematurely.items():
model = apps.get_model(specifier)
self._serializers[model] = self._get_serializer(model, serializer)
self._initialized = True
|
[
"def populate_registry():\n # We import the register_classes modules as a direct submodule of labscript_devices.\n # But they cannot all have the same name, so we import them as\n # labscript_devices._register_classes_script_<num> with increasing number.\n module_num = 0\n for devices_dir in LABSCRIPT_DEVICES_DIRS:\n for folder, _, filenames in os.walk(devices_dir):\n if 'register_classes.py' in filenames:\n # The module name is the path to the file, relative to the labscript suite\n # install directory:\n # Open the file using the import machinery, and import it as module_name.\n fp, pathname, desc = imp.find_module('register_classes', [folder])\n module_name = 'labscript_devices._register_classes_%d' % module_num\n _ = imp.load_module(module_name, fp, pathname, desc)\n module_num += 1",
"def load(self):\n log.debug(\"load iDevices\")\n idevicesDir = self.config.configDir/'idevices'\n if not idevicesDir.exists():\n idevicesDir.mkdir()\n self.__loadExtended()\n self.__loadGeneric()",
"def _load_devices(self):\n self.clear_cache()\n if self.devices:\n return\n try:\n r = self.call_api(endpoint=\"/devices\")\n tmp_devices = json.loads(r.text, object_pairs_hook=AttrDict)\n \n self.devices = AttrDict()\n for device in tmp_devices.devices:\n name = device.hostname.lower()\n self.devices[name] = device\n except requests.exceptions.HTTPError as err:\n raise LibrenmsException(\"Cannot load librenms devices into memory: %s\" % err)",
"def _load_device_support(self):\n self.discoverables = {}\n\n discoverables_format = __name__.rsplit('.', 1)[0] + '.discoverables.{}'\n\n for module_name in os.listdir(os.path.join(os.path.dirname(__file__),\n 'discoverables')):\n if module_name[-3:] != '.py' or module_name == '__init__.py':\n continue\n\n module_name = module_name[:-3]\n\n module = importlib.import_module(\n discoverables_format.format(module_name))\n\n self.discoverables[module_name] = \\\n getattr(module, 'Discoverable')(self)",
"def regice_init(self):\n for peripheral_name in self.svd.peripherals:\n peripheral = self.svd.peripherals[peripheral_name]\n peripheral_obj = RegicePeripheral(peripheral, self.client)\n setattr(self, peripheral_name, peripheral_obj)",
"def __loadUserExtended(self):\n idevicePath = self.config.configDir/'idevices'\n log.debug(\"load extended iDevices from \"+idevicePath)\n if not idevicePath.exists():\n idevicePath.makedirs()\n sys.path = [idevicePath] + sys.path\n for path in idevicePath.listdir(\"*idevice.py\"):\n log.debug(\"loading \"+path)\n moduleName = path.basename().splitext()[0]\n module = __import__(moduleName, globals(), locals(), [])\n module.register(self)\n for path in idevicePath.listdir(\"*block.py\"):\n log.debug(\"loading \"+path)\n moduleName = path.basename().splitext()[0]\n module = __import__(moduleName, globals(), locals(), [])\n module.register()",
"def __loadGeneric(self):\n genericPath = self.config.configDir/'idevices'/'generic.data'\n log.debug(\"load generic iDevices from \"+genericPath)\n if genericPath.exists():\n self.generic = persist.decodeObject(genericPath.bytes())\n self.__upgradeGeneric()\n else:\n self.__createGeneric()\n for idevice in self.generic:\n idevice.id = self.getNewIdeviceId()",
"def device_init(self):\n for driver in self.drivers:\n try:\n eval('self.{}_init()'.format(driver))\n except NotImplementedError:\n self.drivers[driver] = False",
"def init():\n global DEV\n if DEV is not None:\n return\n DEV = {}\n pygm.init()\n for k in xrange(pygm.get_count()):\n _,nm,inp,_,_ = pygm.get_device_info(k)\n if not inp:\n continue\n cls = {\n 'nanoKONTROL MIDI 1' : KorgNanoKontrol,\n }.get( nm, None )\n if cls:\n DEV[k] = cls( nm, k )",
"def __initialize_registers(self):\n for reg, value in self.__regs.iteritems():\n self.write_register(reg, value)",
"def initialize_models(self):\n pass",
"def _load_interfaces(self):\n self._load_devices()\n try:\n r = self.call_api(endpoint=\"/ports?columns=port_id,device_id,ifName\")\n self.interfaces = json.loads(r.text, object_pairs_hook=AttrDict)\n except requests.exceptions.HTTPError as err:\n raise LibrenmsException(\"Cannot load librenms interfaces into memory: %s\" % err)",
"def __init__(self):\n self.G = nx.MultiDiGraph()\n self.registry = {}\n # self.load_biothings()\n self.all_edges_info = self.G.edges(data=True)\n self.all_labels = {d[-1]['label'] for d in self.all_edges_info}\n self.all_inputs = {d[-1]['input_type'] for d in self.all_edges_info}\n self.all_outputs = {d[-1]['output_type'] for d in self.all_edges_info}",
"def __init__(self, **kwargs):\n self.catalog_items = {}\n\n for cls in self.__class__.__subclasses__():\n subclass = cls(**kwargs)\n namespace = subclass.namespace\n catalog_resources = subclass.catalog_resources\n\n for k, v in catalog_resources.items():\n subclass.load(k, v)\n\n setattr(self, namespace, subclass)",
"def __buildCalDeviceMap(self):\n self.__calDeviceMap = {}\n for o in self.__cal:\n if not o.name in self.__calDeviceMap:\n self.__calDeviceMap[o.name] = []\n self.__calDeviceMap[o.name].append(o)",
"def populate_registries(self):\n if len(self.parsers) == 0:\n raise ValueError('GraphBuilder object needs at least one parser in self.parsers to run self.populate_registries.')\n \n for parser in self.parsers:\n parser.resgister_nodes_and_edges(self.node_dict,self.edge_dict,self.graph)",
"def initialise(cls):\n cls.PUIDS = collections.defaultdict(dict)\n pron_id = PronomId.get_default()\n cls.PUIDS.update({pron_id.puid : pron_id})\n for form in cls.FIDO.formats:\n puid = cls.FIDO.get_puid(form)\n mime = form.find('mime')\n mime_text = None\n if not mime is None:\n mime_text = mime.text\n sig_name_text = None\n for sig in cls.FIDO.get_signatures(form):\n sig_name = sig.find('name')\n if not sig_name is None:\n sig_name_text = sig_name.text\n pron_id = PronomId(puid, sig_name_text, mime_text)\n cls.PUIDS.update({puid : pron_id})",
"def initializeRegisterDict(self):\n for day in DAYSOFWEEK: \n self.registerDict[day] = {}",
"def _load(self, create_if_unknown=False):\n try:\n record = self._tydb.search(Query().device_id == self.id)[0]\n except IndexError:\n if create_if_unknown:\n self._create()\n record = self._tydb.search(Query().device_id == self.id)[0]\n else:\n raise UnknownDevice(\"Unknown device\", self.id)\n\n self.name = record['name']\n self._registered = record['registered']\n self._seen_field_ids = []\n if 'fields' in record:\n self._seen_field_ids = record['fields']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register a device (specifier). The device should be registered as 'app_label.model_name' and the serializer as 'module.serializer_name'.
|
def register(self, specifier, serializer):
if self.initialized:
raise RuntimeError("The registry is already initialized")
if specifier in self._prematurely.keys():
if serializer == self._prematurely[specifier]:
raise RuntimeError("Double register for {0}".format(specifier))
return # pragma: no cover
self._prematurely[specifier] = serializer
|
[
"def device_register():\n\n resp = routing.base.generate_error_response(code=501)\n resp[\"message\"] = \"Not yet implemented.\"\n\n return json.dumps(resp) + \"\\n\"",
"def register_device(ctx, device, model, nickname, client_type):\n session, api_url, project_id = build_client_from_context(ctx)\n device_base_url = '/'.join([api_url, 'devices'])\n device_url = '/'.join([device_base_url, device])\n payload = {\n 'id': device,\n 'model_id': model,\n }\n if client_type:\n payload['client_type'] = 'SDK_' + client_type\n if nickname:\n payload['nickname'] = nickname\n\n logging.debug(json.dumps(payload))\n r = session.get(device_url)\n if r.status_code == 200:\n click.echo('Updating existing device: %s' % device)\n session.delete(device_url)\n r = session.post(device_base_url, data=json.dumps(payload))\n elif r.status_code in (400, 403, 404):\n click.echo('Creating new device')\n r = session.post(device_base_url, data=json.dumps(payload))\n else:\n raise failed_request_exception('Failed to check existing device', r)\n if r.status_code != 200:\n raise failed_request_exception('Failed to register device', r)\n click.echo('Device instance %s successfully registered' % device)\n logging.debug(r.text)",
"def register_device(self, device_name, ip=''):\n device_id = self.insert({'name':device_name, 'ip':ip}, 'device')\n if not device_id: # That means it failed to add it in the database\n logging.warning(\"ThermoMeasureHandler failled to register a new \\\n device (%s)\" % device_name)\n\n return device_id",
"def add_device(self, **kwargs):\n return self._make_request(\"devices/\", type=POST, **kwargs)",
"def add_device(self, device):\n self.device_list.append(device)",
"def add_device(device_type, config={}):\n return runtime.add_device(device_type, config)",
"def register_element(self, dev_name, idx=None):\n if dev_name not in self.devices:\n logger.error(\n 'Device {} missing. call add_device before adding elements'.\n format(dev_name))\n return\n group_name = self.system.__dict__[dev_name]._group\n if idx is None: # \"if not idx\" will fail for idx==0.0\n idx = dev_name + '_' + str(len(self.group[group_name].keys()))\n self.group[group_name][idx] = dev_name\n return idx",
"def __setattr__(self,name,value):\n def isInDicts(name,cls):\n for c in cls.mro()[:-1]:\n if name in c.__dict__:\n return True\n return False\n from inspect import stack\n if name in self.part_dict:\n head = self if self._head==0 else self.head\n TreeNode(self.part_dict[name]+self.head.nid,self.tree,head).record=value\n elif (name.startswith('_')\n or name in self.__dict__\n or isInDicts(name,self.__class__)\n or isinstance(stack()[1][0].f_locals.get('self',None),Device)):\n super(Device,self).__setattr__(name,value)\n else: print(\"\"\"WARNING: your tried to add the attribute or write to the subnode '%s' of '%s'.\nThis is a deprecated action for Device nodes outside of Device methods. You should prefix the attribute with '_'.\nIf you did intend to write to a subnode of the device you should check the proper path of the node: TreeNNF.\n\"\"\"%(name, self.path))",
"def attach_device(self, device):\n self.devices.append(device)",
"def set_device(self, device):\n self.device = device\n self.model = self.model.to(device)",
"def test_device_can_serialize() -> None:\n d = Device(device_details[\"6\"])\n assert (\n f\"{d}\"\n == '<Device id=\"6\" name=\"Office Door\" type=\"Generic Z-Wave Contact Sensor\">'\n )",
"def registerDev(dev):\n\tassert \"privateKeys\" in dev\n\tassert \"publicKeys\" in dev\n\tassert \"appInfo\" in dev\n\tassert \"type\" in dev\n\tglobal localDev\n\t\n\tfrom sha import sha\n\tlongDevId = LList(\"dev-\" + sha(dev[\"publicKeys\"][\"sign\"]).hexdigest()) + \"-\" + LRndSeq()\n\tlongestCommonDevId = 9\n\ttakenDevIds = set()\n\tfor d in devices():\n\t\tif d.publicKeys == dev[\"publicKeys\"]:\n\t\t\t# update if needed\n\t\t\tfor key,value in dev.items():\n\t\t\t\tif isinstance(value, dict): value = binstruct.Dict(value)\n\t\t\t\tsetattr(d, key, value)\n\t\t\tif localDev.publicKeys[\"sign\"] == d.publicKeys[\"sign\"]:\n\t\t\t\tlocalDev = d\n\t\t\treturn d\n\t\ttakenDevIds.add(d.devId)\n\t\tlongestCommonDevId = max(longestCommonDevId, commonStrLen(longDevId, d.devId))\n\tdevId = longDevId[:longestCommonDevId+1]\n\t\n\t# create new\n\tdevdir = devId\n\tfs.makedirs(devdir)\n\tbinstruct.write(fs.openW(devdir + \"/publicKeys\"), dev[\"publicKeys\"]).close()\n\tfor key in (\"appInfo\",\"type\"):\n\t\tbinstruct.writeEncrypt(\n\t\t\tfs.openW(devdir + \"/\" + key), dev[key],\n\t\t\tsign_rsaprivkey = dev[\"privateKeys\"][\"sign\"])\n\tnewdev = Dev(devId, binstruct.Dict(dev[\"publicKeys\"]))\n\tfor key,value in dev.items():\n\t\tif isinstance(value, dict): value = binstruct.Dict(value)\n\t\tsetattr(newdev, key, value)\n\tbinstruct.writeEncrypt(\n\t\tfs.openW(devdir + \"/name\"), localDevName(),\n\t\tsign_rsaprivkey = dev[\"privateKeys\"][\"sign\"])\n\tif localDev.publicKeys[\"sign\"] == newdev.publicKeys[\"sign\"]:\n\t\tlocalDev = newdev\n\treturn newdev",
"def register_format(self, serializer):\n self._serializers[serializer.format] = serializer",
"def register_adapter(self, adapter):\n key = (adapter.input_protocol, adapter.output_protocol)\n alist = self._adapters.get(key, None)\n if alist is None:\n alist = []\n self._adapters[key] = alist\n alist.append(adapter)",
"def add_device(self, device_id:int = 1, name:str=\"\", dtype:str=\"softhand\"):\n if self.is_update_request:\n print(\"Please add all device before start robot, stop, del and recreate robot\")\n return\n if self.serial_port is None:\n print(\"Please connect robot to serial port first to confirm the connectivity\")\n return\n if self.command_buf is None:\n print(\"Warning, command buffer do not initialize\")\n\n new_device = device(device_id, name, dtype, self.serial_port, self.command_buf)\n new_device.activate()\n self.devices.append(new_device)\n if self.is_lsl:\n print(\"Each device need to reconfigurate lsl.\")\n self.stop_lsl()\n self.start_lsl()",
"def mount_device(device_name):\n device_config = settings.config['network_device'][device_name]\n print('device_mount(' + device_name, *device_config.values(), sep=', ', end=')\\n')\n topology.mount(\n device_name,\n device_config['address'],\n device_config['port'],\n device_config['username'],\n device_config['password'])",
"def _device_to_xml_bare(cls, device: UpnpDevice) -> ET.Element:\n device_el = ET.Element(\"device\", xmlns=\"urn:schemas-upnp-org:device-1-0\")\n ET.SubElement(device_el, \"deviceType\").text = device.device_type\n ET.SubElement(device_el, \"friendlyName\").text = device.friendly_name\n ET.SubElement(device_el, \"manufacturer\").text = device.manufacturer\n ET.SubElement(device_el, \"manufacturerURL\").text = device.manufacturer_url\n ET.SubElement(device_el, \"modelDescription\").text = device.model_description\n ET.SubElement(device_el, \"modelName\").text = device.model_name\n ET.SubElement(device_el, \"modelNumber\").text = device.model_number\n ET.SubElement(device_el, \"modelURL\").text = device.model_url\n ET.SubElement(device_el, \"serialNumber\").text = device.serial_number\n ET.SubElement(device_el, \"UDN\").text = device.udn\n ET.SubElement(device_el, \"UPC\").text = device.upc\n ET.SubElement(device_el, \"presentationURL\").text = device.presentation_url\n\n icon_list_el = ET.SubElement(device_el, \"iconList\")\n for icon in device.icons:\n icon_el = ET.SubElement(icon_list_el, \"icon\")\n ET.SubElement(icon_el, \"mimetype\").text = icon.mimetype\n ET.SubElement(icon_el, \"width\").text = str(icon.width)\n ET.SubElement(icon_el, \"height\").text = str(icon.height)\n ET.SubElement(icon_el, \"depth\").text = str(icon.depth)\n ET.SubElement(icon_el, \"url\").text = icon.url\n\n service_list_el = ET.SubElement(device_el, \"serviceList\")\n for service in device.services.values():\n service_el = ET.SubElement(service_list_el, \"service\")\n ET.SubElement(service_el, \"serviceType\").text = service.service_type\n ET.SubElement(service_el, \"serviceId\").text = service.service_id\n ET.SubElement(service_el, \"controlURL\").text = service.control_url\n ET.SubElement(service_el, \"eventSubURL\").text = service.event_sub_url\n ET.SubElement(service_el, \"SCPDURL\").text = service.scpd_url\n\n device_list_el = ET.SubElement(device_el, \"deviceList\")\n for embedded_device in device.embedded_devices.values():\n embedded_device_el = cls._device_to_xml_bare(embedded_device)\n device_list_el.append(embedded_device_el)\n\n return device_el",
"def add_dev(self, dev):\n if 'id' not in dev:\n dev['id'] = 0\n if self.devs:\n try:\n dev['id'] = self.devs.index(None)\n except ValueError:\n dev['id'] = len(self.devs)\n if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:\n raise exceptions.DuplicateDeviceError(\n 'Duplicate device id: %d' % dev['id'])\n # Add holes to self.devs to ensure self.devs[dev['id']] will be the dev\n while dev['id'] >= len(self.devs):\n self.devs.append(None)\n required_keys = ('region', 'zone', 'ip', 'port', 'device', 'weight')\n missing = tuple(key for key in required_keys if key not in dev)\n if missing:\n raise ValueError('%r is missing required key(s): %s' % (\n dev, ', '.join(missing)))\n dev['weight'] = float(dev['weight'])\n dev['parts'] = 0\n dev.setdefault('meta', '')\n self.devs[dev['id']] = dev\n self.devs_changed = True\n self.version += 1\n return dev['id']",
"def diskio_write(self, device=None):\n self.writeCommand('diskio_write', device)\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a serializer from an app and a serializer specifier.
|
def _get_serializer(self, model, serializer):
app_lbl = getattr(model, "_meta").app_label
package = apps.get_app_config(app_lbl).module
if "." in serializer: # pragma: no cover
module, serializer = serializer.split(".", 1)
else:
module = "serializers"
module = import_module(".".join((package.__name__, module)))
return getattr(module, serializer)
|
[
"def get_serializer(self, format):\n creator = self.serializer_format_dict.get(format.upper())\n if not creator:\n raise ValueError(format)\n\n return creator()",
"def get_serializer(self, format):\n serializer = self._serializers.get(format)\n if not serializer:\n raise ValueError(format)\n return serializer()",
"def get_serializer_class(self):\n\n if self.request.version == 'v6':\n return RecipeSerializerV6\n elif self.request.version == 'v7':\n return RecipeSerializerV6",
"def get_model_serializer(model_class):\n serializer = {\n DiscoveredPackage: DiscoveredPackageSerializer,\n CodebaseResource: CodebaseResourceSerializer,\n }.get(model_class, None)\n\n if not serializer:\n raise LookupError(f\"No Serializer found for {model_class}\")\n\n return serializer",
"def get_serializer_class(self):\n \n if self.action == 'list':\n return FooSerializer\n elif self.action == 'retrieve':\n return FooSerializer\n elif self.action == 'create':\n return FooSerializer\n return FooSerializer",
"def get_serializer_class(self):\n\n if self.request.version == 'v6':\n return RecipeTypeListSerializerV6\n elif self.request.version == 'v7':\n return RecipeTypeListSerializerV6",
"def serializer_from_settings():\n if settings.ORG_PROFILE_SERIALIZER:\n return import_string(settings.ORG_PROFILE_SERIALIZER)\n\n return OrganizationSerializer",
"def _get_serializer(output):\n serializers = salt.loader.serializers(__opts__)\n try:\n return getattr(serializers, output)\n except AttributeError:\n raise CommandExecutionError(\n \"Unknown serializer `{}` found for output option\".format(output)\n )",
"def get_instance(self):\n\t\tif not SetupSerializer.__instance:\n\t\t\tSetupSerializer.__instance = SetupSerializer()\n\t\t\n\t\treturn SetupSerializer.__instance",
"def get_user_serializer():\n\n return import_string(drfr_settings.USER_SERIALIZER)",
"def get_serializer_class(self):\n renderer_class = getattr(\n getattr(getattr(\n self, 'request', None), 'accepted_renderer', None),\n 'serializer_class', None)\n if renderer_class is not None:\n return renderer_class\n\n return super(FormatAPIView, self).get_serializer_class()",
"def get_serializer_class(model_name, *args, **kwargs):\n if 'file' == model_name:\n return FileSerializer(*args, **kwargs)\n if 'image' == model_name:\n return ImageSerializer(*args, **kwargs)\n if 'video' == model_name:\n return VideoSerializer(*args, **kwargs)\n\n return TextSerializer(*args, **kwargs)",
"def meta_json_encoder_factory(root):\n imported_serializers = {}\n serializer_package_path = os.path.join(root, \"serializers\")\n for name in os.listdir(serializer_package_path):\n if name.endswith(\"_serializer.py\"):\n module_name = name.split('.')[0]\n module_path = '.'.join([root, \"serializers\", module_name])\n module = import_module(module_path)\n serializer_cls_name = to_class_name(module_name)\n serializer = module.__dict__[serializer_cls_name]\n model_name = generate_model_name_for_serializer(serializer)\n imported_serializers[model_name] = serializer\n\n assert imported_serializers != {}, \"should find at least one\"\n MetaJSONEncoder.serializers = imported_serializers\n return MetaJSONEncoder",
"def get_serializer_class(self):\n serializer_class = WellExportSerializerV2\n if (self.request.user and self.request.user.is_authenticated and\n self.request.user.groups.filter(name=WELLS_VIEWER_ROLE).exists()):\n serializer_class = WellExportAdminSerializerV2\n\n return serializer_class",
"def serializer_factory(model, serializer_class=serializers.ModelSerializer, attrs=None, meta=None):\n attrs = attrs or {}\n meta = meta or {}\n meta.setdefault(\"model\", model)\n attrs.setdefault(\"Meta\", type(str(\"Meta\"), (object,), meta))\n return type(str(\"%sSerializer\" % model.__name__), (serializer_class,), attrs)",
"def get_serializer_class(self):\n if self.request.auth and self.request.user.is_active:\n serializer = self.serializer_class\n else:\n serializer = UserPartialSerializer\n\n return serializer",
"def negotiate_serializer(self, *args, **kwargs):\n serializers = getattr(self, \"SERIALIZERS\",\n current_app.config[\"TOYBOX_SERIALIZERS\"])\n\n if len(serializers) > 0:\n mime_type = request.accept_mimetypes.best_match(serializers.keys())\n if mime_type is None:\n raise werkzeug.exceptions.NotAcceptable()\n return mime_type, serializers[mime_type]\n else:\n raise werkzeug.exceptions.InternalServerError()",
"def get_serializer(method='xml', **kwargs):\r\n if isinstance(method, str):\r\n method = {'xml': XMLSerializer,\r\n 'xhtml': XHTMLSerializer,\r\n 'html': HTMLSerializer,\r\n 'text': TextSerializer}[method.lower()]\r\n return method(**kwargs)",
"def get_serializer(secret_key=None):\n if secret_key is None:\n secret_key = SECRET_KEY\n return URLSafeSerializer(secret_key)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a WAV header that precedes the actual audio data sent to the speech translation service.
|
def get_wave_header(frame_rate,stream=True):
if frame_rate not in [8000, 16000]:
raise ValueError("Sampling frequency, frame_rate, should be 8000 or 16000.")
nchannels = channels
bytes_per_sample = sampwidth
data = b'RIFF'
# user 0 length for audio stream
if stream :
data += struct.pack('<L', 0)
else:
data += struct.pack('<L', 1900000)
data += b'WAVE'
data += b'fmt '
data += struct.pack('<L', 16)
data += struct.pack('<H', 0x0001)
data += struct.pack('<H', nchannels)
data += struct.pack('<L', frame_rate)
data += struct.pack('<L', frame_rate * nchannels * bytes_per_sample)
data += struct.pack('<H', nchannels * bytes_per_sample)
data += struct.pack('<H', bytes_per_sample * 8)
#wrong format
#data += struct.pack('<H', 0)
data += b'data'
if stream :
data += struct.pack('<L', 0)
else:
data += struct.pack('<L', 1900000-36)
return data
|
[
"def gen_header(sample_rate=int(config[\"SETTINGS\"][\"RECORD_SAMPLING_RATE\"]), bits_per_sample=16, channels=1):\n data_size = 2000*10**3\n o = bytes(\"RIFF\", 'ascii') # (4byte) Marks file as RIFF\n o += (data_size + 36).to_bytes(4, 'little') # (4byte) File size in bytes excluding this and RIFF marker\n o += bytes(\"WAVE\", 'ascii') # (4byte) File type\n o += bytes(\"fmt \", 'ascii') # (4byte) Format Chunk Marker\n o += (16).to_bytes(4, 'little') # (4byte) Length of above format data\n o += (1).to_bytes(2, 'little') # (2byte) Format type (1 - PCM)\n o += channels.to_bytes(2, 'little') # (2byte)\n o += sample_rate.to_bytes(4, 'little') # (4byte)\n o += (sample_rate * channels * bits_per_sample // 8).to_bytes(4, 'little') # (4byte)\n o += (channels * bits_per_sample // 8).to_bytes(2, 'little') # (2byte)\n o += bits_per_sample.to_bytes(2, 'little') # (2byte)\n o += bytes(\"data\", 'ascii') # (4byte) Data Chunk Marker\n o += data_size.to_bytes(4, 'little') # (4byte) Data size in bytes\n return o",
"def _update_header(params):\n header = params['header']\n if params['order_idx'] == -1:\n params['wavemap'][0] = params['wavecal']\n params['wavemap'][1] = params['spatcal']\n params['wavemap'][2] = params['order_mask']\n header['WCTYPE'] = ('1D', 'Wavecal type (2D or 1D)')\n header['BUNIT1'] = ('cm-1', 'Data units for first plane of image')\n header['BUNIT2'] = ('arcsec', 'Data units for second plane of image')\n header['BUNIT3'] = ('', 'Data units for third plane of image')",
"def get_wav_data():\n # Get requested word and query db for audio url\n word = request.args.get(\"word\")\n audio_url = services.query.get_audio_url(word)\n\n # Fetch audio file from MW API\n # and send to client\n data = requests.get(audio_url).content\n return send_file(BytesIO(data), mimetype='audio/wav')",
"def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n file_sam = open(os.path.join(args.output_dir,'watsonAligned.out.sam'))\n print(file_sam)\n for line in file_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args",
"def test_audio_convert_to_wav(self):\n pass",
"def save_speech(self, data):\n\n\t\tfilename = 'audio'\n\t\t# writes data to WAV file\n\t\tdata = ''.join(data)\n\t\twf = wave.open(filename + '.wav', 'wb')\n\t\twf.setnchannels(self.CHANNELS)\n\t\twf.setsampwidth(self.WIDTH)\n\t\twf.setframerate(self.RATE) # TODO make this value a function parameter?\n\t\twf.writeframes(data)\n\t\twf.close()\n\t\treturn filename + '.wav'",
"def send_wave_data(dev):\n f = open(\"wave1.bin\", \"rb\") # wave1.bin is the waveform to be sent\n data = f.read()\n print(\"data: \", data[0:10])\n print('write bytes:', len(data))\n dev.write_binary_values('C1:WVDT M50,WVNM,wave1,TYPE,5,LENGTH,32KB,FREQ,0.1,AMPL,5.0,OFST,0.0,PHASE,0.0,WAVEDATA,', data, datatype='B', header_fmt='empty') # SDG00100 series\n dev.write(\"C1:ARWV NAME,wave1\")\n f.close()",
"def createHeaderRecord(self):\r\n\r\n # ascii-character limit for every header record information (in bytes)\r\n lenVersion = 8\r\n lenLocalPatientID = 80\r\n lenLocalRecordingID = 80\r\n lenStartDate = 8\r\n lenStartTime = 8\r\n lennBytesHeader = 8\r\n lenEDFPlus = 44\r\n lennDataRecord = 8\r\n lenDurationDataRecord = 8\r\n lennSignals = 4\r\n \r\n HeaderInfolist = [self.Version, self.LocalPatientID, self.LocalRecordingID, self.StartDate, self.StartTime, self.nBytesHeader, self.EDFPlus,\\\r\n self.nDataRecord, self.DurationDataRecord, self.nSignals]\r\n lenHeaderInfo = [lenVersion, lenLocalPatientID, lenLocalRecordingID, lenStartDate, lenStartTime, lennBytesHeader, lenEDFPlus, lennDataRecord,\\\r\n lenDurationDataRecord, lennSignals]\r\n\r\n for i in range(len(HeaderInfolist)):\r\n maxlen = lenHeaderInfo[i]\r\n if len(HeaderInfolist[i]) > maxlen:\r\n # truncates the string if length is greater than limit\r\n HeaderInfolist[i] = HeaderInfolist[i][:maxlen] \r\n \r\n else:\r\n HeaderInfolist[i] = HeaderInfolist[i].ljust(maxlen)\r\n \r\n # converts the list to a string with no separator in between elements\r\n self.HeaderRecord = ''.join(HeaderInfolist) \r\n\r\n # concatenates each BioSignal TechInfo to the Header Record string\r\n for i in range(len(self.BioSignals[0].TechInfo)):\r\n for x in range(len(self.BioSignals)):\r\n self.HeaderRecord = self.HeaderRecord + self.BioSignals[x].TechInfo[i]",
"def header(proto, srcFile=None, robotName='', tags=[]):\n if srcFile:\n header.sourceFile = srcFile\n proto.write('#VRML_SIM R2021a utf8\\n')\n proto.write('# license: Apache License 2.0\\n')\n proto.write('# license url: http://www.apache.org/licenses/LICENSE-2.0\\n')\n if tags:\n proto.write('# tags: %s\\n' % ','.join(tags))\n if robotName:\n proto.write('# This is a proto file for Webots for the ' + robotName + '\\n')\n if header.sourceFile is not None:\n proto.write('# Extracted from: ' + header.sourceFile + '\\n\\n')",
"def sendHyBi00Preamble(self):\r\n\r\n protocol = \"wss\" if self.isSecure() else \"ws\"\r\n\r\n self.sendCommonPreamble()\r\n\r\n self.transport.writeSequence([\r\n \"Sec-WebSocket-Origin: %s\\r\\n\" % self.origin,\r\n \"Sec-WebSocket-Location: %s://%s%s\\r\\n\" % (protocol, self.host,\r\n self.location),\r\n \"WebSocket-Protocol: %s\\r\\n\" % self.codec,\r\n \"Sec-WebSocket-Protocol: %s\\r\\n\" % self.codec,\r\n \"\\r\\n\",\r\n ])",
"def sendHyBi00Preamble(self):\n\n protocol = \"wss\" if self.isSecure() else \"ws\"\n\n self.sendCommonPreamble()\n\n self.transport.writeSequence([\n \"Sec-WebSocket-Origin: %s\\r\\n\" % self.origin,\n \"Sec-WebSocket-Location: %s://%s%s\\r\\n\" % (protocol, self.host,\n self.location),\n \"WebSocket-Protocol: %s\\r\\n\" % self.codec,\n \"Sec-WebSocket-Protocol: %s\\r\\n\" % self.codec,\n \"\\r\\n\",\n ])",
"def _write_header(self, sampling_rate, dtype, nchannels, write_fact=None):\n # this is a bit tricky b/c Chunk is a read-only class\n # however, this only gets called for a pristine file\n # we'll have to go back and patch up the sizes later\n import struct\n\n # main chunk\n out = struct.pack(b\"<4sl4s\", b\"RIFF\", 0, b\"WAVE\")\n # fmt chunk\n tag = etag = self._file_format(self._dtype)\n fmt_size = 16\n if self._dtype.itemsize > 2 or self._nchannels > 2:\n fmt_size = 40\n tag = WAVE_FORMAT_EXTENSIBLE\n\n out += struct.pack(\n b\"<4slHHllHH\",\n b\"fmt \",\n fmt_size,\n tag,\n self._nchannels,\n self._framerate,\n self._nchannels * self._framerate * self._dtype.itemsize,\n self._nchannels * self._dtype.itemsize,\n self._dtype.itemsize * 8,\n )\n\n if tag == WAVE_FORMAT_EXTENSIBLE:\n out += struct.pack(\n b\"<HHlH14s\",\n 22,\n self._dtype.itemsize * 8,\n # use the full bitdepth\n (1 << self._nchannels) - 1,\n etag,\n b\"\\x00\\x00\\x00\\x00\\x10\\x00\\x80\\x00\\x00\\xaa\\x008\\x9b\\x71\",\n )\n\n # fact chunk\n if write_fact or (\n write_fact is None\n and tag in (WAVE_FORMAT_IEEE_FLOAT, WAVE_FORMAT_EXTENSIBLE)\n ):\n out += struct.pack(b\"<4sll\", b\"fact\", 4, self._dtype.itemsize)\n # beginning of data chunk\n out += struct.pack(b\"<4sl\", b\"data\", 0)\n\n self.fp.seek(0)\n self.fp.write(out)\n self._data_offset = self.fp.tell()\n self._bytes_written = 0",
"def speechrec(wavobj):\n # Decode base64 attachment\n outobj = base64.b64decode(wavobj)\n # Set connection parameters\n authhost = 'api.cognitive.microsoft.com'\n authpath = '/sts/v1.0/issueToken'\n speechhost = 'speech.platform.bing.com'\n speechpath = '/recognize?scenarios=smd&appid=D4D52672-91D7-4C74-8AD8-42B1D98141A5&locale=' + globalconfig('speech_language') + \"&format=json&device.os=FreePBX&version=3.0&instanceid=\" + str(uuid.uuid4()) + \"&requestid=\" + str(uuid.uuid4())\n authheaders = {'Ocp-Apim-Subscription-Key' : globalconfig('ms_cognitive_api_key'),\n 'Content-Length' : '0'}\n conn = httplib.HTTPSConnection(authhost)\n # Get authentication token\n conn.request(method=\"POST\", url=authpath, headers=authheaders, body=\"\")\n response = conn.getresponse()\n token = response.read()\n # If we don't get a token then return\n if int(response.status) != 200:\n return \"No transcription available - Auth Error: \" + str(token)\n # Setup for transcription\n headerfields = {\"Accept\" : \"application/json;text/xml\",\n \"Content-Type\" : 'audio/wav; codec=\"audio/pcm\"; samplerate=8000; trustsourcerate=false'\n }\n try:\n headerfields[\"Authorization\"] = \"Bearer \" + token\n except:\n headerfields[\"Authorization\"] = \"Bearer \" + token.decode('utf-8')\n conn = httplib.HTTPSConnection(speechhost)\n # Send wave file for transcription\n conn.request(method=\"POST\", url=speechpath, headers=headerfields, body=outobj)\n resp = conn.getresponse()\n # If there's a problem then return\n if int(resp.status) != 200:\n return \"No transcription available - Server Error: \" + resp.read()\n respval = json.loads(resp.read())\n return respval['header']['name']",
"def make_header(prefix: bytes, data: Mapping, version: Tuple[int, int]) -> bytearray:\n header = bytearray()\n\n header += prefix\n header += bytes(version)\n\n version_start = len(header)\n\n header += bytes(4) # will hold the data length\n\n data_start = len(header)\n\n header += json.dumps(data, sort_keys=True).encode('ascii')\n header += b'\\n'\n\n # want the entire thing to be divisible by 64 for alignment\n if len(header) % 64:\n header += b' '*(64 - len(header) % 64)\n\n assert not len(header) % 64\n\n header[version_start:version_start+4] = struct.pack('<I', len(header) - data_start)\n\n return header",
"def create_header(freqs):\n header = ''\n for i in range (len(freqs)):\n if freqs[i] != 0:\n header = header + str(i) + ' ' + str(freqs[i]) + ' '\n return header[:len(header) - 1]",
"def create_audio(self, words) :\n\t\t# err, obj_list = self.check_words(words)\n\t\t# if err != 0:\n\t\t# \treturn err, obj_list\n\n\t\tcombined_audio = AudioSegment.silent(duration=0)\n\t\taudio = AudioSegment.silent(duration=0)\n\t\tAPP_ROOT = os.path.abspath(os.path.dirname(wordclips.__file__))\n\t\t# SITE_ROOT = os.path.dirname(os.path.realpath(__file__))\n\n\t\tfor word in words:\n\t\t\tif word == '-': #dashes seperated by space will insert extra silences\n\t\t\t\taudio = AudioSegment.silent(duration=100)\n\t\t\telse:\n\t\t\t\t# For now it only word with local folders\n\t\t\t\t# TODO: graceful fail over\n\t\t\t\tclip_path = settings.MEDIA_ROOT + '/' + word + \"/1.wav\"\n\t\t\t\tprint(clip_path)\n\n\t\t\t\t# check if the clip exists\n\t\t\t\tif os.path.isfile(clip_path):\n\t\t\t\t\taudio = AudioSegment.from_wav(clip_path)\n\t\t\t\t\tcombined_audio += audio + AudioSegment.silent(duration=50)\n\t\t\t\telif os.path.isfile(settings.MEDIA_ROOT + '/' + word.lower() + \"/1.wav\"):\n\t\t\t\t\taudio = AudioSegment.from_wav(settings.MEDIA_ROOT + '/' + word.lower() + \"/1.wav\")\n\t\t\t\t\tcombined_audio += audio + AudioSegment.silent(duration=50)\n\t\t\t\telse:\n\t\t\t\t\t# Find to find the current word in the db\n\t\t\t\t\treturn -1, word\n\n\t\tcombined_audio.export(APP_ROOT + \"/../static/they-say.wav\", format=\"wav\")\n\t\t# success\n\t\treturn 0, []",
"def generate_sound(data):\n\n\t# wav generation inspired by:\n\t# https://soledadpenades.com/2009/10/29/fastest-way-to-generate-wav-files-in-python-using-the-wave-module/\n\n\tnoise_output = wave.open('noise.wav', 'w')\n\tnoise_output.setparams((2, 2, SOUND_SAMPLING_RATE, 0,\n\t\t\t\t\t\t\t'NONE', 'not compressed'))\n\n\tfor i in range(0, len(data)):\n\t\t\t# scale from (0, 255) to (-32767, 32767)\n\t\t\tvalue = (data[i] - 128) * 256 \n\t\t\tpacked_value = struct.pack('h', value)\n\t\t\tnoise_output.writeframes(packed_value)\n\t\t\tnoise_output.writeframes(packed_value)\n\n\tnoise_output.close()",
"def generate_header(header_dict):\n header = \"\"\n for key, value in header_dict.items():\n if len(key) > 8:\n raise Exception(\"Header key should be no more than 8 characters\")\n if len(str(value)) > 70:\n print(value)\n raise Exception(\n \"Header value should be no more than 70 characters\")\n key = key.ljust(8)\n if type(value) == int or type(value) == float:\n value = str(value) + \" \" * 50\n value = value.rjust(70)\n else:\n value = value.ljust(70)\n header += key + \"= \" + value\n header += \"END\".ljust(80)\n if \"DIRECTIO\" in header_dict.keys() and header_dict[\"DIRECTIO\"] == 1:\n header += \" \" * (512 - (len(header) % 512))\n return header.encode(\"utf-8\")",
"def wav_data(mono_wav):\n the_data = fft.data_from_file(mono_wav)\n return the_data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a disease subset of functions. A function is considered a disease if its lowercase name is the same as its class and its name is not a function category. Build must be run first.
|
def disease_function_subset(ipa, network_dir, printing=False):
disease_names = set()
for function in ipa.functions:
if function.name.lower() == function.function_class.lower():
disease_names.add(function.name)
diseases_to_remove = read_diseases_to_remove(network_dir)
disease_names -= diseases_to_remove
disease_functions = {ipa.name_to_function[disease] for disease in disease_names}
print len(disease_functions), 'diseases'
## print random sample of removed function names
omitted_functions = {function.name for function in ipa.functions - disease_functions}
if printing:
for function_name in random.sample(omitted_functions, 20):
print function_name
return disease_functions
|
[
"def get_functions(text, startswith='def '):\n return get_definition(text, startswith)",
"def get_disasm_all_functions_from(self, _funcea):\n\t\tfdisasm = {}\n\t\tif (_funcea != BADADDR):\n\t\t\tfroot_disasm = self.get_disasm_function_line(_funcea)\n\t\t\tfroot_name = GetFunctionName(_funcea)\n\t\t\tfdisasm[froot_name] = froot_disasm\n\t\t\tfcalled = self.get_all_sub_functions_called(_funcea, _visited=[])\n\t\t\tprint(fcalled)\n\t\t\tif (len(fcalled) > 0):\n\t\t\t\tprint(\"[*] Retrieving assembly from {:d} function(s).\".format(len(fcalled)))\n\t\t\t\tfor finfo in fcalled:\n\t\t\t\t\tfea = finfo[1]\n\t\t\t\t\tfname = finfo[2]\n\t\t\t\t\tfcode = self.get_disasm_function_line(fea)\n\t\t\t\t\tfdisasm[fname] = fcode\n\t\treturn fdisasm",
"def get_category_of_function(func = None, func_name = None):\n \n if func_name is None:\n func_name = get_name_of_function(func)\n\n for k, c in CATEGORIES.items():\n if not callable(c):\n ops = operations_in_menu(c)\n if func_name in ops:\n return c\n return None",
"def all_from_tree(tree: tree.Tree):\n functions = []\n\n top_level = tree.find_data('top_level_instruction')\n for inst in top_level:\n if len(inst.children) > 0 and inst.children[0].data in ['annotation', 'function']:\n functions.append(Function.from_tree(inst.children[0]))\n\n return functions",
"def getModuleFunctions(self):\r\n output = []\r\n for seg_ea in idautils.Segments():\r\n for func_ea in idautils.Functions(idc_bc695.SegStart(seg_ea), idc_bc695.SegEnd(seg_ea)):\r\n func = Function.Function(func_ea)\r\n # if the function starts with '<moduleName>'...\r\n funcName = func.getName()\r\n inModel = len(funcName) >= len(self.name)+1 and funcName[0:len(self.name)+1] == self.name + '_'\r\n if inModel:\r\n output.append(func)\r\n return output",
"def get_functions_dictionary():\n return {\n 'tfidf': extract_tf_idf,\n 'post_length': extract_post_length,\n 'topics': extract_topics,\n 'screamer': extract_screamer,\n 'words': extract_meaningful_words_existence,\n 'off_dis': extract_distance_from_offensive,\n 'not_off_dis': extract_distance_from_not_offensive,\n 'wmd_off': extract_wmd_offensive,\n 'wmd_not_off': extract_wmd_not_offensive,\n 'dis_avg_vec': extract_distance_from_avg_vector\n }",
"def search_code_all_functions_from(self, _funcea, _search):\n\t\tresults = []\n\t\tif (_funcea != BADADDR):\n\t\t\tdisasm = self.get_disasm_all_functions_from(_funcea)\n\t\t\tfor fname, fcode in disasm.iteritems():\n\t\t\t\tfor ins in fcode:\n\t\t\t\t\tif re.search(_search, ins):\n\t\t\t\t\t\tresults.append((fname, ins))\n\t\treturn results",
"def get_all_func_instr_seg(self, _ea=ScreenEA()):\n\t\treturn self.get_all_functions_instr(SegStart(_ea), SegEnd(_ea))",
"def retFC():\n return funClasses",
"def get_all_functions(self, _startea, _endea):\n\t\tfunctions = {}\n\t\tcurEA = _startea\n\t\tfunc = self.get_function_at(curEA)\n\t\tif (func):\n\t\t\twhile (curEA <= _endea):\n\t\t\t\tname = GetFunctionName(curEA)\n\t\t\t\tfunctions[name] = func\n\t\t\t\tfunc = idaapi.get_next_func(curEA)\n\t\t\t\tif (func):\n\t\t\t\t\tcurEA = func.startEA\n\t\t\t\telse:\n\t\t\t\t\tNextHead(curEA)\n\t\t\treturn functions",
"def get_func_list(self):\n return self.func_set.values()",
"def generate_functions(self):\n for tag, obj in self.objects.iteritems():\n if (obj['class'] == 'Function' and not self.is_ignored_tag(tag)):\n self.generate_function(obj)",
"def find_functions(text):\n\n return list(set([\n re.split('[ (]*', line)[1]\n for line in [\n line.strip()\n for line in text.splitlines()\n if 'def ' in line\n ]\n if line.startswith('def ')\n ]))",
"def createFunctionDropwDowns(self):\n\n all_functions = inspect.getmembers(functionLib, inspect.isfunction) \n\n self.c_functions = []\n self.i_functions = []\n self.r_functions = []\n self.v_functions = []\n self.l_functions = []\n\n for functionTupel in all_functions:\n if \"c_\" in functionTupel[0]:\n self.c_functions.append(functionTupel)\n\n elif \"i_\" in functionTupel[0]:\n self.i_functions.append(functionTupel)\n elif \"r_\" in functionTupel[0]:\n self.r_functions.append(functionTupel)\n elif \"v_\" in functionTupel[0]:\n self.v_functions.append(functionTupel)\n elif \"l_\" in functionTupel[0]:\n self.l_functions.append(functionTupel)\n\n \n self.function_c_DropwDown = QtGui.QComboBox()\n self.function_c_DropwDown.addItem(\"Choose Function\")\n self.function_i_DropwDown = QtGui.QComboBox()\n self.function_i_DropwDownNew = QtGui.QComboBox()\n self.function_i_DropwDown.addItem(\"Choose Function\")\n self.function_i_DropwDownNew.addItem(\"Choose Function\")\n self.function_r_DropwDown = QtGui.QComboBox()\n self.function_r_DropwDown.addItem(\"Choose Function\")\n self.function_v_DropwDown = QtGui.QComboBox()\n self.function_v_DropwDownNew = QtGui.QComboBox()\n self.function_v_DropwDown.addItem(\"Choose Function\")\n self.function_v_DropwDownNew.addItem(\"Choose Function\")\n self.function_l_DropwDown = QtGui.QComboBox()\n self.function_l_DropwDown.addItem(\"Choose Function\")\n\n for functionTupel in self.c_functions:\n self.function_c_DropwDown.addItem(functionTupel[0])\n\n for functionTupel in self.i_functions:\n self.function_i_DropwDown.addItem(functionTupel[0])\n self.function_i_DropwDownNew.addItem(functionTupel[0])\n\n for functionTupel in self.r_functions:\n self.function_r_DropwDown.addItem(functionTupel[0])\n \n for functionTupel in self.v_functions:\n self.function_v_DropwDown.addItem(functionTupel[0])\n self.function_v_DropwDownNew.addItem(functionTupel[0])\n\n for functionTupel in self.l_functions:\n self.function_l_DropwDown.addItem(functionTupel[0])\n\n self.function_c_DropwDown.hide()\n self.function_i_DropwDown.hide()\n #self.function_r_DropwDown.hide()\n self.function_v_DropwDown.hide()\n self.function_l_DropwDown.hide()",
"def find_similar_functions_in_tree(self, _funcea, _startea, _threshold=1.0):\n\t\tresults = []\n\t\tif (_funcea != BADADDR):\n\t\t\ttree = self.get_all_sub_functions_called(_startea, _visited=[])\n\t\t\tfor fcall in tree:\n\t\t\t\tfcalled_ea = fcall[1]\n\t\t\t\tfcalled_name = fcall[2]\n\t\t\t\tratio = self.compare_functions(_funcea, fcalled_ea)\n\t\t\t\tif (ratio >= _threshold):\n\t\t\t\t\tresults.append([fcalled_ea, fcalled_name, ratio])\n\t\t\t\n\t\treturn results",
"def createFunctions(self,script):\n functions = []\n scriptArray = script.splitlines()\n #Go through each line looking for class text\n for index,line in enumerate(scriptArray):\n if len(line) > 4:\n if line[0:3] == \"def\":\n #looks for ending of the class\n finishLine = None\n for index2,line2 in enumerate(scriptArray[index+1::]):\n if finishLine is None and len(line2) > 0 and line2[0] != \" \":\n finishLine = index2\n # Creats a class with the relevant code appending it to the classes array\n if finishLine is not None:\n functions.append(Function(\"\\n\".join(scriptArray[index:finishLine])))\n else:\n functions.append(Function(\"\\n\".join(scriptArray[index::])))",
"def selectEventFunctions(self, s0):\n eps = self.epsilon\n\n S = self.switchingFunction(s0)\n dS = self.switchingFunctionDer(s0)[0]\n\n # finding which case we are in:\n # - case 2 if -eps < S < eps (medium thrust)\n # - case 1 if S < -eps (full thrust)\n # - case 0 if eps < S (no thrust)\n\n case = 0 if S > eps else 1 if S < -eps else 2\n\n # checking to see if S is within a certain tolerance from epsilon\n withinTol = np.abs((np.abs(S) - eps)) < 1e-10\n # determine if there is a case error if within tolerance\n if withinTol:\n # not the minimum fuel case\n if eps != 0:\n # at the upper bound, case determined by derivative\n if S > 0:\n case = 2 if dS < 0 else 0\n # at the lower bound, case determined by derivative\n else:\n case = 2 if dS > 0 else 1\n # minimum fuel case, only two cases\n else:\n case = 0 if dS > 0 else 1\n\n eventFunctions = []\n CrossingUpperBound = lambda t, s: self.switchingFunction(s) - eps\n CrossingLowerBound = lambda t, s: self.switchingFunction(s) + eps\n\n CrossingUpperBound.terminal = True\n CrossingLowerBound.terminal = True\n\n if case == 0:\n # crossing upper epsilon from above\n CrossingUpperBound.direction = -1\n # appending event function\n eventFunctions.append(CrossingUpperBound)\n elif case == 1:\n # crossing lower epsilon from below\n CrossingLowerBound.direction = 1\n # appending event function\n eventFunctions.append(CrossingLowerBound)\n else:\n # can either cross lower epsilon from above or upper from below\n CrossingLowerBound.direction = -1\n CrossingUpperBound.direction = 1\n # appending event function\n eventFunctions.append(CrossingUpperBound)\n eventFunctions.append(CrossingLowerBound)\n\n return eventFunctions, case",
"def get_functions(self, scenario, bodies):\n for function in scenario.functions:\n if function.analysis_type == 'aerodynamic':\n # the [6] index returns the value\n if self.comm.Get_rank() == 0:\n function.value = interface.design_pull_composite_func(function.id)[6]\n function.value = self.comm.bcast(function.value, root=0)\n\n return",
"def get_filter_function(study_name: str) -> Callable:\n if study_name not in _filter_funcs:\n return _filter_funcs[\"*\"]\n\n return _filter_funcs[study_name]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Calculates conduit lengths in the network assuming pores are cones and throats are cylinders. A conduit is defined as ( 1/2 pore full throat 1/2 pore ).
|
def cones_and_cylinders(
network, pore_diameter="pore.diameter", throat_diameter="throat.diameter"
):
L_ctc = _get_L_ctc(network)
D1, Dt, D2 = network.get_conduit_data(pore_diameter.split(".", 1)[-1]).T
L1 = D1 / 2
L2 = D2 / 2
# Handle throats w/ overlapping pores
_L1 = (4 * L_ctc**2 + D1**2 - D2**2) / (8 * L_ctc)
mask = L_ctc - 0.5 * (D1 + D2) < 0
L1[mask] = _L1[mask]
L2[mask] = (L_ctc - L1)[mask]
Lt = np.maximum(L_ctc - (L1 + L2), 1e-15)
return np.vstack((L1, Lt, L2)).T
|
[
"def calcNumberOfCoolers(context):\n diameter = context[\"diameter\"]\n propellant = context.get(\"propellant\", 0)\n if propellant == 0:\n return 0\n coolers = math.log(calcClipToAutoloader(context) / (6 * (5*diameter)**1.5 * (propellant ** 0.5)), 0.92)\n if coolers < 0:\n coolers = 0\n return math.ceil(coolers)",
"def get_len_c(self):\n\t\ta2 = power(self.get_len_a, 2)\n\t\tb2 = power(self.get_len_b, 2)\n\n\t\treturn sqrt(a2 + b2)",
"def computeCnxLen(compo):\n # compo = (row, col)\n length = 0\n if compo[0] + 1 < self._width:\n length += self._distance(\n pos2Coord(componentsPosition(compo[0], compo[1])),\n pos2Coord(componentsPosition(compo[0] + 1, compo[1])))\n if compo[1] + 1 < self._height:\n length += self._distance(\n pos2Coord(componentsPosition(compo[0], compo[1])),\n pos2Coord(componentsPosition(compo[0], compo[1] + 1)))\n return length",
"def getDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getDispersiveWetPathLength_result = _quant_dc(self._swigobj.getDispersiveWetPathLength(_pc.document['nc'], _pc.document['spwid']))\n return _getDispersiveWetPathLength_result",
"def getO3LinesPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO3LinesPathLength_result = _quant_dc(self._swigobj.getO3LinesPathLength(_pc.document['nc'], _pc.document['spwid']))\n return _getO3LinesPathLength_result",
"def length(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n length = np.sqrt( (act_loc-myo_loc)**2 + ls**2 )\n return length",
"def getN2OLinesPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getN2OLinesPathLength_result = _quant_dc(self._swigobj.getN2OLinesPathLength(_pc.document['nc'], _pc.document['spwid']))\n return _getN2OLinesPathLength_result",
"def num_cones(self):\r\n return self.t.size[0]*self.t.size[1]",
"def getO2LinesPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO2LinesPathLength_result = _quant_dc(self._swigobj.getO2LinesPathLength(_pc.document['nc'], _pc.document['spwid']))\n return _getO2LinesPathLength_result",
"def critical_length(self) -> float:\n return self._REYNOLDS_NUMBER_AT_TRANSITION * self.fluid_state.dynamic_viscosity / \\\n (self.fluid_state.density * self.velocity)",
"def length(coords):\r\n if len(coords) == 0:\r\n river_len2 = 0\r\n else:\r\n lines = LineString(coords)\r\n river_len2 = 0\r\n for i in range(len(coords)-1):\r\n point1 = lines.coords[i]\r\n point2 = lines.coords[i + 1]\r\n river_len2 += dist(point1,point2) \r\n river_len2 = round(river_len2/1000,2)\r\n return river_len2",
"def getNonDispersiveDryPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getNonDispersiveDryPathLength_result = _quant_dc(self._swigobj.getNonDispersiveDryPathLength(_pc.document['nc'], _pc.document['spwid']))\n return _getNonDispersiveDryPathLength_result",
"def network_diameter(self):\n\n # uplink traversals + downlink traversals + 2 hops to hosts\n return 2*self.num_levels + 2",
"def size(self):\r\n cones = []\r\n cone_size = self.cone_size()\r\n for i in range(self.num_cones()):\r\n cones.append(cone_size)\r\n return cones",
"def get_magnet_length(self, muonShield):\n length = 2 * muonShield.GetShape().GetDZ()\n return length",
"def getNonDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getNonDispersiveWetPathLength_result = _quant_dc(self._swigobj.getNonDispersiveWetPathLength(_pc.document['nc'], _pc.document['spwid']))\n return _getNonDispersiveWetPathLength_result",
"def chord_length (radius,rho,phi):\n chord = 1 - (rho * rho * np.sin(phi) * np.sin(phi))\n chord = radius * (np.sqrt(chord) + rho * np.cos(phi))\n chord[np.isnan(chord)] = 0\n chord[chord<0] = 0\n\n return chord",
"def cone_volume(radius, height):\n return (pi * radius ** 2 * height)/3",
"def calculate_diameter(self) -> qty.Length:\n # given: friction loss and flow rate\n rho = self._fluid.density()\n mu = self._fluid.kinematic_viscosity()\n pi = math.pi\n dpf = self._dp_fric\n V = self._flow_rate\n l = self._length\n f = 0.03\n i = 0\n di: float = 0.0\n while i < self._max_iterations:\n di = (f * l / dpf * rho * 8.0 / (pi ** 2.0) * V ** 2.0) ** (1.0 / 5.0)\n A = pi * di ** 2.0 / 4.0\n v = V / A\n re = reynolds_number(v, di, mu)\n rel_pipe_rough = self._rough / di\n f_new = darcy_friction_factor(re, rel_pipe_rough)\n if abs(f_new - f) <= 1.0e-5:\n break\n else:\n f = f_new\n i += 1\n if i == self._max_iterations:\n raise OverflowError('too many iterations. no solution found')\n self._cross_section.diameter = qty.Length(di)\n return qty.Length(di)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For a tsv file, compute sums, sumsquares and counts for each of the given columns within groups defined by groupCols. >>> z = IDotData( names = ( 'a', 'b' ), Records = ( ( 1, 2 ), ( 1, 3 ), ( 2, 4 ), ( 2, 5 ) ) ) >>> computeSumsWithinGroups( inFN = z, cols = 'b', groupCols = 'a', outFN = sys.stdout )
|
def computeSumsWithinGroups( inFN, cols, groupCols, groupsAreContiguous = True, outFN = None, getio = None ):
cols = tuple( MakeSeq( cols ) )
groupCols = tuple( MakeSeq( groupCols ) )
if outFN is None: outFN = AddFileSubdir( 'stats', AddFileSfx( inFN, 'sums', *( cols + groupCols ) ) )
def combiner( inFNs, outFN ): IDotData.mergeColumnSummaries( iDotDatas = inFNs, cols = cols, groupCols = groupCols ).save( outFN )
if getio: return dict( depends_on = inFN, creates = outFN,
splitByCols = { inFN: dict( keyCols = () ) },
combiner = { outFN: combiner } )
IDotData( inFN ).summarizeColumnsWithinGroups( **Dict( 'cols groupCols groupsAreContiguous' ) ).save( outFN )
|
[
"def csvsum():\n parser = _default_arguments()\n parser.add_argument('-c', '--cols', nargs='*',\n help='A list of columns. Each column will have a sum generated.')\n parser.add_argument('-a', '--alphabetize',\n action='store_true',\n help='A flag to indicate the output should be displayed in ' \\\n 'alphabetical order. This argument is only valid if the output ' \\\n 'is transposed. Equivalent to `csvsum ... -T | sort`.')\n parser.add_argument('-p', '--precision',\n type=int,\n help='The number of decimal places to show.')\n parser.add_argument('-t', '--to',\n dest='outformat',\n nargs='?',\n default='csv',\n help='Output file type. Default CSV.')\n parser.add_argument('-T', '--transpose',\n action='store_true',\n help='A flag to indicate the output should be transposed so that ' \\\n 'there are two columns and N rows, where N equals the number ' \\\n 'of columns indicated to sum.')\n\n args, remainder = parser.parse_known_args()\n\n informat = getattr(parsers, args.informat)(designation='inparser')\n outformat = getattr(parsers, args.outformat)(designation='outparser')\n\n informat.parse_args(remainder)\n outformat.parse_args(remainder)\n\n cols, sums = zip(*csvutils.fmap(informat.file, sum,\n parser=informat,\n columns=args.cols))\n\n if args.precision:\n sums = ['{:.{}f}'.format(x, args.precision) for x in sums]\n\n if args.transpose is True:\n if args.alphabetize is True:\n outformat.rows = sorted(zip(cols, sums), key=lambda x: x[0])\n else:\n outformat.rows = zip(cols, sums)\n else:\n outformat.header = cols\n outformat.rows = [sums]\n\n outformat.write(outformat.file)",
"def test_sum_columns():\n rows = query_csv.iter_csv_rows(_PATH, delim=' ')\n _sum = query_csv.sum_columns(rows, ['i', 'f'])\n assert _sum == 12.0",
"def do_file_sums(self, fname):\n\n sums=self.get_sums_struct()\n\n print(\"processing:\",fname)\n try:\n data=fitsio.read(fname) \n except IOError as err:\n print(str(err))\n return None\n\n if 'shear_index' not in data.dtype.names:\n data=self._add_shear_index(data)\n else:\n w,=where(data['shear_index']==-1)\n if w.size > 0:\n data['shear_index'][w]=0\n\n data=self._preselect(data)\n\n sums=self.do_sums1(data)\n\n return sums",
"def aggregate(self, **colname_function_pairs):\n group_colnames = self._group_colnames\n data = self.sort(**dict.fromkeys(group_colnames, 1))\n data._index_ = np.arange(data.nrow)\n stat = data.unique(*group_colnames).select(\"_index_\", *group_colnames)\n indices = np.split(data._index_, stat._index_[1:])\n group_aware = [getattr(x, \"group_aware\", False) for x in colname_function_pairs.values()]\n if any(group_aware):\n groups = Vector.fast(range(len(indices)), int)\n n = Vector.fast(map(len, indices), int)\n data._group_ = np.repeat(groups, n)\n slices = None\n for colname, function in colname_function_pairs.items():\n if getattr(function, \"group_aware\", False):\n # function might leave Nones in its output,\n # once those are replaced with the proper default\n # we can do a fast conversion to DataFrameColumn.\n column = function(data)\n default = function.default\n for i in range(len(column)):\n if column[i] is None:\n column[i] = default\n assert len(column) == stat.nrow\n column = DataFrameColumn.fast(column)\n stat[colname] = column\n else:\n # When using an arbitrary function, we cannot know\n # what special values to expect and thus we end up\n # needing to use the slow Vector.__init__.\n if slices is None:\n slices = [data._view_rows(x) for x in indices]\n stat[colname] = [function(x) for x in slices]\n return stat.unselect(\"_index_\", \"_group_\")",
"def opsum(infile_list,outfile,numfiles,hlen):\n file_array = []\n # create array of files where each file is vector of line strings\n for infile in infile_list:\n with open(infile,\"r+\") as file:\n file_array.append(file.readlines())\n print(\"Number of files: \",len(file_array))\n # write the header and break after triggers line\n with open(outfile,\"w+\") as newfile:\n breakFlag = False\n tot_trig = 0\n for i,line in enumerate(file_array[0]):\n if breakFlag:\n break\n # sum the triggers from all files\n if \"total number of triggers\" in line:\n breakFlag = True\n for file in file_array:\n print(int(file[i].split()[0]))\n # get the triggers from each file and sum them\n tot_trig += int(file[i].split()[0])\n line = \"{} total number of triggers\\n\".format(tot_trig)\n newfile.write(line)\n # free memory\n del file_array\n data = []\n for file in infile_list:\n print(file)\n data.append(np.genfromtxt(file,unpack=True,skip_header=hlen))\n # The shape of the data should be ( #files x #cols x #rows ) = 3 dims.\n if len(np.shape(data)) < 3:\n raise ValueError(\"Files do not match. ( # rows, # cols )\")\n # Throw an error if the number of columns in the unpacked file is not 3\n if np.shape(data)[1] != 3:\n raise ValueError(\"Shape of data is unexpected. File format may be incorrect.\")\n data = np.array(data)\n # sum the counts across the files\n counts = data.sum(axis=0)[1]\n # quadrature sum of the errors\n err = np.sqrt((data**2).sum(axis=0)[2])\n sum_data = np.vstack((data[0][0],counts,err)).transpose()\n # append summed data to the outfile\n with open(outfile,\"ab\") as newfile:\n np.savetxt(newfile,sum_data,fmt=[\"%-15d\",\"%8.5f\",\"%16.10f\"])\n # free memory\n del sum_data\n print(\"done\")",
"def sum(self, col_name: str, group_by: str) -> dict:\n self._validate_col_name(col_name)\n self._validate_col_name(group_by)\n\n result = defaultdict(int)\n col_values = self.data_table[col_name]\n group_by_values = self.data_table[group_by]\n for col_value, group_by_value in zip(col_values, group_by_values):\n if not isinstance(col_value, numbers.Number):\n raise TypeError(\"Column data must be of numeric type, but found: {}.\"\n .format(type(col_value))\n )\n result[group_by_value] += col_value\n return result",
"def _summarizeGroup_udf(self, columns, group_fn):\n arrow_batch_col_name = self._jpkg.ArrowSummarizer.arrowBatchColumnName()\n\n # Check if illegal columns exists\n udf._check_invalid_udfs(columns.values())\n required_col_names = udf._required_column_names(columns.values())\n arrow_summarizer = summarizers.arrow(required_col_names, include_base_rows=False)\n grouped = group_fn(self, arrow_summarizer)\n\n # (1) Turns row in each group into an Arrow file format\n # (2) For each udf, we apply the function and put the\n # result in a new column. If the udf returns multiple\n # values, we put the values in a struct first and later\n # explode it into multiple columns.\n for i, (col_name, udf_column) in enumerate(columns.items()):\n fn, t = udf._fn_and_type(udf_column)\n column_indices = udf_column.column_indices\n arg_type = udf_column.arg_type\n\n if arg_type != 'pandas':\n raise ValueError('Only arg_type == pandas is supported')\n\n def _fn(arrow_bytes):\n pdf = arrowfile_to_dataframe(arrow_bytes)\n inputs = [pdf[index] for index in column_indices]\n ret = fn(*inputs)\n return udf._numpy_to_python(ret)\n\n if isinstance(col_name, tuple):\n struct_col_name = \"__struct_{}\".format(i)\n grouped = grouped.withColumn(\n struct_col_name,\n F.udf(_fn, t)(grouped[arrow_batch_col_name]))\n\n for i in range(len(col_name)):\n grouped = grouped.withColumn(\n col_name[i],\n grouped[struct_col_name]['_{}'.format(i)])\n\n grouped = grouped.drop(struct_col_name)\n else:\n grouped = grouped.withColumn(\n col_name,\n F.udf(_fn, t)(grouped[arrow_batch_col_name]))\n\n return grouped.drop(arrow_batch_col_name)",
"def aggregate(self, table, column, fun, grouped_column=None, valu=None):\n if column == '*':\n column = next(iter(table)) # this takes care of COUNT(*), because we can safely replace column with\n # first key i.e a column of table here\n if column not in table.keys():\n raise NotImplementedError(\"Table does not have any column named \" + str(column))\n\n if grouped_column is not None and grouped_column not in table.keys():\n raise NotImplementedError(\"Table does not have any column named \" + str(column))\n\n if fun == 'MAX':\n val = int(-1e9)\n i = 0\n for v in table[column]:\n if grouped_column is not None:\n if table[grouped_column][i] == valu:\n val = max(val, v)\n else:\n val = max(val, v)\n i += 1\n return val\n elif fun == 'MIN':\n val = int(1e9)\n i = 0\n for v in table[column]:\n if grouped_column is not None:\n if table[grouped_column][i] == valu:\n val = min(val, v)\n else:\n val = min(val, v)\n i += 1\n return val\n elif fun == 'COUNT':\n if grouped_column is not None:\n i = 0\n for v in table[grouped_column]:\n if v == valu:\n i += 1\n return i\n else:\n return len(table[column])\n elif fun == 'SUM':\n if grouped_column is not None:\n s = 0\n i = 0\n for v in table[column]:\n if table[grouped_column][i] == valu:\n s += v\n i += 1\n return s\n else:\n return functools.reduce(lambda a, b: a + b, table[column])\n elif fun == 'AVG':\n summ = 0\n elements = 0\n if grouped_column is not None:\n i = 0\n for v in table[column]:\n if table[grouped_column][i] == valu:\n summ += v\n elements += 1\n i += 1\n else:\n summ = functools.reduce(lambda a, b: a + b, table[column])\n elements = len(table[column])\n return summ / elements\n else:\n raise NotImplementedError(str(fun) + \" function is not implemented in Mini SQL\")",
"def aggregate(self, group_by, operations):\n try:\n i = self._column_names.index(group_by)\n except ValueError:\n raise ColumnDoesNotExistError(group_by)\n\n groups = OrderedDict() \n\n for row in self._data:\n group_name = row[i]\n\n if group_name not in groups:\n groups[group_name] = []\n\n groups[group_name].append(row)\n\n output = []\n\n column_types = [self._column_types[i], NumberType()]\n column_names = [group_by, '%s_count' % group_by]\n\n for op_column, operation in operations:\n try:\n j = self._column_names.index(op_column)\n except ValueError:\n raise ColumnDoesNotExistError(op_column)\n\n column_type = self._column_types[j]\n\n column_types.append(column_type)\n column_names.append('%s_%s' % (op_column, operation))\n\n for name, group_rows in groups.items():\n group_table = Table(group_rows, self._column_types, self._column_names) \n new_row = [name, len(group_table.rows)]\n\n for op_column, operation in operations:\n c = group_table.columns[op_column]\n \n try:\n op = getattr(c, operation)\n except AttributeError:\n raise UnsupportedOperationError(operation, c)\n\n new_row.append(op())\n\n output.append(tuple(new_row))\n \n return self._fork(output, column_types, column_names)",
"def group(data, groups, statistic='mean'):\n matched, failed = match_variables(data, groups)\n for x in failed: print('Warning: Can not find variable', x)\n grp = data.groupby(matched)\n table = eval(\"grp.\" + statistic + \"()\")\n table = table.reset_index()\n return table",
"def aggregate(infile, outfile, reduction, variables=None, \r\n agg_methods=rv.ReduceVar.REDUCE_MEAN, \r\n agg_dim='days') :\r\n in_ds = nc.Dataset(infile)\r\n \r\n # if the user did not specify which variables to reduce, \r\n # guess that they want everything except coordinate variables.\r\n if variables is None: \r\n variables = list(in_ds.variables.keys())\r\n for d in in_ds.dimensions.keys() : \r\n variables.remove(d)\r\n if 'nav_lat' in variables : \r\n variables.remove('nav_lat')\r\n if 'nav_lon' in variables :\r\n variables.remove('nav_lon')\r\n \r\n # set up the \"ReduceVar\" aggregator\r\n # assume that all variables have same dimensions.\r\n v = in_ds.variables[variables[0]]\r\n variable_shape = v.shape\r\n variable_dims = v.dimensions\r\n i_agg = variable_dims.index(agg_dim)\r\n if reduction == REDUCE_MONTHLY : \r\n aggregator = rv.monthly_aggregator(variable_shape, i_agg) \r\n else : \r\n aggregator = rv.ReduceVar(variable_shape, i_agg, reduction)\r\n \r\n # figure out the shape of the output array \r\n output_shape = list(variable_shape)\r\n output_shape[i_agg] = aggregator.reduced\r\n \r\n # create the output file\r\n out_agg = agg.NetCDFTemplate(infile, outfile)\r\n \r\n # don't let the template copy the \"aggregate\" dimension to the new file!\r\n out_agg.createDimension(agg_dim, aggregator.reduced)\r\n \r\n # copy the \"navigation\" variables\r\n out_agg.copyVariable('nav_lat')\r\n out_agg.copyVariable('nav_lon')\r\n \r\n # expand agg_methods if necessary\r\n if not isinstance(agg_methods, collections.Sequence) : \r\n agg_methods = [agg_methods] * len(variables)\r\n\r\n # prepare an index to write the output\r\n out_slice = [ slice(None,None,None) ] * len(variable_shape)\r\n \r\n # loop over the variables \r\n for varname, agg_method in zip(variables, agg_methods) : \r\n v = in_ds.variables[varname]\r\n fill_value = getattr(v, '_FillValue', None)\r\n out_v = out_agg.create_variable(varname, v.dimensions, \r\n v.dtype, fill=fill_value)\r\n\r\n # loop over each reduced index \r\n for reduced_i in range(aggregator.reduced) : \r\n out_slice[i_agg] = reduced_i\r\n out_v[out_slice] = aggregator.reduce(agg_method, reduced_i, v)\r\n \r\n out_agg.close()\r\n in_ds.close()",
"def process(cls, df):\n # Calculate totals for both genders together\n for g in cls.GROUPS[1:]:\n\n # the columns to sum\n cols_to_sum = [f\"{tag}_{g}\" for tag in [\"male\", \"female\"]]\n\n # approximate the sum\n new_cols = [f\"total_{g}\", f\"total_{g}_moe\"]\n df[new_cols] = df.apply(approximate_sum, cols=cols_to_sum, axis=1)\n\n return df",
"def test_sum_non_numeric():\n rows = query_csv.iter_csv_rows(_PATH, delim=' ')\n with pytest.raises(TypeError):\n query_csv.sum_columns(rows, ['i', 's'])",
"def write_func(in_files, out_file, groups):\r\n data_file = h5py.File(out_file, 'a')\r\n image_extensions = ['jpg', 'jpeg', 'png', 'bmp', 'tiff']\r\n count = 0\r\n try:\r\n for in_file in in_files:\r\n if in_file.split('.')[-1] not in image_extensions:\r\n try:\r\n with open(in_file) as ocf:\r\n data = ocf.read()\r\n str_type = h5py.special_dtype(vlen=str)\r\n dset = data_file.create_dataset(\r\n groups[count] + in_file.split('/')[-1],\r\n data=data, shape=(1,),\r\n dtype=str_type\r\n )\r\n attributes = generate_attributes_to_add(\r\n groups[count] + in_file.split('/')[-1])\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v\r\n except FileNotFoundError:\r\n print(in_file, \"not found\")\r\n else:\r\n dset = image_to_hdf5(in_file, data_file, groups[count])\r\n attributes = generate_attributes_to_add(\r\n groups[count] + in_file.split('/')[-1])\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v\r\n if len(groups) == 1:\r\n count = 0\r\n else:\r\n count += 1\r\n except RuntimeError:\r\n pass",
"def cvsInU(*args, **kwargs):\n \n pass",
"def cal_agg(c, d, t, out_path):\n c = c[c['State'] == 'CA']\n c = c.drop(columns = c.columns[0:4])\n c = c.sum(axis = 0)\n\n d = d[d['State'] == 'CA']\n d = d.drop(columns = d.columns[0:4])\n d = d.sum(axis = 0)\n\n data1 = {'date': pd.to_datetime(c.index),\n 'cases': c,\n 'deaths': d}\n\n df1 = pd.DataFrame(data1)\n\n data2 = {'date': pd.to_datetime(t.date, format='%Y%m%d'),\n 'positive': t.positive,\n 'negative': t.negative,\n 'tested': t.total}\n\n df2 = pd.DataFrame(data2)\n\n df = pd.merge(df1, df2, on='date', how='outer')\n df.to_csv(out_path, index = False)",
"def test_read_input_groups(file_groups):\n config = MapcheteConfig(file_groups.path)\n input_files = config.params_at_zoom(0)[\"input\"]\n assert \"file1\" in input_files[\"group1\"]\n assert \"file2\" in input_files[\"group1\"]\n assert \"file1\" in input_files[\"group2\"]\n assert \"file2\" in input_files[\"group2\"]\n assert \"nested_group\" in input_files\n assert \"group1\" in input_files[\"nested_group\"]\n assert \"file1\" in input_files[\"nested_group\"][\"group1\"]\n assert \"file2\" in input_files[\"nested_group\"][\"group1\"]\n assert \"file1\" in input_files[\"nested_group\"][\"group2\"]\n assert \"file2\" in input_files[\"nested_group\"][\"group2\"]\n assert config.area_at_zoom()",
"def prorate_grouped_by_column_value(chain_units_path, data_units_path, chain_groupby_name, data_groupby_name, prorate_cols, output_file_name):\n\n #read the files and add unique identifier\n blks = gp.read_file(data_units_path)\n vtds = gp.read_file(chain_units_path)\n vtds['__ID'] = range(len(vtds))\n blks['__ID'] = range(len(blks))\n\n # get unique values in the common field: chain_groupby_name (or data_groupby_name)\n counties = vtds[chain_groupby_name].unique().tolist()\n\n # create lookupTable structure to use for prorating\n namelookup = pd.DataFrame({x: {y: 0 for y in prorate_cols}\n for x in vtds['__ID'].tolist()}).transpose()\n namelookup.index.names = ['large']\n all_lookups = pd.DataFrame({\"small\":[], \"large\":[]})\n\n bnames=[\"__ID\"]+prorate_cols\n # for each value in the unique values, create a lookup\n # for the untis matching that value in both shapefiles.\n for county in counties:\n b = blks.loc[blks[data_groupby_name] == county]\n for name in prorate_cols:\n b[name] = b[name].astype(float)\n v = vtds.loc[vtds[chain_groupby_name] == county]\n\n l = fasterLookupTable(v, b, '__ID','__ID')\n l = l.merge(b.loc[:, bnames], left_on='small', right_on='__ID')\n\n all_lookups.update(l.loc[:, ['small','large']])\n amounts = l.groupby('large')[prorate_cols].sum()\n namelookup.update(amounts)\n\n all_lookups.to_csv(\"lookupTable.csv\")\n\n namelookup['LG'] = namelookup.index.tolist()\n cs = [x for x in vtds.columns if x not in prorate_cols]\n vtds.loc[:, cs].merge(namelookup, left_on='__ID', right_on='LG').to_file(output_file_name.split('.shp')[0])\n print(\"FINISHED!!!!\")",
"def calc_roi_mean_allsubgroup(src_file, roi_files, group_labels, trg_file):\n reader = CiftiReader(src_file)\n labels = np.unique(group_labels)\n roi_mean_rows = []\n for hemi in hemis:\n maps = reader.get_data(brain_structure[hemi], True)\n for label1 in labels:\n sub_maps = np.atleast_2d(maps[group_labels == label1])\n for label2 in labels:\n roi_file = roi_files.format(hemi=hemi[0], label=label2)\n roi_mask = nib.load(roi_file).get_data().ravel()\n roi_labels = np.unique(roi_mask)\n for roi_label in roi_labels:\n if roi_label == 0:\n continue\n roi_vertices = np.where(roi_mask == roi_label)[0]\n roi_name = roi_names.format(hemi=hemi[0], label=label2, roi_label=int(roi_label))\n roi_name += '_in_subgroup{}'.format(label1)\n roi_means = np.mean(sub_maps[:, roi_vertices], 1)\n roi_mean_row = [roi_name]\n roi_mean_row.extend([str(_) for _ in roi_means])\n roi_mean_rows.append(','.join(roi_mean_row))\n open(trg_file, 'w+').writelines('\\n'.join(roi_mean_rows))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
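IDotData's summarizeColumnsWithinGroups is project-specific, but the quantities the row above computes (per-group sums, sums of squares and counts) can be illustrated with a plain pandas sketch on the same doctest data; the column layout of the real output is an assumption here.

import pandas as pd

# same records as the doctest: groups in 'a', values in 'b'
df = pd.DataFrame({'a': [1, 1, 2, 2], 'b': [2, 3, 4, 5]})
summary = df.groupby('a')['b'].agg(
    sum='sum',                          # per-group sum
    sumsq=lambda s: (s ** 2).sum(),     # per-group sum of squares
    count='count',                      # per-group count
).reset_index()
print(summary)
#    a  sum  sumsq  count
# 0  1    5     13      2
# 1  2    9     41      2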
Compute histograms of the specified columns of the input
|
def computeHistograms( inFN, cols, binSizes = None, outFNs = None, getio = None ):
cols = tuple( MakeSeq( cols ) )
binSizesHere = ( .001, ) * len( cols ) if binSizes is None else tuple( MakeSeq( binSizes ) )
outFNsHere = outFNs
if outFNsHere is None: outFNsHere = [ AddFileSubdir( 'stats', AddFileSfx( inFN, 'hist', col ) ) for col in cols ]
assert len( cols ) == len( binSizesHere ) == len( outFNsHere )
if getio: return dict( depends_on = inFN, creates = outFNsHere )
# add histogram combiner
hists = [ Histogrammer( binSize = binSize ) for binSize in binSizesHere ]
z = IDotData( inFN )
for h, c, outFN in zip( hists, cols, outFNsHere ):
h.addVals( z[ c ] )
h.save( outFN )
|
[
"def vh_histograms(map):\n return np.sum(map, axis=1), np.sum(map, axis=0)",
"def histogram(index, data, columns):\n plt.figure(figsize=(10, 5))\n plt.title(\"Histogram for {}\".format(columns[index]))\n ax = sns.distplot(data[:,index], rug=True)",
"def show_histo(df, bins=20):\r\n\r\n assert(isinstance(df, pd.DataFrame))\r\n\r\n for c in numeric_cols(df):\r\n df[c].hist(bins=bins)\r\n plt.title(c)\r\n plt.show()",
"def plot_histograms(profiler, columns=None):\n\n # get all inds to graph, raise error if user specified doesn't exist (part 1)\n inds_to_graph = []\n if not columns:\n inds_to_graph = list(range(len(profiler.profile)))\n else:\n for column in columns:\n col = column\n if isinstance(col, str):\n col = col.lower()\n if col not in profiler._col_name_to_idx:\n raise ValueError(\"Column \\\"\" + str(col) + \"\\\" is not found as a \"\n \"profiler column\")\n inds_to_graph.extend(profiler._col_name_to_idx[col])\n sorted(inds_to_graph)\n # get all columns which are either int or float (part2)\n def is_index_graphable_column(ind_to_graph):\n \"\"\"\n This function filters ind_to_graph by returning false if there is a\n data type that is not a int or float, otherwise true\n \"\"\"\n col_profiler = profiler.profile[ind_to_graph]\n data_compiler = col_profiler.profiles['data_type_profile']\n if data_compiler.selected_data_type not in ['int', 'float']:\n return False\n return True\n inds_to_graph = list(filter(is_index_graphable_column, inds_to_graph))\n\n if not inds_to_graph:\n warnings.warn(\"No plots were constructed\"\n \" because no int or float columns were found in columns\")\n return\n # get proper tile format for graph\n n = len(inds_to_graph)\n cols = math.ceil(math.sqrt(n))\n rows = math.ceil(n / cols)\n fig, axs = plt.subplots(rows, cols) # this will need to be flattened into a list\n # flatten axes for inputing graphs into the plot\n if not isinstance(axs, (np.ndarray)):\n axs = np.array([axs])\n axs.flatten()\n\n # graph the plots (part 3)\n for col_ind, ax in zip(inds_to_graph, axs):\n col_profiler = profiler.profile[col_ind]\n data_compiler = col_profiler.profiles['data_type_profile']\n data_type = data_compiler.selected_data_type\n data_type_profiler = data_compiler._profiles[data_type]\n plot_col_histogram(data_type_profiler, ax=ax,\n title=str(data_type_profiler.name))\n plt.show()\n return fig",
"def histogram(*args):\n return _seb.histogram(*args)",
"def histogram(self, dataset):\n pass",
"def plot_histogram_for_column(self, column_name, bins, xlabel, ylabel, info_threshold,\n textbox_x_positional_percentage=0.75,\n textbox_drop_percentage=0.05):\n\n title = \"Histogram for column '\" + column_name + \"'\"\n filename = os.path.join(self.plot_dir, column_name + \"_histogram.png\")\n column = self.gt_parser.get_column(column_name)\n\n Plotter.plot_histogram_for_column(xs=column, title=title, filename=filename, bins=bins, xlabel=xlabel,\n ylabel=ylabel, info_threshold=info_threshold,\n textbox_x_positional_percentage=textbox_x_positional_percentage,\n textbox_drop_percentage=textbox_drop_percentage)",
"def df_display_hist_from_list(df_food, list_columns) :\n z = plt.figure(figsize=(4,4))\n for column in list_columns :\n df = df_food.copy()\n zmin, zmax = df_boxplot_min_max(df, column)\n if zmin < zmax :\n list_name = remove_pattern([column],'100g')\n new_column = list_name[0]\n df.rename(columns={column: new_column}, inplace=True)\n column = new_column\n df = pd.DataFrame(df[column], index=df.index)\n df = df[df[column] <= zmax]\n df = df[df[column] >= zmin]\n df = df[df[column] > 0.0]\n #z = plt.figure()\n z = df.plot.hist(bins=50)",
"def plot_distributions(self, df):\r\n for col in df.columns.values:\r\n plt.figure(); plt.hist(df[col], bins=200)\r\n plt.title(\"Histogram showing distribution of \" + str(col))",
"def histogram_data(self):\n all_histograms = {}\n\n for feature_name in self.features.features():\n feature = self.features[feature_name]\n # dropping NaN values - visualization shows untransformed data and nans are imputed only during\n # transformations\n series = feature.data().dropna()\n\n if isinstance(feature, CategoricalFeature):\n bins = len(series.unique())\n elif isinstance(feature, NumericalFeature):\n # if columns is numerical we calculate the number of bins manually\n bins = calculate_numerical_bins(series)\n else:\n bins = 1 # placeholder rule\n\n hist, edges = np.histogram(series, density=True, bins=bins)\n left_edges, right_edges = modify_histogram_edges(edges) # modify edges by adding space between\n\n all_histograms[feature_name] = (hist, left_edges, right_edges)\n\n return all_histograms",
"def equalize_histogram(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def histogram_multiple_viz(\n data,\n column,\n separate_column,\n condition_1,\n condition_2,\n title1,\n title2,\n title3,\n title4,\n color1=\"blue\",\n color2=\"darkorange\",\n): \n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 6))\n data.loc[data[separate_column] == condition_1][column].apply(\n np.log\n ).plot(\n kind=\"hist\",\n bins=100,\n title=title1,\n color=color1,\n xlim=(-3, 10),\n ax=ax1,\n )\n data.loc[data[separate_column] == condition_2][column].apply(\n np.log\n ).plot(\n kind=\"hist\",\n bins=100,\n title=title2,\n color=color2,\n xlim=(-3, 10),\n ax=ax2,\n )\n data.loc[data[separate_column] == condition_1][column].plot(\n kind=\"hist\", bins=100, title=title3, color=color1, ax=ax3\n )\n data.loc[data[separate_column] == condition_2][column].plot(\n kind=\"hist\",\n bins=100,\n title=title4,\n color=color2,\n ax=ax4,\n )\n plt.show()",
"def color_hist(img, nbins=32, bins_range=(0, 256)):\n \"\"\"Compute the histogram of the color channels separately.\"\"\"\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features",
"def build_histograms(preprocessed_images, num_channels):\n histogram_processed_train = np.zeros((len(preprocessed_images), num_channels ** 3))\n for i, img in enumerate(preprocessed_images):\n # chans = cv2.split(image)\n colors = (\"b\", \"g\", \"r\")\n hist = cv2.calcHist([img], [0, 1, 2],\n None, [num_channels, num_channels, num_channels], [0, 256, 0, 256, 0, 256])\n histogram_processed_train[i] = hist.flatten()\n return histogram_processed_train",
"def color_hist(img, nbins=32):\n channel1_hist = np.histogram(img[:,:,0], bins=nbins)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features",
"def get_hist_col(self):\n\n st.subheader(\"histogram\")\n self.hist_col = st.selectbox('Which feature?',\n self.raw_data\\\n .select_dtypes(exclude='object')\\\n .columns)",
"def marginalize(self,iaxis,bin_range=None):\n\n h = Histogram(self._axes[(iaxis+1)%2],style=self._style) \n\n if iaxis == 1:\n\n if bin_range is None: \n h._counts = np.apply_over_axes(np.sum,self._counts,[1]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,self._var,[1]).reshape(h._counts.shape)\n else:\n c = self._counts[:,bin_range[0]:bin_range[1]]\n v = self._var[:,bin_range[0]:bin_range[1]]\n\n h._counts = np.apply_over_axes(np.sum,c,[1]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,v,[1]).reshape(h._counts.shape)\n else:\n\n if bin_range is None: \n h._counts = np.apply_over_axes(np.sum,self._counts,[0]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,self._var,[0]).reshape(h._counts.shape)\n else:\n c = self._counts[bin_range[0]:bin_range[1],:]\n v = self._var[bin_range[0]:bin_range[1],:]\n\n h._counts = np.apply_over_axes(np.sum,c,[0]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,v,[0]).reshape(h._counts.shape)\n\n return h",
"def histogram_viz(\n data,\n column,\n separate_column,\n condition_1,\n condition_2,\n label1,\n label2,\n color1 = None,\n color2 = None,\n):\n plt.hist(\n list(data[data[separate_column] == condition_1][column]),\n alpha=0.5,\n label=label1,\n color=color1,\n )\n plt.hist(\n list(data[data[separate_column] == condition_2][column]),\n alpha=0.5,\n label=label2,\n color=color2,\n )\n plt.legend(loc=\"upper right\")\n plt.show()",
"def histogram_fluctuations(self, ax=None, **kwargs):\n _ = ax.hist(self.C_ij, **kwargs)\n ax.set_ylabel('Num. pairs')\n ax.set_xlabel('Fluctuation')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
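Histogrammer is not shown in this dump, but the computeHistograms rule above amounts to accumulating fixed-width bins per column. A minimal numpy sketch of that idea, assuming only a fixed bin size like the .001 default:

import numpy as np

def histogram_column(values, bin_size=0.001):
    # drop NaNs, then count values into fixed-width bins spanning the data range
    values = np.asarray(values, dtype=float)
    values = values[~np.isnan(values)]
    edges = np.arange(values.min(), values.max() + bin_size, bin_size)
    counts, edges = np.histogram(values, bins=edges)
    return counts, edges

counts, edges = histogram_column(np.random.rand(1000), bin_size=0.05)
print(list(zip(edges[:-1].round(2), counts)))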
Convert DotData to TSV
|
def DotData2TSV( inFN, outFN, readOpts = {}, getio = None ):
if getio: return dict( depends_on = inFN, creates = outFN )
DotData( Path = inFN, **readOpts ).saveToSV( outFN )
|
[
"def TSV2DotData( inFN, outFN, getio = None ):\n if getio: return dict( depends_on = inFN, creates = outFN )\n DotData( SVPath = inFN ).save( outFN )",
"def tsv2npy( inFN, outFN = None, getio = None ):\n if outFN is None: outFN = ReplaceFileExt( inFN, '.npy' )\n if getio: return dict( depends_on = inFN, creates = outFN )\n z = DotData( SVPath = inFN )\n np.save( outFN, z )",
"def convert_obo_to_tsv(input_path, output_path=None):\n\n # read in data\n logger.info(\"Parsing %s\", input_path)\n with _open_input_stream(input_path) as input_stream:\n obo_records_dict = parse_obo_format(input_stream)\n\n # print stats and output .tsv\n print_stats(obo_records_dict, input_path)\n\n if output_path is None:\n write_tsv(obo_records_dict, output_stream=sys.stdout)\n else:\n with open(output_path, \"w\") as output_stream:\n write_tsv(obo_records_dict, output_stream)\n\n logger.info(\"Done\")",
"def TblIterFromDotData( dotData ):\n\n return IterReader( headings = dotData.dtype.names,\n recordsIter = dotData if dotData.numCols() > 1 else map( (lambda x: (x,)), dotData ) )",
"def export_vtk(self, filename):\n\n pass",
"def create_tsv(output, data):\n if not output:\n output = open('evaluation.dat', 'w')\n i = 0\n for item in data:\n path, label = item.split(':')\n xml_file = open(path).read()\n completion_rate = calculate_completion_rate(xml_file)\n output.write('{0}\\t{1}\\t{2}\\r\\n'.format(str(i), label, str(completion_rate)))\n i += 1\n output.close()",
"def Transform(self, content):\r\n tsv_output = ''\r\n if content:\r\n column_headers = content.get('columnHeaders', [])\r\n rows = content.get('rows', [])\r\n\r\n if column_headers:\r\n self.writer.OutputHeaders(content)\r\n\r\n if rows:\r\n self.writer.OutputRows(content)\r\n\r\n out = self.output.getvalue()\r\n # Get UTF-8 output\r\n decoding = out.decode('UTF-8')\r\n # and re-encode to UTF-16 for Excel TSV\r\n tsv_output = decoding.encode('UTF-16')\r\n self.output.close()\r\n\r\n return tsv_output",
"def _dumpvtk_dumper(dataset):\r\n slf = []\r\n # write the head\r\n slf.append('# vtk DataFile Version 3.0')\r\n slf.append(dataset.title)\r\n slf.append('ASCII')\r\n slf.append('DATASET UNSTRUCTURED_GRID')\r\n # write the points\r\n slf.append('POINTS {} double'.format(len(dataset.points)))\r\n for point in dataset.points:\r\n slf.append('{} {} {}'.format(*point.coordinate))\r\n # write the cells\r\n size = sum([c.cell_size()+1 for c in dataset.cells])\r\n slf.append('CELLS {} {}'.format(len(dataset.cells), size))\r\n for cell in dataset.cells:\r\n slf.append(' '.join(['{:d}'.format(cell.cell_size())] +\r\n ['{:d}'.format(p) for p in cell.points]))\r\n \r\n slf.append('CELL_TYPES {}'.format(len(dataset.cells)))\r\n for cell in dataset.cells:\r\n slf.append('{:d}'.format(cell.cell_type))\r\n # write point data\r\n slf.append('POINT_DATA {}'.format(len(dataset.points)))\r\n for key,field in dataset.point_data.items():\r\n # scalars\r\n if type(field) == ScalarField:\r\n slf.append('SCALARS {} double'.format(field.data_name))\r\n slf.append('LOOKUP_TABLE default')\r\n for d in field.data:\r\n slf.append('{}'.format(d.real))\r\n###############################################################################\r\n# ## Deprecated #\r\n# # vectors #\r\n# elif type(field) == VectorField: #\r\n# slf.append('VECTORS {} double'.format(field.data_name)) #\r\n# for d in field.data: #\r\n# slf.append('{} {} {}'.format(*d)) #\r\n###############################################################################\r\n # vectors (VectorField or Field), use field expression in VTK\r\n else:\r\n slf.append('FIELDS {} 1'.format(key))\r\n slf.append('{} {} {} double'.format(field.data_name,\r\n field.ncomponents, field.size()))\r\n for d in field.data:\r\n slf.append(' '.join(['{}'.format(i.real) for i in d]))\r\n # write cell data\r\n slf.append('CELL_DATA {}'.format(len(dataset.cells)))\r\n for key,field in dataset.cell_data.items():\r\n # scalars\r\n if type(field) == ScalarField:\r\n slf.append('SCALARS {} double'.format(field.data_name))\r\n slf.append('LOOKUP_TABLE default')\r\n for d in field.data:\r\n slf.append('{}'.format(d.real))\r\n###############################################################################\r\n# ## Deprecated #\r\n# # vectors #\r\n# elif type(field) == VectorField: #\r\n# slf.append('VECTORS {} double'.format(field.data_name)) #\r\n# for d in field.data: #\r\n# slf.append('{} {} {}'.format(*d)) #\r\n###############################################################################\r\n # vectors (VectorField or Field), use field expression in VTK\r\n else:\r\n slf.append('FIELDS {} 1'.format(key))\r\n slf.append('{} {} {} double'.format(field.data_name,\r\n field.ncomponents, field.size()))\r\n for d in field.data:\r\n slf.append(' '.join(['{}'.format(i.real) for i in d]))\r\n slf.append('')\r\n return '\\n'.join(slf)",
"def make_deps_tsv(outFileName):\n\n depsData = get_deps()\n outFileObj = open(outFileName, 'w')\n for dep in depsData:\n outFileObj.write(dep[\"code\"] + \"\\t\" + dep[\"name\"] + \"\\n\")",
"def _dump_data(self, fileobj):\n if not fileobj and self._file:\n root = os.path.splitext(self._file.name)[0]\n fileobj = root + \".txt\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, \"w\")\n close_file = True\n\n linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)\n\n # Process each row of the table and output one row at a time\n def format_value(val, format):\n if format[0] == \"S\":\n itemsize = int(format[1:])\n return \"{:{size}}\".format(val, size=itemsize)\n elif format in np.typecodes[\"AllInteger\"]:\n # output integer\n return f\"{val:21d}\"\n elif format in np.typecodes[\"Complex\"]:\n return f\"{val.real:21.15g}+{val.imag:.15g}j\"\n elif format in np.typecodes[\"Float\"]:\n # output floating point\n return f\"{val:#21.15g}\"\n\n for row in self.data:\n line = [] # the line for this row of the table\n\n # Process each column of the row.\n for column in self.columns:\n # format of data in a variable length array\n # where None means it is not a VLA:\n vla_format = None\n format = _convert_format(column.format)\n\n if isinstance(format, _FormatP):\n # P format means this is a variable length array so output\n # the length of the array for this row and set the format\n # for the VLA data\n line.append(\"VLA_Length=\")\n line.append(f\"{len(row[column.name]):21d}\")\n _, dtype, option = _parse_tformat(column.format)\n vla_format = FITS2NUMPY[option[0]][0]\n\n if vla_format:\n # Output the data for each element in the array\n for val in row[column.name].flat:\n line.append(format_value(val, vla_format))\n else:\n # The column data is a single element\n dtype = self.data.dtype.fields[column.name][0]\n array_format = dtype.char\n if array_format == \"V\":\n array_format = dtype.base.char\n if array_format == \"S\":\n array_format += str(dtype.itemsize)\n\n if dtype.char == \"V\":\n for value in row[column.name].flat:\n line.append(format_value(value, array_format))\n else:\n line.append(format_value(row[column.name], array_format))\n linewriter.writerow(line)\n if close_file:\n fileobj.close()",
"def dot_to_dep(self,dot_file,dep_file) :\n\t\tth_dep=[]\n\t\trew_dep=[]\n\t\tensure_dir(dep_file)\n\t\twith open(dot_file,\"r\") as dot_f :\n\t\t lines = dot_f.readlines()\n\t\t for line in lines :\n\t\t for word in self.types :\n\t\t if \"\\\\\\\"Label\"+word+\"\\\\\\\"\" in line :\n\t\t rew_dep.append(word)\n\t\t for word in self.get_all_th() :\n\t\t if \"\\\\\\\"Label\"+word+\"\\\\\\\"\" in line :\n\t\t th_dep.append(word)\n\t\tself.write_dep(th,rew,dep_file)",
"def detection_limits_to_tsv(detection_limits):\n s = \"\"\n\n if detection_limits['lookup_selected'] != {}:\n s += (\"Lookup Selected Peaks:\\n\\n\"\n \"Isotope\\tPeaknum\\tLD\\tLC\\n\")\n\n for row in sorted(detection_limits['lookup_selected']):\n s += \"{0}\\t{1}\\t{2}\\t{3}\\n\".format(*row)\n s += \"\\n\\n\\n\"\n\n if detection_limits['clicked_selected'] != {}:\n s += (\"Peaks Selected from Plot:\\n\\n\"\n \"Index\\tLD\\tLC\\n\")\n\n for key in sorted(detection_limits['clicked_selected'].keys()):\n s += \"{0}\\t{1}\\t{2}\\n\".format(key, \n detection_limits['clicked_selected'][key][0],\n detection_limits['clicked_selected'][key][1],\n )\n return s",
"def to_tsv(self, output_file):\n csvw = csv.writer(output_file, delimiter=\"\\t\", quoting=csv.QUOTE_NONE)\n for row in self.summary:\n csvw.writerow(row)",
"def convert_custom_csv_to_tsv(input, output_path, label_col, text_col, id_col=None, skip_header=True,\r\n output_format=DEFAULT_OUT_FORMAT):\r\n convert_custom_input_to_tsv(input, \",\", output_path, label_col, text_col, id_col=id_col, skip_header=skip_header,\r\n output_format=output_format)\r\n return None",
"def save_labels_tsv(labels, filename, log_dir, dataset):\n with open(os.path.join(log_dir, filename), 'w') as f:\n for label in labels.numpy():\n f.write('{}\\n'.format(dataset.LABELS[int(label)]))",
"def write_tsv(self, filename):\n\n output = StringIO()\n\n # Add the header line\n output.write('model_name\\t')\n output.write('\\t'.join([r.func_name for r in self.reporters]))\n output.write('\\n')\n\n # Transpose the results list\n results = zip(*self.results)\n\n for model_name, result_row in zip(self.names, results):\n output.write(model_name + '\\t')\n output.write('\\t'.join([r.get_text() for r in result_row]))\n output.write('\\n')\n\n with open(filename, 'w') as f:\n f.write(output.getvalue())",
"def write_tsv(obo_records_dict, output_stream, separator=\", \"):\n\n header = [\"id\", \"name\", \"is_a\", \"namespace\", \"def\"]\n records = yield_all(obo_records_dict)\n\n for record in records:\n row = []\n for tag in header:\n value = record.get(tag)\n if value is None:\n row.append(\"\")\n elif isinstance(value, list):\n row.append(separator.join(map(str, value)))\n else:\n row.append(str(value))\n output_stream.write(\"\\t\".join(row))\n output_stream.write(\"\\n\")",
"def write_tsv(file, array, header=''):\n\n _np.savetxt(file, array, fmt='%.4f', delimiter='\\t', header=header, comments='#')",
"def get_tsv(self, file):\n # Download it\n url = \"https://download.bls.gov/pub/time.series/cu/{}\".format(file)\n logger.debug(\" - {}\".format(url))\n\n tsv_path = os.path.join(self.THIS_DIR, '{}.tsv'.format(file))\n response = requests.get(url)\n with open(tsv_path, 'w') as f:\n f.write(response.text)\n\n # Convert it to csv\n reader = csv.reader(open(tsv_path, 'r'), delimiter=\"\\t\")\n csv_path = os.path.join(self.THIS_DIR, '{}.csv'.format(file))\n writer = csv.writer(open(csv_path, 'w'))\n for row in reader:\n writer.writerow([cell.strip() for cell in row])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
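DotData2TSV, like the other converters in this dump, follows a getio protocol: called with getio set, it returns its file dependencies instead of doing any work. A hypothetical sketch of that convention (the pipeline machinery itself is not shown in the source):

def DotData2TSV_like(inFN, outFN, getio=None):
    # with getio, only declare inputs/outputs; otherwise do the conversion
    if getio:
        return dict(depends_on=inFN, creates=outFN)
    print('converting', inFN, '->', outFN)

# a pipeline can first query the rule's I/O, then run it for real
io = DotData2TSV_like('table.data/', 'table.tsv', getio=True)
print('depends on:', io['depends_on'], '| creates:', io['creates'])
DotData2TSV_like('table.data/', 'table.tsv')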
Convert a TSV file to a DotData dir.
|
def TSV2DotData( inFN, outFN, getio = None ):
if getio: return dict( depends_on = inFN, creates = outFN )
DotData( SVPath = inFN ).save( outFN )
|
[
"def DotData2TSV( inFN, outFN, readOpts = {}, getio = None ):\n if getio: return dict( depends_on = inFN, creates = outFN )\n DotData( Path = inFN, **readOpts ).saveToSV( outFN )",
"def load_from_tsv(tsv_file):\n # Load data from files\n all_examples = list(open(tsv_file, \"r\", encoding='utf-8').readlines())\n split_lines = [l.split('\\t') for l in all_examples]\n sentences = [s[0].strip() for s in split_lines]\n label_integers = [int(s[1].strip()) for s in split_lines]\n label_values = list(set(label_integers))\n if len(label_values) > 2 or min(label_values) != 0 or max(label_values) != 1:\n raise Exception('Labels are not in correct format {0} {1}'.format(label_values[0], label_values[1]))\n labels = np.array([[0, 1] if l == 1 else [1, 0] for l in label_integers])\n return SentenceData(sentences, labels)",
"def dot_to_dep(self,dot_file,dep_file) :\n\t\tth_dep=[]\n\t\trew_dep=[]\n\t\tensure_dir(dep_file)\n\t\twith open(dot_file,\"r\") as dot_f :\n\t\t lines = dot_f.readlines()\n\t\t for line in lines :\n\t\t for word in self.types :\n\t\t if \"\\\\\\\"Label\"+word+\"\\\\\\\"\" in line :\n\t\t rew_dep.append(word)\n\t\t for word in self.get_all_th() :\n\t\t if \"\\\\\\\"Label\"+word+\"\\\\\\\"\" in line :\n\t\t th_dep.append(word)\n\t\tself.write_dep(th,rew,dep_file)",
"def tsv2npy( inFN, outFN = None, getio = None ):\n if outFN is None: outFN = ReplaceFileExt( inFN, '.npy' )\n if getio: return dict( depends_on = inFN, creates = outFN )\n z = DotData( SVPath = inFN )\n np.save( outFN, z )",
"def generate_db(tsv_file, db_file):\n logger.info(\"Converting tsv %s to db file %s\", tsv_file, db_file)\n if os.path.exists(db_file):\n os.remove(db_file)\n db = TinyDB(db_file)\n with codecs.open(tsv_file, \"rb\", encoding=\"utf-8\") as f:\n row = f.readline().split(\"\\t\")\n headers = [SanskritObject(x).canonical() for x in row[0:8]]\n logger.info(\"Found dhatu tsv headers: {}\".format(str(headers)))\n # FIXME - Rewrite from here\n for row in f:\n entries = row.split(\"\\t\")[:len(headers)]\n entries = [SanskritObject(e).canonical() for e in entries]\n j = dict(zip(headers, entries))\n db.insert(j)\n db.close()\n logger.info(\"Saved dhatus database\")",
"def parse_table_to_tracy_file(self, filename: str) -> None:\n parse_table_to_tracy_file(self.name, self.table, filename)",
"def get_tsv(self, file):\n # Download it\n url = \"https://download.bls.gov/pub/time.series/cu/{}\".format(file)\n logger.debug(\" - {}\".format(url))\n\n tsv_path = os.path.join(self.THIS_DIR, '{}.tsv'.format(file))\n response = requests.get(url)\n with open(tsv_path, 'w') as f:\n f.write(response.text)\n\n # Convert it to csv\n reader = csv.reader(open(tsv_path, 'r'), delimiter=\"\\t\")\n csv_path = os.path.join(self.THIS_DIR, '{}.csv'.format(file))\n writer = csv.writer(open(csv_path, 'w'))\n for row in reader:\n writer.writerow([cell.strip() for cell in row])",
"def split_dotfile(dotfile):\n return os.path.dirname(dotfile), os.path.basename(dotfile)",
"def getdot(cs_filename: str):\n cs = CommunicatingSystem.parse(cs_filename)\n fp = Path(cs_filename)\n of = fp.parent\n\n cs.to_dot(str(of))\n L.info(f\"Machines saved to {str(of)}\")",
"def convert_to_df(path):\n return pd.read_csv(path, sep='\\t')",
"def to_dvh_file(self, file_path_name):\n save(self.dvh_data, file_path_name)",
"def write_dot_file(self, out_file_path):\n nx.nx_agraph.write_dot(self, out_file_path)",
"def export_vtk(self, filename):\n\n pass",
"def create_data_file(X, y, data_file):\n dump_svmlight_file(X, y, data_file, zero_based=False)",
"def getClassData(file_path=None, new_file_path=None):\n\n\tdf = pd.read_csv(file_path)\n\tprint(df.label.unique())\n\tlabel_list = df.label.unique()\n\tprint(df.shape)\n\tfor label in label_list:\n\t\tdf1 = df[df.label == label]\n\t\tdf1.drop(['label'], inplace=True, axis=1)\n\t\tfile_name = label + '.csv'\n\t\tnew_file_name = os.path.join(new_file_path, file_name)\n\n\t\tdf1.to_csv(new_file_name, index=False)",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")",
"def clean_agilent_uvvis_data(filename):\n df = pd.read_csv(filename, encoding='utf-16', skiprows=5, sep='\\t')\n\n df = df.transpose()\n\n df_t = df.iloc[[0]].values[0]/60.0\n\n return df, df_t",
"def load_teds(fileteds):\n with open(fileteds + '.ted', 'r') as auxfile:\n datateds = json.load(auxfile)\n return datateds",
"def _parse_ods_file(file):\n\n storm = pd.read_csv(file)\n\n # Combine the date and time columns into a single datetime column\n storm.insert(0, 'DateTime',\n ['{0} {1}'.format(storm['Date'][i], storm['Time'][i])\n for i in\n range(len(storm))])\n storm['DateTime'] = pd.to_datetime(storm['DateTime'],\n format='%m/%d/%y %H:%M:%S.%f')\n\n storm.set_index('DateTime', inplace=True)\n\n # Convert the values in the duration column to timedelta objects\n storm['Duration(s)'] = pd.to_timedelta(storm['Duration (ms)'],\n unit='ms')\n\n # Remove unnecessary columns\n # _ = storm.pop('Date')\n # _ = storm.pop('Time')\n # _ = storm.pop('Comments')\n # _ = storm.pop('Duration (ms)')\n try:\n _ = storm.pop('Flash')\n except KeyError:\n pass\n\n storm['Initiation Height (km)'] = pd.to_numeric(\n storm['Initiation Height (km)'], errors='coerce')\n\n storm.sort_index(inplace=True)\n\n return storm"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a TSV file to an .npy file.
|
def tsv2npy( inFN, outFN = None, getio = None ):
if outFN is None: outFN = ReplaceFileExt( inFN, '.npy' )
if getio: return dict( depends_on = inFN, creates = outFN )
z = DotData( SVPath = inFN )
np.save( outFN, z )
|
[
"def convert_to_npy(filename):\n\n if filename[-4:] == \".txt\":\n filename = filename[:-4] # Removing extension.\n\n print(f\"Converting {filename}.txt to Numpy binary...\")\n t1 = time.time()\n\n data = np.loadtxt(filename + \".txt\", unpack=True)\n np.save(filename + \".npy\", data)\n\n print(f\"Numpy binary saved to {filename}.npy in {time.time() - t1:.4f} seconds.\")",
"def load_npy(self, filename):\n self.set_data(np.load(filename))",
"def write_tsv(file, array, header=''):\n\n _np.savetxt(file, array, fmt='%.4f', delimiter='\\t', header=header, comments='#')",
"def np_to_file(list_of_nparrays, file_path, compresslevel):\n # Pickle to tmp file and overwrite to prevent writing partial files.\n tmp_file_path = file_path + '._tmp_'\n with tf.io.gfile.GFile(tmp_file_path, 'wb') as f:\n with gzip.GzipFile(fileobj=f, compresslevel=compresslevel) as gzipf:\n for x in list_of_nparrays:\n np.save(gzipf, x, allow_pickle=False)\n # Moving a file is much less error-prone than pickling large files.\n tf.io.gfile.rename(tmp_file_path, file_path, overwrite=True)",
"def _read_npy_file(file):\n data = np.load(file, allow_pickle=True)\n labels = []\n\n for sequences, label in data:\n labels.append(str(label))\n\n return labels",
"def load_from_tsv(tsv_file):\n # Load data from files\n all_examples = list(open(tsv_file, \"r\", encoding='utf-8').readlines())\n split_lines = [l.split('\\t') for l in all_examples]\n sentences = [s[0].strip() for s in split_lines]\n label_integers = [int(s[1].strip()) for s in split_lines]\n label_values = list(set(label_integers))\n if len(label_values) > 2 or min(label_values) != 0 or max(label_values) != 1:\n raise Exception('Labels are not in correct format {0} {1}'.format(label_values[0], label_values[1]))\n labels = np.array([[0, 1] if l == 1 else [1, 0] for l in label_integers])\n return SentenceData(sentences, labels)",
"def run_numpy(in_file, out_file):\n data = np.loadtxt(in_file, delimiter=',')\n data = np.array([[1, 2, 3], [1, 4, 9]])\n np.savetxt(out_file, data, delimiter=',')",
"def save_npy(self, filename):\n np.save(filename, self.data)",
"def load_data_array(fname):\n data = np.genfromtxt(fname)\n #data = np.load(fname)\n return data",
"def np_from_file(file_path, compresslevel):\n if not tf.io.gfile.exists(file_path):\n raise FileNotFoundError(file_path)\n res = []\n with tf.io.gfile.GFile(file_path, 'rb') as f:\n with gzip.GzipFile(fileobj=f, compresslevel=compresslevel) as gzipf:\n while True:\n try:\n res.append(np.load(gzipf, allow_pickle=False))\n except Exception: # pylint: disable=broad-except\n break\n return res",
"def csv_to_ndarray(fname): \n\t\ttry:\n\t\t\treturn np.genfromtxt(fname, delimiter=\",\")\t\n\t\texcept Exception, e:\n\t\t\tprint \"Error loading file %s:\" % fname\n\t\t\traise",
"def load_synapses_npy(npy_path):\n records = np.load(npy_path, allow_pickle=True)\n\n numeric_cols = ['z', 'y', 'x', 'conf', 'label', 'body', 'sv']\n numeric_cols = [*filter(lambda c: c in records.dtype.names, numeric_cols)]\n\n df = pd.DataFrame(records[numeric_cols])\n\n if 'point_id' in records.dtype.names:\n df.index = records['point_id']\n\n df['kind'] = pd.Series(records['kind'], dtype='category')\n if 'user' in records.dtype.names:\n df['user'] = pd.Series(records['user'], dtype='category')\n\n return df",
"def _file_to_array(self, file, type=int):\n\n mlist = []\n for line in open(file):\n mlist.append(line)\n return np.asarray(mlist, dtype=type)",
"def txt_to_array(pathname, shape):\n import numpy as np\n f = open(pathname, 'r')\n data = np.array(\n [float(i) for i in f.read().split()]).reshape(shape)\n f.close()\n return data",
"def to_numpy(fp):\n return to_dataframe(fp).values",
"def data2array(filepath):\n file = open(filepath, 'r')\n skip_bill = file.readline() #skip over column name\n lines = file.readlines()\n\n lst = []\n #iterate through the lines and append to list\n for line in lines:\n line = line.strip() #get rid of the \\n\n value = float(line) #get the float value\n lst.append(value)\n\n arr = np.asarray(lst)\n return arr",
"def txt_to_1D(txt_file, out_file_base):\n\t# read in par file\n\tdata = np.loadtxt(txt_file)\n\t\n\t# save the columns of data as separate .1D files\n\tfor i in xrange(data.shape[1]):\n\t\tout_file = '{0}{1}.1D'.format(out_file_base, i+1)\n\t\tnp.savetxt(out_file, data[:,i])",
"def npy2csv(filename_npy, filename_csv):\n # Read the file\n x = np.load(filename_npy)\n print(\"Columns of the input array: {0}\".format(x.dtype.names))\n # Write out the file\n # %f : float\n # %i : integer\n # for formatting options: https://docs.scipy.org/doc/numpy-1.13.0/reference\n # /generated/numpy.savetxt.html\n np.savetxt(filename_csv, x, fmt=\"%f,%i,%f\",\n header=\"box_size,count,lacunarity\")\n print(\"\\nFile was saved: {0}\".format(filename_csv))\n return True",
"def export_vtk(self, filename):\n\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
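DotData is project-specific, so here is a numpy-only sketch of the same TSV-to-.npy conversion, using a structured array with named columns in place of the DotData record array (the tab delimiter and header row are assumptions):

import numpy as np

def tsv_to_npy(in_fn, out_fn=None):
    if out_fn is None:
        out_fn = in_fn.rsplit('.', 1)[0] + '.npy'
    # names=True keeps the header as field names, roughly matching DotData
    data = np.genfromtxt(in_fn, delimiter='\t', names=True)
    np.save(out_fn, data)
    return out_fn

with open('example.tsv', 'w') as f:
    f.write('a\tb\n1\t2.5\n3\t4.5\n')
print(np.load(tsv_to_npy('example.tsv')))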
Normalize data within bins, using previously computed bin means
|
def normalizeInBins( inData, valCol, binCol, binMin, binMax, binStep, binMeans, commonStd ):
binColValues = 1.0 - ( 1.0 - inData[ binCol ].values )
binCount = int( ( binMax - binMin ) / binStep )
bins = np.arange( binMin, binMax, binStep )
means = np.zeros( len( inData ) )
for i in range( binCount ):
# binBot = bins[i]
binTop = bins[i]
theIdx = ( (binTop - binColValues) < binStep ) & ( ( binTop - binColValues ) >= 0 )
means[ theIdx ] = binMeans[ i ]
result = ( inData[ valCol ].values - means ) / commonStd
if False:
# Fast version
bins = np.linspace( binMin, binMax, binCount+1 )
binsHere = np.digitize( inData[ binCol ], bins ) - 1
np.clip( binsHere, 0, binCount-1, out = binsHere );
means = np.take( binMeans, binsHere )
result = ( inData[ valCol ].values - means ) / commonStd
return result
|
[
"def normalize_bins(self):\n self.norm_bin = np.ones(self.nbins)\n for i in range(self.nbins):\n f = lambda z: self.raw_dndz_bin(z, i)\n\n norm = integrate.simps(f(np.linspace(self.z_min,self.z_max,1000)), x=np.linspace(self.z_min,self.z_max,1000))\n\n \n self.norm_bin[i] = 1.0/norm\n print(self.norm_bin[i])",
"def post_normalize(hist):\n hn = hist.Clone()\n for i in range(1, hn.nbins()+1):\n hn.SetBinContent(i, hn.GetBinContent(i) / hn.GetBinWidth(i))\n hn.SetBinError(i, hn.GetBinError(i) / hn.GetBinWidth(i))\n if hn.GetBinContent(i)<0:\n hn.SetBinContent(i, 0)\n return hn",
"def meanNormalize(im, mask):\n # get the values and sort them into mask regions, normalizing each separately\n func = lambda p, m: (p, m)\n vals, masks = getPixelValues(im, mask, func)\n d = lists2dict(masks, vals)\n # get a dictionary of region -> mean\n means = dict([(region, getMean(rvals)) for region, rvals in d.iteritems()])\n # now actually scale the values -- we want them between 0 and roughly 255 to end with\n # if a mean is low but a value is high, we can get very high numbers, so clamp those to 510\n normvals = [clamp(v*(127.5/max(means[m], 1.0)), 0.0, 510) for v, m in izip(vals, masks)]\n return normvals",
"def normalize(data):\n\n res = (data - np.mean(data, axis=0)[np.newaxis, :])\n std = np.std(data, axis=0)[np.newaxis, :]\n std = np.where(std == 0, 1, std)\n return res / std",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n return (x - mvec)/stdvec",
"def normalize(train_data):\n\t# Keep track for feature and mean, std\n\tnormalize_np = np.zeros((len(train_data), 2))\n\tfor i in range(1, len(train_data)):\n\n\t\trow_mean = np.mean(train_data[i])\n\t\trow_std = np.std(train_data[i])\n\t\ttrain_data[i] = (train_data[i]-row_mean)/row_std\n\n\t\tnormalize_np[i, 0], normalize_np[i, 1] = np.copy(row_mean), np.copy(row_std)\n\n\tnormalize_np[0, 1] = 1\n\treturn train_data, normalize_np",
"def normalize(self, hist):\n total = np.sum(hist)\n return np.zeros(hist.shape) if total == 0 else hist / total",
"def normalize(data):\n minvalue = np.min(data)\n maxvalue = np.max(data)\n valrange = maxvalue - minvalue\n vals = np.zeros(len(data))\n for i in range(len(data)):\n if valrange == 0.0:\n vals[i] = -0.5\n else:\n vals[i] = ((data[i] - minvalue) / valrange) - 0.5\n return vals",
"def binMeanCalc( binnedPoints ):\n\n means = [ [0,0,0,0] for i in range(len(binnedPoints)) ]\n\n for index, tBin in enumerate(binnedPoints):\n means[index][0] = np.mean([ i[0] for i in tBin ]) if len([ i[0] for i in tBin ]) > 0 else 0\n means[index][1] = np.mean([ i[1] for i in tBin ]) if len([ i[1] for i in tBin ]) > 0 else 0\n means[index][2] = np.mean([ i[2] for i in tBin ]) if len([ i[2] for i in tBin ]) > 0 else 0\n means[index][3] = np.mean([ i[3] for i in tBin ]) if len([ i[3] for i in tBin ]) > 0 else 0\n\n return means",
"def normalize(inp: np.ndarray, mean: float, std: float):\n inp_out = (inp - mean) / std\n return inp_out",
"def normalize(self, value: np.ndarray) -> np.ndarray:\n std = np.sqrt(self.var)\n if self.count == 0 or np.equal(std, 0).any():\n return value\n return (value - self.mean) / (std + self.eps)",
"def binav(y, bins = 1, ax = -1):\n y = np.asanyarray(y)\n if ax != -1:\n y = np.rollaxis(y, ax, start = y.ndim)\n \n if y.shape[-1] % int(bins) == 1.0 and y.ndim > 1:\n y = y.swapaxes(-1, 0)\n y = y[0:-1]\n y = y.swapaxes(0, -1)\n elif y.shape[-1] % int(bins) > 1.0 and y.ndim > 1:\n b = y.shape[-1] % int(bins)\n y = y.swapaxes(-1, 0)\n y = y[b/2:-b/2]\n y = y.swapaxes(0, -1)\n elif y.shape[-1] % int(bins) > 1.0 and y.ndim == 1:\n b = y.shape[-1] % int(bins)\n y = y[b/2:-b/2]\n elif y.shape[-1] % int(bins) == 1.0 and y.ndim == 1:\n y = y[0:-1]\n a = y.shape[-1] / int(bins)\n newshape = (y.shape[0:-1] + (a,) + (bins,))\n yn = y.reshape(newshape).mean(axis = -1).squeeze()\n if ax != -1:\n yn = np.rollaxis(yn, -1, start = ax)\n \n return yn",
"def normalize_X(self,X):\r\n X_n = X.copy()\r\n for i in range(X_n.shape[1]):\r\n X_n[:, i] = (X_n[:, i] - self.lower_bound[i]) / (self.upper_bound[i] - self.lower_bound[i])\r\n return X_n",
"def normalize(self, x):\n return self.mean_std_tracker.normalize(x)",
"def precompute_normalization(self, *bands):\n if not self.normalization_parameters:\n return\n\n for band in bands or self.bands:\n if band not in self.normalization:\n self._get_normalization_limits(band)",
"def normalize(data, base='auto'):\n # Note: reflpak supported visualization like \"counts per 10000 monitor\"\n # so that the displayed data looked roughly like the measured data, except\n # all scaled to a common monitor. This is not available in reductus.\n\n # TODO: consistent use of data.detector.counts vs. data.v\n # see in particular the detector/monitor dead time, spectral efficiency,\n # dark current, etc.\n\n from .scale import apply_norm\n data = copy(data)\n apply_norm(data, base)\n return data",
"def energyNormalize(im, mask):\n # get the values and sort them into mask regions, normalizing each separately\n func = lambda p, m: (p, m)\n vals, masks = getPixelValues(im, mask, func)\n d = lists2dict(masks, vals)\n # get a dictionary of region -> (mean, stddev)\n stats = dict([(region, (getMean(rvals), getStdDev(rvals))) for region, rvals in d.iteritems()])\n func = lambda v, m: energyNormFunc(v, *stats[m])\n normvals = [func(v, m) for v, m in izip(vals, masks)]\n return normvals",
"def normalize(signal):\n if np.std(signal) != 0:\n std = np.std(signal)\n else :\n std = 1\n y = np.array((signal-np.mean(signal))/std, dtype=np.float64)\n return y",
"def normalize_by_histogram(gray):\n hist, bins = np.histogram(gray.flatten(), 256, [0,256])\n cdf = hist.cumsum()\n cdf_normalized = cdf * hist.max()/ cdf.max()\n cdf_m = np.ma.masked_equal(cdf, 0)\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n gray_norm = cdf[gray] # Now we have the look-up table\n return gray_norm"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
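The normalizeInBins row above looks up a precomputed mean for each value's bin and then z-scores against a shared standard deviation; its disabled "fast version" does the lookup with digitize. A minimal numpy sketch of that vectorised form, with made-up bin means for illustration:

import numpy as np

def normalize_in_bins(values, bin_values, bin_min, bin_max, bin_step,
                      bin_means, common_std):
    # map each bin_value to its bin index, then subtract that bin's mean
    bin_count = int((bin_max - bin_min) / bin_step)
    edges = np.linspace(bin_min, bin_max, bin_count + 1)
    idx = np.digitize(bin_values, edges) - 1
    np.clip(idx, 0, bin_count - 1, out=idx)
    return (values - np.take(bin_means, idx)) / common_std

vals = np.array([1.0, 2.0, 3.0, 4.0])
freqs = np.array([0.1, 0.1, 0.6, 0.6])   # the 'binCol' values
print(normalize_in_bins(vals, freqs, 0.0, 1.0, 0.5,
                        bin_means=np.array([1.5, 3.5]), common_std=1.0))
# expected: [-0.5  0.5 -0.5  0.5]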
Define rules to compute mean and stddev for a given column in the given tsv files
|
def DefineRulesTo_computeMeanStd( pr, inFNs, colNum, outFN, addRuleArgs = {} ):
pr.addRule( commands = ' | '.join(( 'tail -q -n +2 ' + ' '.join( MakeSeq( inFNs ) ),
'cut -f %d' % colNum,
'grep -iv nan',
'../Operations/Ilya_Operations/tblstats' )),
depends_on = inFNs,
saveOutputTo = outFN,
**addRuleArgs )
|
[
"def find_mean_std(subparsers):\n\n subparsers.add_parser(\n \"find_mean_std\",\n help=\"Find mean and std to normalize data\",\n )",
"def standardise_stddev(dataframe):\n\n data = dataframe.copy()\n\n for col in data.columns:\n if col == data.columns[-1]:\n preprocess_values.update({\n \"stddev\": data[col].std(),\n \"mean\": data[col].mean()\n })\n\n data[col] = (data[col] - data[col].mean()) / data[col].std()\n\n return data",
"def calc_mean_std(size, path):\n\n nr_mean = 0\n nr_std = 0\n ni_mean = 0\n ni_std = 0\n\n for i in range(size):\n nr = np.load(path + 'n_real_' + str(i) + '.npy')\n ni = np.load(path + 'n_imag_' + str(i) + '.npy')\n\n nr_mean += np.mean(nr)\n nr_std += np.std(nr)\n ni_mean += np.mean(ni)\n ni_std += np.std(ni)\n\n nr_mean = nr_mean/size\n nr_std = nr_std/size\n ni_mean = ni_mean/size\n ni_std = ni_std/size \n\n return nr_mean, nr_std, ni_mean, ni_std",
"def load_mean_std(self, file):\n\t\twith h5py.File(file, 'r') as h5f:\n\t\t\tself.mean_2d = h5f['mean_2d'][:]\n\t\t\tself.std_2d = h5f['std_2d'][:]\n\t\t\tself.mean_3d = h5f['mean_3d'][:]\n\t\t\tself.std_3d = h5f['std_3d'][:]",
"def run_averages(file_input='brain_sample.csv', file_output='brain_average.csv'):\n # Open the file to analyse\n planes = np.loadtxt(file_input, dtype=int, delimiter=',')\n\n # Calculates the averages through the sagital/horizontal planes\n # and makes it as a row vector\n averages = planes.mean(axis=1)[np.newaxis, :]\n\n # write it out on my file\n np.savetxt(file_output, averages, fmt='%.1f', delimiter=',')",
"def compute_mean_std(self, verbose=False):\n sum_intensities = 0.0\n numel = 0\n\n with mt_datasets.DatasetManager(self,\n override_transform=mt_transforms.ToTensor()) as dset:\n pbar = tqdm(dset, desc=\"Mean calculation\", disable=not verbose)\n for sample in pbar:\n input_data = sample['input']\n sum_intensities += input_data.sum()\n numel += input_data.numel()\n pbar.set_postfix(mean=\"{:.2f}\".format(sum_intensities / numel),\n refresh=False)\n\n training_mean = sum_intensities / numel\n\n sum_var = 0.0\n numel = 0\n\n pbar = tqdm(dset, desc=\"Std Dev calculation\", disable=not verbose)\n for sample in pbar:\n input_data = sample['input']\n sum_var += (input_data - training_mean).pow(2).sum()\n numel += input_data.numel()\n pbar.set_postfix(std=\"{:.2f}\".format(np.sqrt(sum_var / numel)),\n refresh=False)\n\n training_std = np.sqrt(sum_var / numel)\n return training_mean.item(), training_std.item()",
"def DefineRulesTo_normalizeOneColumn( pr, inFN, colName, meanStdFN, outFN, addRuleArgs = {} ):\n\n pr.addInvokeRule( invokeFn = normalizeOneColumn,\n invokeArgs = Dict( 'inFN colName meanStdFN outFN' ),\n **addRuleArgs )",
"def standardize_df(df):\n return (df-df.mean())/df.std()",
"def calc_mean_std(self):\n\n # get ob_next sets from memory\n memory_len = len(self._memory)\n all_obs_next = []\n col_len = len(self._memory[memory_len - 1].obs_nex)\n \n for i in range(memory_len):\n all_obs_next.append(self._memory[i].obs_nex)\n \n # cacualte average and standard diviation for each features \n return (np.mean(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1), \n np.std(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1))",
"def add_arith_mean_cols(assay_results_df, input_dir):\n for metab in assay_results_df.index:\n resistant = assay_results_df.ix[metab, :6]\n sensitive = assay_results_df.ix[metab, 6:12]\n overall = assay_results_df.ix[metab, :12]\n\n for count, group in enumerate([resistant, sensitive, overall]):\n arith_mean = np.mean(group)\n arith_var = np.var(group)\n if count == 0:\n assay_results_df.ix[metab, 'resistant_amean'] = arith_mean\n assay_results_df.ix[metab, 'resistant_avar'] = arith_var\n if count == 1:\n assay_results_df.ix[metab, 'sensitive_amean'] = arith_mean\n assay_results_df.ix[metab, 'sensitive_avar'] = arith_var\n if count == 2:\n assay_results_df.ix[metab, 'overall_amean'] = arith_mean\n assay_results_df.ix[metab, 'overall_avar'] = arith_var\n\n assay_results_df.to_csv(input_dir + 'assay_results_extended.tsv',\n sep='\\t',\n na_rep='NaN')\n\n return assay_results_df",
"def analyse(self):\n analysers = {'String': StringAnalyser, 'Integer': NumericalAnalyser,\n 'Float': NumericalAnalyser, 'Enum': EnumAnalyser}\n for column in self.columns:\n column.define_most_common()\n if not column.empty:\n column.define_type()\n column.define_outliers()\n if column.type in analysers:\n column.analysis = analysers[column.type](column.values)",
"def calc_mean_std_dev(january, febuary, march, april, may, june, july, august, september, october, november, december):\n january_mean = (sum(january)/len(january))\n febuary_mean = (sum(febuary)/len(febuary))\n march_mean = (sum(march)/len(march))\n april_mean = (sum(april)/len(april))\n may_mean = (sum(may)/len(may))\n june_mean = (sum(june)/len(june))\n july_mean = (sum(july)/len(july))\n august_mean = (sum(august)/len(august))\n september_mean = (sum(september)/len(september))\n october_mean = (sum(october)/len(october))\n november_mean = (sum(november)/len(november))\n december_mean = (sum(december)/len(december))\n january_std_dev = np.std(january)\n febuary_std_dev = np.std(febuary)\n march_std_dev = np.std(march)\n april_std_dev = np.std(april)\n may_std_dev = np.std(may)\n june_std_dev = np.std(june)\n july_std_dev = np.std(july)\n august_std_dev = np.std(august)\n september_std_dev = np.std(september)\n october_std_dev = np.std(october)\n november_std_dev = np.std(november)\n december_std_dev = np.std(december)\n# print(january_mean, febuary_mean, march_mean, april_mean, may_mean)\n means = [january_mean, febuary_mean, march_mean, april_mean, may_mean, june_mean, july_mean, august_mean, september_mean, october_mean, november_mean, december_mean]\n std_dev = [january_std_dev, febuary_std_dev, march_std_dev, april_std_dev, may_std_dev, june_std_dev, july_std_dev, august_std_dev, september_std_dev, october_std_dev, november_std_dev, december_std_dev]\n# d= dict(monthly_values)\n# print(type(monthly_values), monthly_values)\n \n return means, std_dev",
"def compute_mean_stdev_column(self, all_files_dictionary_centralized=None):\n all_files = self.dictionary_check(all_files_dictionary_centralized)\n self.print_memory_usage()\n\n # use keys of openpose here\n all_mean_stdev = {} # holds means and stdev of each directory, one json file per directory\n once = 1\n all_files_xy = {'all': {}}\n self.print_memory_usage()\n print(\"load data into dictionary\")\n\n for subdir in all_files.keys():\n # load files from one folder into dictionary\n for file in all_files[subdir]:\n temp_df = all_files[subdir][file]\n if once == 1:\n for k in self.keys:\n all_files_xy['all'][k] = {'x': [[] for x in range(len(temp_df['people'][0][k][0::3]))],\n 'y': [[] for x in range(len(temp_df['people'][0][k][1::3]))]}\n\n once = 0\n\n for k in self.keys:\n for i in range(len(temp_df['people'][0][k][0::3])):\n all_files_xy['all'][k]['x'][i].append(temp_df['people'][0][k][0::3][i])\n all_files_xy['all'][k]['y'][i].append(temp_df['people'][0][k][1::3][i])\n\n self.print_memory_usage()\n print(\"Files read, computing mean and stdev\")\n\n for k in self.keys:\n mean_stdev_x = []\n mean_stdev_y = []\n self.print_memory_usage()\n for list in np.array(all_files_xy['all'][k]['x']):\n\n warnings.simplefilter(action='ignore', category=FutureWarning)\n if 'Null' in list:\n mean_stdev_x.append([\"Null\", \"Null\"])\n else:\n list = [float(item) for item in list]\n mean_stdev_x.append([np.mean(list), statistics.pstdev(list)])\n\n for list in np.array(all_files_xy['all'][k]['y']):\n if 'Null' in list:\n mean_stdev_y.append([\"Null\", \"Null\"])\n else:\n list = [float(item) for item in list]\n mean_stdev_y.append([np.mean(list), statistics.pstdev(list)])\n\n all_mean_stdev[k] = [np.array(mean_stdev_x).T.tolist(), np.array(mean_stdev_y).T.tolist()]\n\n # write the computed means and std_dev into json file\n f = open(self.path_to_target_dir / \"all_mean_stdev.json\", \"w\")\n f.write(json.dumps(all_mean_stdev))\n f.close()\n\n return all_mean_stdev",
"def calculate_mean_std(dataset):\n if dataset == \"CIFAR10\":\n train_transform = T.ToTensor()\n train_set = datasets.CIFAR10(root=\"./data\", train=True, download=True, transform=train_transform)\n mean = train_set.data.mean(axis=(0, 1, 2)) / 255\n std = train_set.data.std(axis=(0, 1, 2)) / 255\n return mean, std",
"def add_mean_and_std(df):\r\n mean_series = df.mean(axis=0)\r\n std_series = df.std(axis=0)\r\n ret = df.copy()\r\n ret.loc[0] = mean_series\r\n ret.loc[-1] = std_series\r\n return ret.sort_index()",
"def stdDevFromStats():\n\n volList = read()\n\n stdDevValue = stdev(volList) # Calling the standard deviation function from module\n print(\"Standard Deviation value from stat module :\" + str(stdDevValue)+ \"\\n\")",
"def setSD(self, col_name):\n self.SD = self.table.groupby(\"filtercode\")[col_name].std()",
"def getHeightStddev(self, recogniser_csv_file):\n df = pandas.read_csv(recogniser_csv_file, dtype={\"I\": object}, usecols =[\"I\", \"H\"], converters={\"H\": ast.literal_eval})\n group_v = df.loc[:,['I','H']].groupby('I')\n std_dev = [0.0 for i in range(1, len(self.i_labels))]\n std_dev_est = [0.0 for i in range(1, len(self.i_labels))]\n for counter in range(1,len(self.i_labels)):\n true_height = float(self.heights[counter])\n gr = group_v.get_group(self.i_labels[counter])\n avg_val = 0\n for g_counter in range(0, len(gr)):\n l_val = gr.iloc[g_counter,1]\n est = l_val[0]\n std_dev[counter-1] += math.pow(est - true_height, 2)\n avg_val += est\n \n if len(gr) > 0:\n std_dev[counter-1] = math.sqrt(std_dev[counter-1]/len(gr)) \n avg_val /= len(gr)\n \n for g_counter in range(0, len(gr)):\n l_val = gr.iloc[g_counter,1]\n est = l_val[0]\n std_dev_est[counter-1] += math.pow(est - avg_val, 2)\n \n if len(gr) > 1:\n std_dev_est[counter-1] = math.sqrt(std_dev_est[counter-1]/(len(gr)-1))\n \n return std_dev, std_dev_est",
"def getTimeStddev(self, recogniser_csv_file, recog_folder):\n df = pandas.read_csv(recogniser_csv_file, dtype={\"I\": object}, usecols =[\"I\", \"T\"], converters={\"T\": ast.literal_eval})\n group_v = df.loc[:,['I','T']].groupby('I')\n std_dev_est = [0.0 for i in range(1, len(self.i_labels))]\n values = []\n for counter in range(1,len(self.i_labels)):\n t_values = []\n gr = group_v.get_group(self.i_labels[counter])\n avg_val = 0\n for g_counter in range(0, len(gr)):\n l_val = gr.iloc[g_counter,1]\n est = self.getTimeSlot(l_val)\n t_values.append(est)\n avg_val += est\n \n values.append(t_values)\n if len(gr) > 0:\n avg_val /= len(gr)\n \n for g_counter in range(0, len(gr)):\n l_val = gr.iloc[g_counter,1]\n est = self.getTimeSlot(l_val)\n std_dev_est[counter-1] += math.pow(est - avg_val, 2)\n \n if len(gr) > 1:\n std_dev_est[counter-1] = math.sqrt(std_dev_est[counter-1]/(len(gr)-1))\n \n times_curves = []\n for v in values:\n time_curve = []\n for v_counter in range(0, len(v)):\n t_curve = self.getCurve(mean = v[v_counter], stddev = self.stddev_time, min_value = self.time_min, max_value = self.time_max, weight = 1.0)\n if v_counter == 0:\n time_curve = t_curve[:]\n else:\n time_curve = [x + y for x, y in zip(time_curve, t_curve)]\n time_curve = self.normaliseSum(time_curve)\n times_curves.append(time_curve)\n \n with open(recog_folder + \"time_stddev.csv\", 'wb') as outcsv:\n writer = csv.writer(outcsv)\n for row_counter in range(0, len(times_curves[0])):\n row = [row_counter]\n for time_curve_counter in range(0, len(times_curves)):\n row.append(times_curves[time_curve_counter][row_counter])\n writer.writerow(row)\n \n return std_dev_est"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Define rules to normalize one column
|
def DefineRulesTo_normalizeOneColumn( pr, inFN, colName, meanStdFN, outFN, addRuleArgs = {} ):
pr.addInvokeRule( invokeFn = normalizeOneColumn,
invokeArgs = Dict( 'inFN colName meanStdFN outFN' ),
**addRuleArgs )
|
[
"def normalize_table(self):\n pass",
"def _standardize_column_values(dataframe):\n\n # TODO Use None instead of \"-\"; but may affect downstream pipelines that use \"-\" already\n if \"structure.alternate_model\" in dataframe.columns:\n dataframe[\"structure.alternate_model\"].replace(\"\", \"-\", inplace=True)\n if \"ligand.expo_id\" in dataframe.columns:\n dataframe[\"ligand.expo_id\"].replace(0, \"-\", inplace=True)\n if \"ligand_allosteric.expo_id\" in dataframe.columns:\n dataframe[\"ligand_allosteric.expo_id\"].replace(0, \"-\", inplace=True)\n if \"structure.resolution\" in dataframe.columns:\n dataframe[\"structure.resolution\"].replace(0, np.nan, inplace=True)\n\n # In case of drugs\n if \"drug.brand_name\" in dataframe.columns:\n dataframe[\"drug.brand_name\"] = dataframe[\"drug.brand_name\"].apply(\n lambda x: x.split(\";\") if x != \"\" else []\n )\n if \"drug.synonyms\" in dataframe.columns:\n dataframe[\"drug.synonyms\"] = dataframe[\"drug.synonyms\"].apply(\n lambda x: x.split(\"\\t\") if x != \"\" else []\n )\n\n return dataframe",
"def normalization(column):\n max_val = max(column)\n min_val = min(column)\n norm_col = column.copy()\n for i in range(len(column)):\n norm_col[i] = round((column[i] - min_val) / (max_val - min_val), 4)\n # print(round(norm_col[i], 4))\n # print('\\n\\n')\n return norm_col",
"def normalize(cls, obj):\n if isinstance(obj, str):\n obj = {'type': obj}\n x = cls.metaschema()\n validators = {u'$ref': _validate_schema}\n normalizers = {tuple(): [_normalize_schema]}\n validator_class = copy.deepcopy(cls.validator())\n obj = validator_class(x).normalize(obj, no_defaults=True,\n normalizers=normalizers,\n validators=validators)\n return obj",
"def normalization(table, categories):\n # Loop over the categories to normalize them.\n for column in categories:\n # Find the extreme values of the column.\n min_val = table[column].min()\n max_val = table[column].max()\n\n # Remap the column values to be between 0 and 1.\n table[column] = (table[column] - min_val) / (max_val - min_val)\n return table",
"def normalize(self):\n list(map(lambda normalization: normalization[0](self.entry,normalization[1]), self.normalizations))",
"def fix(self):\n self._row[self._current_column]['value'] = self.validationRules[self._current_column]['fix_value']",
"def normalize(df):\n result = df.copy()\n for feature_name in df.columns:\n max_value = df[feature_name].max()\n min_value = df[feature_name].min()\n result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)\n return result",
"def denormalizeRatings(data):\n\n normRating = data[data.columns[1]]\n normRating = normRating.apply(\n lambda ratings: ratings * (maxRating - minRating) + minRating\n )\n\n return data",
"def normalize_df(df):\n return (df-df.min())/(df.max()-df.min())",
"def NormalizeColumn(inLst, NormalizeTo = 1.0, startRow = 0, colIndex = None):\r\n indb = copy.deepcopy(inLst)\r\n functSpace = {}\r\n if colIndex ==None:\r\n valFunct = lambda x:float(x)\r\n def access(indb, index, val):\r\n indb[index] = val\r\n else:\r\n cmd = 'valFunct = lambda x: float(x[' + str(colIndex) + '])'\r\n exec(cmd) in functSpace\r\n def access(indb, index, val):\r\n indb[index][colIndex] = val\r\n if 'str' in str(type(NormalizeTo)):\r\n if NormalizeTo.lower() =='sum': NormalizeTo = SumArr(inLst[startRow:])\r\n if NormalizeTo =='avg': NormalizeTo = AvgArr(inLst[startRow:])\r\n if NormalizeTo =='median': NormalizeTo = MedianArr(inLst[startRow:])\r\n allvals = []\r\n for i in range(startRow, len(indb), 1):\r\n val = valFunct(indb[i])\r\n allvals.append(val)\r\n sumOfLine = sum(allvals)\r\n for i in range(startRow, len(indb), 1):\r\n if sumOfLine == 0:\r\n normalizedVal = 0.0\r\n else:\r\n normalizedVal = float(valFunct(indb[i]))/float(NormalizeTo)\r\n access(indb, i, normalizedVal)\r\n return indb",
"def test_normalize_with_null_columns():\n X = np.array([[1, 2, 0], [0, 1, 0], [1, 1, 0]])\n assert_raises(ValueError, normalize_matrix_on_axis, X)",
"def test_check_single_column():\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n\n @pa.check(\"a\")\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n # pylint:disable=no-self-argument\n assert cls is Schema\n return series < 100\n\n df = pd.DataFrame({\"a\": [101]})\n schema = Schema.to_schema()\n err_msg = r\"Column\\s*a\\s*int_column_lt_100\\s*\\[101\\]\\s*1\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n schema.validate(df, lazy=True)",
"def main_sanitize_data(self):\n # Sanitize column names\n self.data.columns = self.data.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')\n\n # Mandatory Sanitization\n self.data = self.data.apply(self.mandatory_sanitization)\n\n # Specific Column Sanitization\n self.data['business'] = self.data['business'].loc[self.data['business'].notnull()].apply(self.sanitize_business_name)\n self.data['title'] = self.data['title'].str.capitalize().str.replace(\".\", \"\")\n self.data['first_name'] = self.data['first_name'].str.capitalize()\n self.data['last_name'] = self.data['last_name'].str.capitalize()\n self.data['date_of_birth'] = self.data['date_of_birth'].loc[self.data['date_of_birth'].notnull()].apply(self.sanitize_date)\n self.data['home_number'] = self.data['home_number'].loc[self.data['home_number'].notnull()].apply(self.sanitize_landline_numbers)\n self.data['fax_number'] = self.data['fax_number'].loc[self.data['fax_number'].notnull()].apply(self.sanitize_landline_numbers)\n self.data['mobile_number'] = self.data['mobile_number'].loc[self.data['mobile_number'].notnull()].apply(self.sanitize_mobile_numbers)\n self.data['notes'] = self.data['notes'].loc[self.data['notes'].notnull()].apply(self.sanitize_notes)\n\n # Convert nan to None\n self.data = self.data.where(pd.notnull(self.data), None)\n \n print(\"Data Sanitization Successful\")\n return True",
"def normalize(df, feat_to_norm, my_scaler = None):\n if not my_scaler:\n my_scaler = StandardScaler()\n my_scaler.fit(df[feat_to_norm])\n feat_norm = my_scaler.transform(df[feat_to_norm])\n norm_col = []\n for i in range(len(feat_to_norm)):\n norm = feat_to_norm[i] + \"_norm\"\n df.loc[:, norm] = feat_norm[:,i].copy()\n norm_col.append(norm)\n return my_scaler, norm_col",
"def clean(self):\n for column in self.columns:\n column.change_misc_values()\n column.drop_greater_than()",
"def preprocess_column_and_value(self):\n col, val = self.column, self.value\n\n # Case 1. Both column and value are arrays\n if self.is_column_array() and self.is_value_array():\n # Cast the value to ARRAY[] with the same type that the column has\n # Only in this case Postgres will be able to handles them both\n val = cast(pg.array(val), pg.ARRAY(col.type.item_type))\n\n # Case 2. JSON column\n if self.is_column_json():\n # This is the type to which JSON column is coerced: same as `value`\n # Doc: \"Suggest a type for a `coerced` Python value in an expression.\"\n coerce_type = col.type.coerce_compared_value('=', val) # HACKY: use sqlalchemy type coercion\n # Now, replace the `col` used in operations with this new coerced expression\n col = cast(col, coerce_type)\n\n # Done\n self.column_expression = col\n self.value_expression = val",
"def _normalize_input_data(self, data, normalised_field_name='ADDRESS_norm'):\n # make a copy of the actual address field and run the parsing against it\n data[normalised_field_name] = data['ADDRESS'].copy()\n\n # remove white spaces from the end and beginning if present\n data[normalised_field_name] = data[normalised_field_name].str.strip()\n\n # remove commas if present as not useful for matching\n data[normalised_field_name] = data[normalised_field_name].str.replace(', ', ' ')\n data[normalised_field_name] = data[normalised_field_name].str.replace(',', ' ')\n\n # remove backslash if present and replace with space\n data[normalised_field_name] = data[normalised_field_name].str.replace('\\\\', ' ')\n\n # remove spaces around hyphens as this causes ranges to be interpreted incorrectly\n # e.g. FLAT 15 191 - 193 NEWPORT ROAD CARDIFF CF24 1AJ is parsed incorrectly if there\n # is space around the hyphen\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+)(\\s*-\\s*)(\\d+)', r'\\1-\\3', case=False)\n\n # some addresses have number TO number, while this should be with hyphen, replace TO with - in those cases\n # note: using \\1 for group 1 and \\3 for group 3 as I couldn't make non-capturing groups work\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+)(\\s*TO\\s*)(\\d+)', r'\\1-\\3', case=False)\n\n # some addresses have number/number rather than - as the range separator\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+)(\\s*/\\s*)(\\d+)', r'\\1-\\3', case=False)\n\n # some addresses have number+suffix - number+suffix, remove the potential whitespaces around the hyphen\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+[a-z])(\\s*-\\s*)(\\d+[a-z])', r'\\1-\\3', case=False)\n\n # synonyms to expand - read from a file with format (from, to)\n synonyms = pd.read_csv(os.path.join(self.currentDirectory, '../../data/') + 'synonyms.csv').values\n\n # expand common synonyms to help with parsing\n if self.settings['expandSynonyms']:\n self.log.info('Expanding synonyms as a part of normalisation...')\n for fro, to in synonyms:\n data['ADDRESS_norm'] = data['ADDRESS_norm'].str.replace(fro, to)\n\n # parsing gets really confused if region or county is in the line - get known counties from a file\n counties = pd.read_csv(os.path.join(self.currentDirectory, '../../data/') + 'counties.csv')['county']\n\n # use this for the counties so that e.g. ESSEX ROAD does not become just ROAD...\n # todo: the regex is getting ridiculous, maybe do other way around i.e. country must be followed by postcode or\n # be the last component.\n addRegex = r'(?:\\s|$)(?!ROAD|LANE|STREET|CLOSE|DRIVE|AVENUE|SQUARE|COURT|PARK|CRESCENT|WAY|WALK|HEOL|FFORDD|HILL|GARDENS|GATE|GROVE|HOUSE|VIEW|BUILDING|VILLAS|LODGE|PLACE|ROW|WHARF|RISE|TERRACE|CROSS|ENTERPRISE|HATCH|&)'\n\n # remove county from address but add a column for it\n data['County'] = None\n for county in counties:\n msk = data[normalised_field_name].str.contains(county + addRegex, regex=True, na=False)\n data.loc[msk, 'County'] = county\n data[normalised_field_name] = data[normalised_field_name].str.replace(county + addRegex, '', case=False)\n\n return data",
"def _check_schema(self, column_vals):\n \n model = self.medium\n # This will be only localy defined fields (excluding many_to_many)\n own_field_names = set([f.name for f in model._meta.fields])\n # All locally defined fields which are required and not auto fields (id)\n required_field_names = set([f.name for f in model._meta.fields\n if field_is_required(f)])\n m2m_field_names = set([f.name for f in model._meta.many_to_many])\n \n \n processed_column_values = []\n for key, val in column_vals:\n # Valid field?\n if not key in own_field_names.union(m2m_field_names):\n msg = \"Model %r doesn't have field named %s.\" % \\\n (pretty_model_name(model), key)\n \n raise ValueError(msg + \\\n self._annotate_invalid_schema_exception(model, key))\n \n # Keep a track of required fields\n try:\n required_field_names.remove(key)\n except KeyError:\n pass\n \n # If the field is a relation check the related type\n field = model._meta.get_field(key)\n from django.db.models.fields.related import ManyToManyField\n if isinstance(field, ManyToManyField):\n try:\n len(val)\n except TypeError:\n val = [val]\n rel_types = [isinstance(v, field.rel.to) for v in val]\n if not field.null and False in rel_types:\n raise ValueError(\"Values for field %s must be of type %s, \"\n \"got %s\" % \\\n (key,\n pretty_model_name(field.rel.to),\n val))\n processed_column_values.append((key, val))\n if len(required_field_names):\n raise ValueError(\"Requred fields %s not found\" % required_field_names)\n return m2m_field_names, processed_column_values"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Selects the browser to use according to the argument passed to the behave runner; if none is passed, it will run the remote webdriver by default
|
def select_browser(context):
browser = context.config.userdata.get('browser')
if not browser:
browser = 'remote'
if browser.lower() == 'remote':
capabilities = {
"browserName": "chrome",
"browserVersion": "88.0",
"selenoid:options": {
"enableVNC": True,
"enableVideo": False,
"screenResolution": "1280x1024x24"
}
}
return webdriver.Remote(command_executor=getattr(config, "REMOTE_URL"),
desired_capabilities=capabilities)
elif browser.lower() == 'chrome':
return webdriver.Chrome(ChromeDriverManager().install())
elif browser.lower() == 'headless_chrome':
options = webdriver.ChromeOptions()
options.add_argument('--headless')
return webdriver.Chrome(ChromeDriverManager().install(), options=options)
elif browser.lower() in ('ff', 'firefox'):
return webdriver.Firefox(GeckoDriverManager().install())
else:
        raise Exception("The browser type '{}' is not supported".format(browser))
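A minimal usage sketch, assuming this helper is importable from behave's environment.py; the module path and the `-D browser=...` value are illustrative assumptions, not part of the original row:

# environment.py -- hypothetical behave hooks wiring up select_browser
# run with e.g. `behave -D browser=headless_chrome` (omitting -D falls back to the remote grid)
from features.browser_factory import select_browser  # assumed module path

def before_all(context):
    context.driver = select_browser(context)

def after_all(context):
    context.driver.quit()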
|
[
"def launch_browser(self):\n self.driver = webdriver.Chrome()",
"def driver(request):\n print(\"\\nstart browser for test..\")\n browser_name = request.config.getoption(\"browser_name\")\n if browser_name == \"chrome\":\n options = Options()\n options.add_argument('--no-sandbox')\n # options.add_argument(\"--headless\")\n options.add_argument(\"window-size=1920,1080\")\n options.add_argument(\"--incognito\")\n driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)\n driver.get(url=config.CROP_MONITORING_URL)\n time.sleep(2)\n\n elif browser_name == \"firefox\":\n options = FirefoxOptions()\n options.add_argument('--no-sandbox')\n # options.add_argument(\"--headless\")\n options.add_argument(\"window-size=1920,1080\")\n options.add_argument(\"--incognito\")\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=options)\n driver.get(url=config.CROP_MONITORING_URL)\n time.sleep(2)\n\n else:\n print(f\"Browser <browser_name> is still not implemented\")\n yield driver\n print(\"\\nquit browser..\")\n driver.quit()",
"def run_selenium():\n froid.run_selenium()",
"def openBrowser(windowed = True):\n return webdriver.Firefox() if windowed else webdriver.PhantomJS(constants.PHANTOMPATH)",
"def get_browser(browser_name, capabilities=None, **options):\n\n if browser_name == \"chrome\":\n return webdriver.Chrome(desired_capabilities=capabilities, **options)\n if browser_name == \"edge\":\n return webdriver.Edge(capabilities=capabilities, **options)\n if browser_name in [\"ff\", \"firefox\"]:\n return webdriver.Firefox(capabilities=capabilities, **options)\n if browser_name in [\"ie\", \"internet_explorer\"]:\n return webdriver.Ie(capabilities=capabilities, **options)\n if browser_name == \"phantomjs\":\n return webdriver.PhantomJS(desired_capabilities=capabilities, **options)\n if browser_name == \"remote\":\n return webdriver.Remote(desired_capabilities=capabilities, **options)\n if browser_name == \"safari\":\n return webdriver.Safari(desired_capabilities=capabilities, **options)\n\n raise ValueError(\"unsupported browser: {}\".format(repr(browser_name)))",
"def set_browser(testid=\"none\",testdesc=\"none\"):\n # if using sauce - create the correctly formated json string\n # - assume windows 2003\n # otherwise use *browser name\n if controller.testsauce==True:\n if controller.testbrowser == \"googlechrome\":\n sbrowser = {\\\n \"username\": sauce_auth.sauce_user,\\\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2003\",\\\n \"browser\": \"googlechrome\",\\\n \"browser-version\": \"\", \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"job-name\": testid +': '+ testdesc, \\\n \"public\": \"true\", \\\n }\n \n elif controller.testbrowser == \"iexplore\":\n sbrowser = {\\\n \"username\": sauce_auth.sauce_user,\\\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2003\",\\\n \"browser\": \"iexplore\",\\\n \"browser-version\": \"8\" , \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n\n\n elif controller.testbrowser == \"iexplore9\":\n sbrowser = {\\\n \"username\": sauce_auth.sauce_user,\\\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2008\",\\\n \"browser\": \"iexplore\",\\\n \"browser-version\": \"9\" , \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n\n\n \n elif controller.testbrowser == \"opera\":\n sbrowser = {\\\n \"username\": sauce_auth.sauce_user,\\\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2008\",\\\n \"browser\": \"opera\",\\\n \"browser-version\": \"10\" , \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n elif controller.testbrowser == \"safari\":\n sbrowser = {\\\n \"username\": sauce_auth.sauce_user,\\\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2003\",\\\n \"browser\": \"safariproxy\",\\\n \"browser-version\": \"5\", \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n\n elif controller.testbrowser == \"lin_ff\":\n sbrowser = {\\\n \"username\": sauce_auth.sauce_user,\\\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Linux\",\\\n \"browser\": \"firefox\",\\\n \"browser-version\": \"3.6\", \\\n \"max-duration\": 600, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n elif controller.testbrowser == \"firefox3\":\n sbrowser= { \\\n \"username\": sauce_auth.sauce_user,\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2003\",\\\n \"browser\": \"firefox\",\\\n \"browser-version\": \"3\", \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n \n else:\n sbrowser= { \\\n \"username\": sauce_auth.sauce_user,\n \"access-key\": sauce_auth.sauce_key,\\\n \"os\": \"Windows 2003\",\\\n \"browser\": \"firefox\",\\\n \"browser-version\": \"5\", \\\n \"max-duration\": 480, \\\n \"idle-timeout\": 120, \\\n \"public\": \"true\", \\\n \"job-name\": testid +': '+ testdesc \\\n }\n\n browser = json.dumps(sbrowser, indent=4)\n else:\n #use default browser\n if controller.testbrowser == \"iexplore9\":\n browser = \"*iexplore\"\n elif controller.testbrowser == \"firefox4\":\n browser = \"*firefox\"\n \n else:\n browser = \"*\"+controller.testbrowser\n\n return browser",
"def run_locally(self):\n print('Running for browser: ' + self.driver_name)\n return self.trigger_pytest(self.driver_name)",
"def __new__(cls, browser_name, host=None, port=None,\n desired_capabilities=None, **kwargs):\n if host and port:\n return cls.__get_remote_driver(\n browser_name, host, port, desired_capabilities, **kwargs)\n return cls.__get_local_driver(browser_name, **kwargs)",
"def set_browser(self, browser):\n self.browser = browser",
"def __init__(self, browser='chrome'):\n self.logger = Logger('blueRose.log', level='debug').logger\n if browser == \"chrome\":\n driver = webdriver.Chrome()\n elif browser == \"firefox\":\n driver = webdriver.Firefox()\n elif browser == \"ie\":\n driver = webdriver.Ie()\n try:\n self.driver = driver\n except Exception:\n raise NameError(\"Not found this browser,You can enter 'firefox', 'chrome', 'ie'.\")",
"def get_browser(self):\n if self.browser is None:\n if self.driver == \"Firefox\":\n self.browser = webdriver.Firefox()\n else:\n self.browser = webdriver.Chrome(chrome_options=self.get_options())\n return self.browser",
"def set_selenium_remote_session(self, selenium_url=''):\n if self.aborting:\n return self\n\n if self.use_firefox:\n self.browser = webdriver.Remote(\n command_executor=selenium_url,\n desired_capabilities=DesiredCapabilities.FIREFOX)\n else:\n self.browser = webdriver.Remote(\n command_executor=selenium_url,\n desired_capabilities=DesiredCapabilities.CHROME)\n\n self.logger.info('Session started - %s'\n % (datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n return self",
"def switch_start_chrome():",
"def get_driver(driver=webdriver, addr='http://localhost:4723/wd/hub', capabilities=None):\n if not capabilities:\n capabilities = {}\n try:\n return driver.Remote(addr, capabilities)\n except Exception as e:\n print(\"\\n\\nError starting Appium session: '{}'\\n\\n\".format(e))\n return None",
"def runDriver(driver_path):\n print(\"Running driver...\") \n driver = webdriver.Chrome(driver_path)\n return driver",
"def load_driver(gene_url):\n global driver\n if \"driver\" in dir(): \n driver.quit()\n driver = webdriver.Chrome()\n driver.get(gene_url);\n driver.find_element_by_partial_link_text(\"Display Options\").click()\n seq = Select(driver.find_element_by_id(\"seqsPerPage\"))\n seq.select_by_value(\"2000\")\n driver.find_element_by_id(\"displayCmd\").submit()",
"def main(path, chromedriver, geckodriver):\n if not geckodriver or (geckodriver and chromedriver):\n click.echo(\"Installing the latest version of chromedriver.\")\n webdrivers.install_latest_chromedriver(path)\n if not chromedriver or (chromedriver and geckodriver):\n click.echo(\"Installing the latest version of geckodriver.\")\n webdrivers.install_latest_geckodriver(path)",
"def launchBrowser(config, packageName):\n log.info(u\"Browser path: \" + config.browserPath)\n url = u'http://127.0.0.1:%d/%s' % (config.port, quote(packageName))\n log.info(u\"url \"+url)\n if sys.platform[:3] == u\"win\":\n profile = \"win-profile\"\n else:\n profile = \"linux-profile\"\n if (config.configDir/profile).exists():\n (config.configDir/profile).rmtree()\n log.info(\"Creating FireFox profile copied from\"+\n config.webDir/profile+\" to \"+\n config.configDir/profile)\n if not (config.configDir/profile).exists():\n (config.configDir/profile).mkdir()\n for filename in (config.webDir/profile).files():\n filename.copy(config.configDir/profile)\n if not (config.configDir/profile/'Cache').exists():\n (config.configDir/profile/'Cache').mkdir()\n for filename in (config.webDir/profile/'Cache').files():\n filename.copy(config.configDir/profile/'Cache')\n log.info(\"setupMoz configDir \"+config.configDir+ \" profile \"+profile)\n log.info(u\"profile = \" + config.configDir/profile)\n if sys.platform[:3] == u\"win\":\n try:\n os.environ[\"MOZ_NO_REMOTE\"] = \"1\"\n os.spawnl(os.P_DETACH, \n config.browserPath,\n config.browserPath.basename(),\n '-profile', \n '\"' + config.configDir/profile + '\"', \n url)\n reactor.callLater(10, tryAgain, config, profile, url)\n except OSError:\n print u\"Cannot launch Firefox, please manually run Firefox\"\n print u\"and go to\", url \n else:\n launchString = 'LOGNAME=eXe7913 '\n launchString += config.browserPath\n launchString += ' -profile \"' + config.configDir/profile + '\" '\n launchString += url\n launchString += \"&\"\n log.info(u'Launching firefox with: ' + launchString)\n os.system(launchString)",
"def __init__(self):\n options = Options()\n options.add_argument('-headless')\n self.path = \"C:\\\\Users\\\\weimaoquan\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe\"\n self.browser = webdriver.Chrome(executable_path=self.path, options=options)\n self.browser.implicitly_wait(3)\n self.login()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test to see if the JSON blob is null; if so, there are no games on the specified date, so exit.
|
def test_for_games():
if games_json:
pass
else:
print("There are no NBA games on this day. Life is meaningless.")
_exit(1)
|
[
"def test_get_daily_data_req_empty(self):\n output = self.main.get_daily_data(self.request_empty)\n self.assertIsInstance(\n json.loads(output)[0],\n dict,\n )",
"def todays_games(self):\n unplayed_games = []\n live_games = []\n finished_games = []\n games_data = self.games_data\n game_headers = games_data[0]['headers']\n game_sets = games_data[0]['rowSet']\n header_list = [\n 'GAME_STATUS_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID', 'GAME_ID', 'GAME_DATE_EST', 'GAME_STATUS_TEXT'\n ]\n for game in game_sets:\n # game_info = list(zip(game_headers, game))\n game_info = dict(zip(game_headers, game))\n game_data = {x.lower(): game_info.get(x) for x in header_list}\n # game_data = {x.lower(): self._get_data(game_info, x) for x in header_list}\n logging.info(json.dumps(game_data, indent=2))\n game_data['home_record'] = self.get_team_record(game_data['home_team_id'])\n game_data['away_record'] = self.get_team_record(game_data['visitor_team_id'])\n game_data['home_team'] = self._team_ids.get(game_data['home_team_id'])\n game_data['away_team'] = self._team_ids.get(game_data['visitor_team_id'])\n status = game_data['game_status_id']\n if status == '1':\n unplayed_games.append(game_data)\n elif status == '2' or status == '3':\n score_headers = games_data[1]['headers']\n score_sets = games_data[1]['rowSet']\n game_scores = []\n for score in score_sets:\n game_scores.append(list(zip(score_headers, score)))\n for score in game_scores:\n game_id = self._get_data(score, 'GAME_ID')\n team_id = self._get_data(score, 'TEAM_ID')\n points = self._get_data(score, 'PTS')\n if game_id == game_data['game_id']:\n if team_id == game_data['home_team_id']:\n game_data['home_team_score'] = points\n elif team_id == game_data['visitor_team_id']:\n game_data['away_team_score'] = points\n if status == '2':\n live_games.append(game_data)\n elif status == '3':\n finished_games.append(game_data)\n Games = namedtuple('Status', ['unplayed', 'live', 'final'])\n games_info = Games(unplayed=unplayed_games, live=live_games, final=finished_games)\n # CACHE.set(game_data['id'], game_data)\n return games_info",
"def _get_games_data(self, date=None):\n url = f\"{BASE_URL}scoreboard/\"\n # if no date is provided get data for the current days games\n if not date:\n date = datetime.datetime.strftime(self._date, \"%m/%d/%Y\")\n params = {\n 'GameDate': date,\n 'LeagueID': '00',\n 'DayOffset': '0'\n }\n data = fetch_data(self._session, url, params)\n return data",
"def filter_data(data, date):\n for entry in data:\n entry_date = datetime.datetime.strptime(entry[\"Gametime\"],\n strptime_fmt).date()\n if date + datetime.timedelta(days=1) == entry_date:\n yield entry",
"def test_missing_json():\n json_content = JsonHandler.read_json(RESOURCES[\"missing\"])\n assert json_content == {}",
"def check_generated_data(date) -> bool:\n stats_yest = f\"data/player_stats/player_stats_{date}.csv\"\n return os.path.isfile(stats_yest)",
"def get_last_game(json):\n result = []\n \n for dictionary in get_games(json, \"hillegom\"):\n if dictionary[\"score\"] != \"\":\n\n # Remove teamnumber\n dictionary[\"home\"] = teamname_only(dictionary[\"home\"])\n dictionary[\"away\"] = teamname_only(dictionary[\"away\"])\n\n result.append(dictionary)\n \n return result[-1]",
"def test_live_sensor_data_doesnt_exist(self, mock_on_board, mock_jsonify):\n mock_jsonify.side_effect = json.dumps\n\n test_key = 'test_key'\n mock_on_board.get_latest_sensor_data.return_value = ()\n\n expected = json.dumps(\n {\n 'key': str(test_key),\n 'timestamp': None,\n 'value': None\n }\n )\n\n actual = live_sensor_data(test_key)\n\n self.assertEqual(expected, actual)",
"def _is_null_date(fecha):\n return year(fecha) == YEAR_NULL_TERADATA",
"def test_live_profiling_data_doesnt_exist(self, mock_on_board,\n mock_jsonify):\n mock_jsonify.side_effect = json.dumps\n\n test_key = 'test_key'\n mock_on_board.get_latest_profiling_data.return_value = ()\n\n expected = json.dumps(\n {\n 'key': str(test_key),\n 'timestamp': None,\n 'value': None\n }\n )\n\n actual = live_profiling_data(test_key)\n\n self.assertEqual(expected, actual)",
"def test_non_existent_JSON_file(self):\n self.assertEqual(Base.load_from_file(), [])",
"def test_existing_json():\n json_content = JsonHandler.read_json(RESOURCES[\"existing\"])\n assert json_content != {}",
"def fetch_standings():\n # check if the data needs to be fetched // or stored json\n try:\n with open('app/data/gw_standings/standings_current.json', 'r') as file:\n data = json.loads(file.read())\n except:\n return get_live_result()\n\n updated = data['updated']\n try:\n status = data['status']\n except KeyError:\n status = \"ongoing\"\n gameweek = data['gameweek']\n\n if status == 'completed' and gameweek == find_current_gw():\n return data\n\n current = calendar.timegm(time.gmtime())\n\n if current - updated < 500:\n return data\n return get_live_result()",
"def _cleaned_live_games(self):\n\n # Fetch all Live-Games-Page HTML\n live_games_html = self._fetch_livegames_html()\n\n # A list to hold all live games as they are parsed out\n live_games_json = list()\n\n if live_games_html is not None:\n for match in live_games_html:\n for event in match.find_all('div', attrs={'class': 'event'}):\n match_data = event.find_all('span', attrs={'class': 'team'})\n single_parsed_live_game = self._organize_sports_data(match_data=match_data)\n\n # append a set of home and away teams. That constitutes as a single game\n live_games_json.append(single_parsed_live_game)\n\n return live_games_json\n\n # HTML might have contained nothing, so we try executing the method again if that was the case\n else:\n print('<[{}]> Live-Games-Page returned None ... will retry in 5 seconds'\n .format(str(datetime.datetime.now())))\n # Wait for 5 seconds before trying again\n time.sleep(5)\n self._cleaned_live_games()",
"def checkfordata():\n \n conn = httplib.HTTPConnection(\"picasaweb.google.com\")\n conn.request(\"GET\", \"/data/feed/api/user/mikesorvillo?alt=json\")\n response = json.loads(conn.getresponse().read())\n albums = response['feed']['entry'] \n \n if len(albums) != Album.objects.count():\n importAlbums(albums)\n \n conn.close()",
"def check_if_empty(data_sources, day):\n day = pd.Timestamp(day)\n for source in data_sources:\n day_of_data = source.data[day:day+datetime.timedelta(hours=23)]\n if len(day_of_data.index) < 24:\n raise RuntimeError(\"There is not enough data for {} to \"\n \"generate scenarios with in source {}\"\n .format(day.date(), source.name))",
"def test_not_json(self):\n options = {'url' : 'http://raw.githubusercontent.com'}\n columns = ['geom']\n fdw = GeoJSON(options, columns)\n rows = fdw.execute([], columns)\n self.assertListEqual(rows, [])",
"def _verify_json(build_data):\n fields = ['waterfall_url',\n 'build_url',\n 'project_name',\n 'builderName',\n 'unsatisfied',\n 'revisions',\n 'blamelist',\n 'result',\n 'number',\n 'reason',\n 'recipients']\n\n for field in fields:\n if field not in build_data:\n logging.error('build_data did not contain field %s' % field)\n return False\n\n return True",
"def test_not_geojson(self):\n options = {'url' : 'https://raw.githubusercontent.com/fge/sample-json-schemas/master/json-home/json-home.json'}\n columns = ['geom']\n fdw = GeoJSON(options, columns)\n rows = fdw.execute([], columns)\n self.assertListEqual(rows, [])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the status of the first game of the day to see if it has started yet.
|
def test_first_game_status(game_status, date):
if game_status == 1:
return False
elif game_status == 2:
return True
else: #status == 3
if date == today:
return True
else:
return False
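A hedged illustration of the status convention this check relies on (1 = scheduled, 2 = in progress, 3 = final, matching the NBA scoreboard feeds used elsewhere in this file); the `today` value is an assumed module-level global, not part of the original row:

today = '20210115'                             # assumed global, same format as the caller's date
print(test_first_game_status(1, '20210115'))   # False -> first game has not tipped off yet
print(test_first_game_status(2, '20210115'))   # True  -> a game is currently underway
print(test_first_game_status(3, '20201224'))   # False -> finished games from an earlier date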
|
[
"def test_are_games_in_progress(self):\n pass",
"def is_start(self):\n\n com = Competition.query.order_by(Competition.id.desc()).first()\n return com.flag if com else False",
"def is_start(self):\n return self._status == EventStatus.START",
"def started(self) -> bool:\n return len(self.history) > 0 # if there is any history the game has been started",
"def _run_inactive(games):\r\n return len(games) == 0",
"def running(self):\n return self.status == \"STARTED\"",
"def is_starting(self, sdi_id: str) -> Optional[bool]:\n response = self.get_status(sdi_id)\n if response.ok:\n return str(response.detail[\"state\"]) == \"1\"\n return None",
"def is_start(self) -> bool:\n return self.num_river == 1 and self.num_coast == 0",
"async def game_check_loop(self) -> None:\n await self.bot.wait_until_red_ready()\n await self._ready.wait()\n while True:\n try:\n params = {\"expand\": \"schedule.teams,schedule.linescore,schedule.broadcasts\"}\n async with self.session.get(f\"{BASE_URL}/api/v1/schedule\") as resp:\n if resp.status == 200:\n data = await resp.json()\n else:\n log.info(\"Error checking schedule. %s\", resp.status)\n await asyncio.sleep(30)\n continue\n except aiohttp.client_exceptions.ClientConnectorError:\n # this will most likely happen if there's a temporary failure in name resolution\n # this ends up calling the check_new_day earlier than expected causing\n # game day channels and pickems to fail to update prpoperly\n # continue after waiting 30 seconds should prevent that.\n data = {\"dates\": []}\n await asyncio.sleep(30)\n continue\n except Exception:\n log.exception(\"Error grabbing the schedule for today.\")\n data = {\"dates\": []}\n await asyncio.sleep(60)\n continue\n if data[\"dates\"] != []:\n for game in data[\"dates\"][0][\"games\"]:\n if game[\"status\"][\"abstractGameState\"] == \"Final\":\n continue\n if game[\"status\"][\"detailedState\"] == \"Postponed\":\n continue\n self.current_games[game[\"link\"]] = {\n \"count\": 0,\n \"game\": None,\n \"disabled_buttons\": False,\n }\n else:\n # Only try to create game day channels if there's no games for the day\n # Otherwise make the game day channels once we see\n # the first preview message to delete old ones\n await self.check_new_day()\n if self.TEST_LOOP:\n self.current_games = {\n \"https://statsapi.web.nhl.com/api/v1/game/2020020474/feed/live\": {\n \"count\": 0,\n \"game\": None,\n \"disabled_buttons\": False,\n }\n }\n while self.current_games != {}:\n self.games_playing = True\n to_delete = []\n for link, data in self.current_games.items():\n if data[\"game\"] is not None:\n await self.fix_pickem_game_start(data[\"game\"])\n if data[\"game\"] is not None and data[\"game\"].game_start - timedelta(\n hours=1\n ) >= datetime.now(timezone.utc):\n log.trace(\n \"Skipping %s @ %s checks until closer to game start.\",\n data[\"game\"].away_team,\n data[\"game\"].home_team,\n )\n continue\n data = await self.get_game_data(link)\n if data is None:\n continue\n try:\n game = await Game.from_json(data)\n self.current_games[link][\"game\"] = game\n except Exception:\n log.exception(\"Error creating game object from json.\")\n continue\n try:\n await self.check_new_day()\n posted_final = await game.check_game_state(\n self.bot, self.current_games[link][\"count\"]\n )\n except Exception:\n log.exception(\"Error checking game state: \")\n posted_final = False\n if (\n game.game_state in [\"Live\"]\n and not self.current_games[link][\"disabled_buttons\"]\n ):\n log.verbose(\"Disabling buttons for %r\", game)\n await self.disable_pickems_buttons(game)\n self.current_games[link][\"disabled_buttons\"] = True\n\n log.trace(\n \"%s @ %s %s %s - %s\",\n game.away_team,\n game.home_team,\n game.game_state,\n game.away_score,\n game.home_score,\n )\n\n if game.game_state in [\"Final\", \"Postponed\"]:\n self.current_games[link][\"count\"] += 1\n if posted_final:\n try:\n await self.set_guild_pickem_winner(game, edit_message=True)\n except Exception:\n log.exception(\"Pickems Set Winner error: \")\n self.current_games[link][\"count\"] = 21\n await asyncio.sleep(1)\n\n for link in self.current_games:\n if self.current_games[link][\"count\"] == 21:\n to_delete.append(link)\n for link in to_delete:\n del self.current_games[link]\n if not self.TEST_LOOP:\n 
await asyncio.sleep(60)\n else:\n await asyncio.sleep(10)\n log.debug(\"Games Done Playing\")\n\n if self.games_playing:\n try:\n await self.tally_leaderboard()\n # Only tally the leaderboard once per day\n # The tally function will iterate\n # over all servers and pull all pickems games\n # This is stored temporarily until we're done\n # iterating over all guilds and then forget\n # about the results\n except Exception:\n log.exception(\"Error tallying leaderboard:\")\n pass\n self.games_playing = False\n\n # Final cleanup of config incase something went wrong\n # Should be mostly unnecessary at this point\n await self.config.teams.clear()\n\n await asyncio.sleep(300)",
"def is_scheduled(self):\n # pylint:disable=unexpected-keyword-arg\n return (\n self.starting_at is not None\n and self.starting_at > timezone.now()\n and self.live_state == IDLE\n )",
"def is_ready_to_start(self):\n is_left_resolved = self.__left_participant.get_competitor() is not None\n is_right_resolved = self.__right_participant.get_competitor() is not None\n is_winner_resolved = self.__winner.get_competitor() is not None\n return is_left_resolved and is_right_resolved and not is_winner_resolved",
"def is_running(self) -> bool:\n return self.game_running",
"def test_start(self):\n\t\tr = self.c.post(reverse('mission.views.pending_mission_start', args=(self.pm.pk,)))\n\t\tself.assertEqual(200, r.status_code)\n\t\tself.assertIsNotNone(PendingMission.objects.get(pk=self.pm.pk).started)",
"def check_status(self):\n print(\"Yamcha: Let go!\")\n print(\"Piccolo: It's over\")\n print(\"*Loud explosion*\")\n self.is_dead = True",
"def is_start(self) -> bool:\n return self.colour == ORANGE",
"def _waitForLiveEpochs(self):\n return not not (self.ourEpoch or self.masterEpoch)",
"def in_game(self):\n try:\n if self.p.poll() is None:\n return True\n else:\n return False\n except:\n return False",
"def is_first_winner(self):\r\n return self.first_winner",
"def check_season():\n # Get current time\n now = datetime.now()\n if now.month in (7, 8):\n return False\n else:\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Makes a call to AirSim and extracts depth and/or RGB images from the UAV's camera. Due to a bug in AirSim, an empty array is sometimes returned; this case is caught in this method.
|
def get_camera_observation(client, sensor_types=['rgb', 'depth'], max_dist=10, height=64, width=64):
requests = []
sensor_idx = {}
idx_counter = 0
if 'rgb' in sensor_types:
requests.append(airsim.ImageRequest(
'front_center', airsim.ImageType.Scene, pixels_as_float=False, compress=False))
sensor_idx.update({'rgb': idx_counter})
idx_counter += 1
if 'depth' in sensor_types:
requests.append(airsim.ImageRequest(
'front_center', airsim.ImageType.DepthPlanner, pixels_as_float=True, compress=False))
sensor_idx.update({'depth': idx_counter})
idx_counter += 1
responses = client.simGetImages(requests)
images = {}
if 'rgb' in sensor_types:
idx = sensor_idx['rgb']
# convert to uint and reshape to matrix with 3 color channels
try:
bgr = np.reshape(airsim.string_to_uint8_array(
responses[idx].image_data_uint8), (height, width, 3))
# move color channels around
rgb = np.array(bgr[:, :, [2, 1, 0]], dtype=np.float32)
except ValueError as err:
print('========================================================')
print('Value err when reshaping RGB image: {0}'.format(err))
print('Replacing rgb with all zeros')
print('========================================================')
rgb = np.zeros((height, width, 3), dtype=np.float32)
images.update({'rgb': rgb})
if 'depth' in sensor_types:
idx = sensor_idx['depth']
        # convert to a 2D numpy array; an unexpected exception has been observed here, hence the try/except
try:
depth = airsim.list_to_2d_float_array(
responses[idx].image_data_float, width, height)
except ValueError as err:
print('========================================================')
print('Value err when reshaping depth image: {0}'.format(err))
print('Replacing depth map with all max dist values')
print('========================================================')
depth = np.ones((height, width), dtype=np.float32) * max_dist
depth = np.expand_dims(depth, axis=2)
images.update({'depth': depth})
return images
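A minimal usage sketch, assuming an AirSim simulator is already running with its 'front_center' camera configured for 64x64 capture; the client setup below uses the standard airsim Python client calls, and the expected shapes follow from the function's fallbacks as well as the happy path:

import airsim

client = airsim.MultirotorClient()           # connects to the default local simulator
client.confirmConnection()
obs = get_camera_observation(client, sensor_types=['rgb', 'depth'], max_dist=10,
                             height=64, width=64)
print(obs['rgb'].shape, obs['depth'].shape)  # expected: (64, 64, 3) (64, 64, 1)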
|
[
"def get_camera_image(self):\n upAxisIndex = 2\n camDistance = 500\n pixelWidth = 350\n pixelHeight = 700\n camTargetPos = [0, 80, 0]\n\n far = camDistance\n near = -far\n view_matrix = self.p.computeViewMatrixFromYawPitchRoll(\n camTargetPos, camDistance, 0, 90, 0, upAxisIndex)\n projection_matrix = self.p.computeProjectionMatrix(\n -90, 60, 150, -150, near, far)\n # Get depth values using the OpenGL renderer\n width, height, rgbImg, depthImg, segImg = self.p.getCameraImage(\n pixelWidth,\n pixelHeight,\n view_matrix,\n projection_matrix,\n renderer=self.p.ER_BULLET_HARDWARE_OPENGL)\n return rgbImg, depthImg, segImg",
"def run_make_camera_flat(self):\n outfile = 'camera_flat.fits'\n img_list = []\n for k in self.obs_dict:\n if self.obs_dict[k][1] == 'CAMERA': img_list.append(self.obs_dict[k][0])\n print img_list\n logging.info('\\nCreate a new camera flat with name %s\\n' % outfile) \n gr.make_camera_flat(img_list, outfile)",
"def render(self, mode=\"human\"):\n self._render_callback()\n\n # update unity\n if self._unity and not self._unity_updated:\n self._update_unity()\n self._unity_updated = True\n\n if mode == \"rgb_array\":\n if self._unity:\n img, _ = self._unity.get_images(self._camera_ids)\n else:\n img = self.sim.render(\n camera_name=self._camera_name,\n width=self._screen_width,\n height=self._screen_height,\n depth=False,\n )\n img = np.expand_dims(img, axis=0)\n assert len(img.shape) == 4\n # img = img[:, ::-1, :, :] / 255.0\n img = img[:, ::-1, :, :]\n return img\n\n elif mode == \"rgbd_array\":\n depth = None\n if self._unity:\n img, depth = self._unity.get_images(self._camera_ids, self._depth_ob)\n else:\n camera_obs = self.sim.render(\n camera_name=self._camera_name,\n width=self._screen_width,\n height=self._screen_height,\n depth=self._depth_ob,\n )\n if self._depth_ob:\n img, depth = camera_obs\n else:\n img = camera_obs\n img = np.expand_dims(img, axis=0)\n # img = img[:, ::-1, :, :] / 255.0\n img = img[:, ::-1, :, :]\n\n if depth is not None:\n # depth map is 0 to 1, with 1 being furthest\n # infinite depth is 0, so set to 1\n black_pixels = np.all(depth == [0, 0, 0], axis=-1)\n depth[black_pixels] = [255] * 3\n if len(depth.shape) == 4:\n # depth = depth[:, ::-1, :, :] / 255.0\n depth = depth[:, ::-1, :, :]\n elif len(depth.shape) == 3:\n # depth = depth[::-1, :, :] / 255.0\n depth = depth[::-1, :, :]\n\n return img, depth\n\n elif mode == \"segmentation\" and self._unity:\n img = self._unity.get_segmentations(self._camera_ids)\n return img\n\n elif mode == \"human\" and not self._unity:\n if platform != \"win32\":\n self._get_viewer().render()\n\n return None",
"def getImageFromCam(self):\n\n\t\twith picamera.array.PiRGBArray(self.CAMERA) as output:\n\n\t\t\tself.CAMERA.capture(output, 'rgb')\n\t\t\t\n\t\t\tprint('Captured %dx%d image' % (output.array.shape[1], output.array.shape[0]))\n\n\t\t\treturn output.array",
"def render(self):\n time_stamps = []\n cam_imgs = []\n cur_time = rospy.get_time()\n\n for recorder in self._cameras:\n stamp, image = recorder.get_image()\n print(\"stamp:\", stamp)\n logging.getLogger('robot_logger').error(\"Checking for time difference: Current time {} camera time {}\".format(cur_time, stamp))\n if abs(stamp - cur_time) > 10 * self._obs_tol: # no camera ping in half second => camera failure\n logging.getLogger('robot_logger').error(\"DeSYNC - no ping in more than {} seconds!\".format(10 * self._obs_tol))\n raise Image_Exception\n time_stamps.append(stamp)\n cam_imgs.append(image)\n\n if self.ncam > 1:\n for index, i in enumerate(time_stamps[:-1]):\n for j in time_stamps[index + 1:]:\n if abs(i - j) > self._obs_tol:\n logging.getLogger('robot_logger').error('DeSYNC- Cameras are out of sync!')\n raise Image_Exception\n\n images = np.zeros((self.ncam, self._height, self._width, 3), dtype=np.uint8)\n for c, img in enumerate(cam_imgs):\n images[c] = img[:, :, ::-1]\n\n return images",
"def capture_robot_camera(IP_PEPPER, PORT):\n SubID = \"Pepper\"\n videoDevice = ALProxy('ALVideoDevice', PI_PEPPER, PORT)\n\n # subscribe top camera, get an image with the size of 640x480\n AL_kTopCamera, AL_kQVGA, Frame_Rates = 0, 2, 10 \n AL_kBGRColorSpace = 13 # Buffer contains triplet on the format 0xRRGGBB, equivalent to three unsigned char\n captureDevice = videoDevice.subscribeCamera(SubID, AL_kTopCamera, AL_kQVGA, AL_kBGRColorSpace, Frame_Rates)\n\n width, height = 640, 480\n image = np.zeros((height, width, 3), np.uint8)\n result = videoDevice.getImageRemote(captureDevice)\n\n if result == None:\n print \"Camera problem.\"\n elif result[6] == None:\n print \"No image was captured. \"\n else:\n # translate value to mat\n values = map(ord, list(result[6]))\n i = 0\n for y in range(0, height):\n for x in range(0, width):\n image.itemset((y, x, 0), values[i + 0])\n image.itemset((y, x, 1), values[i + 1])\n image.itemset((y, x, 2), values[i + 2])\n i += 3\n\n # uncomment below lines to see the camera image\n #cv2.imwrite(\"assets/monitor/robocam.png\", image)\n #cv2.imshow(\"Camera image\", image)\n #cv2.waitKey(1)\n\n # unsubscribe from the camera.Otherwise, the camera image\n # might be corrupted. To be absoulutely sure, perform \n # a null check on result[6]\n videoDevice.unsubscribe(captureDevice)\n\n return result[6], image",
"def fnImageCaptureAndTransform():\r\n try:\r\n camera_port = 0\r\n camera = cv2.VideoCapture(camera_port)\r\n\r\n # camera settings\r\n ARcamera.set(3, 640) # width\r\n ARcamera.set(4, 480) # height\r\n ARcamera.set(12, 0) # saturation\r\n ARcamera.set(11, 1) # contrast\r\n ARcamera.set(10, 0) # brightness\r\n \r\n time.sleep(0.1) # wait for camera to stabilize itself\r\n isOk, capturedImage = ARcamera.read()\r\n del(ARcamera)\r\n\r\n \"\"\" Transform image to grayscale and give high contrast \"\"\"\r\n # transformation to grayscale image\r\n grayImage = cv2.cvtColor(capturedImage, cv2.COLOR_BGR2GRAY)\r\n # high contrast image - set with treshold number to treshold hot and stuck pixels\r\n grayImage[grayImage<5] = 0\r\n grayImage[grayImage>=5] = 255\r\n return grayImage\r\n except:\r\n fnException(\"Unexpected error!\\nPlese check Alpha Random Camera and perform calibration of system if problem persists.\", 0)",
"def grab_one(self):\n\n camera_array = self._get_camera_array()\n\n size = camera_array.GetSize()\n\n result = []\n\n for i in range(size):\n grab_result = camera_array[i].GrabOne(self._TIME_OUT)\n image_array = self.post_processing(grab_result).GetArray()\n grab_result.Release()\n result.append(image_array)\n\n return result",
"def run_from_memory(self, images, foreground_masks=None, dump_ply_files=False):\n assert self.all_camera_parameters is not None, 'Camera parameters not loaded yet; You should run load_all_camera_parameters first!'\n\n\n xyz_global_array = [None]*len(topologies[self.topology])\n def run_for_one_pair(pair_index, left_index, right_index):\n print('Performing Stereo matching between cameras', left_index,'and',right_index,'...')\n left_image, right_image = images[left_index], images[right_index]\n\n\n left_maps = self.left_maps_array[pair_index]\n right_maps = self.right_maps_array[pair_index]\n\n # Apply the rectification maps\n remap_interpolation = self.options['Remap']['interpolation']\n left_image_rectified = cv2.remap(left_image, left_maps[0],\n left_maps[1], remap_interpolation)\n right_image_rectified = cv2.remap(right_image, right_maps[0],\n right_maps[1], remap_interpolation)\n if foreground_masks is not None:\n left_foreground_mask_rectified = cv2.remap(\n foreground_masks[left_index], left_maps[0], left_maps[1],\n cv2.INTER_NEAREST)\n #right_background_rectified = cv2.remap(\n #foreground_masks[right_index], right_maps[0], right_maps[1],\n #cv2.INTER_NEAREST)\n # TODO: We don't actually filter using the right background mask yet\n\n #if self.visual_debug:\n #left = numpy.array(left_image_rectified)\n #right = numpy.array(right_image_rectified)\n #leftright = numpy.hstack((left,right))\n #pylab.imshow(leftright)\n #pylab.show()\n #return\n #continue\n matcher = self.matchers[pair_index]\n disparity_image = matcher.compute(left_image_rectified, right_image_rectified)\n\n # WARNING! OpenCV 3 Apparently doesn't support floating point disparity anymore,\n # and 16 bit disparity needs to be divided by 16\n if disparity_image.dtype == numpy.int16:\n disparity_image = disparity_image.astype(numpy.float32)\n disparity_image /= 16\n\n if self.visual_debug:\n im = numpy.array(disparity_image)\n #pylab.imshow(numpy.vstack((left_image,im,right_image)))\n pylab.imshow(numpy.vstack((im,)))\n pylab.show()\n #continue\n\n ## Filter the stereo correspondences based on the foreground_masks, before they're expanded to 3D\n #if foreground_masks is not None:\n #DISPARITY_SHIFT_16S = 4\n #MinDisparity = self.options['StereoMatcher']['MinDisparity']\n #filtered_sentinel = (MinDisparity - 1) << DISPARITY_SHIFT_16S # Match what opencv is supposed to be using.\n #assert numpy.all(disparity_image.shape==left_foreground_mask_rectified.shape), 'Bug! 
Shape mismatch!'\n #disparity_image[left_foreground_mask_rectified==0] = filtered_sentinel # DEBUG\n ## TODO: Maybe something could be gained by filtering on the right image as well...\n\n # Convert the depth map to a point cloud\n Q = self.Q_array[pair_index]\n threedeeimage = cv2.reprojectImageTo3D(disparity_image, Q, handleMissingValues=True,ddepth=cv2.CV_32F)\n # Reminder: If True, handleMissingValues replaces filtered_sentinel with 10000.0\n threedeeimage = numpy.array(threedeeimage)\n #if self.visual_debug:\n #depth_image = threedeeimage[:,:,2]\n #pylab.imshow(depth_image)\n #pylab.show()\n #continue\n\n ## Do the filtering after the expansion since I can't get the sentinel to work properly.\n #if foreground_masks is not None:\n #xyz[left_foreground_mask_rectified==0] = 10000.0 \n ## TODO: Maybe something could be gained by filtering on the right image as well...\n\n\n # Put the 3D images in a unified coordinate system...\n xyz = threedeeimage.reshape((-1,3)) # x,y,z now in three columns, in left rectified camera coordinates\n\n z = xyz[:,2]\n #z[left_foreground_mask_rectified==0] = 10000.0 # background filtering\n if foreground_masks is None:\n goodz = z < 9999.0 # Just filter out the points OpenCV already labeled as matching failures\n else:\n # Also filter out points in the background in the left camera\n goodz = numpy.logical_and(z < 9999.0,left_foreground_mask_rectified.reshape(-1)>0.0000001)\n xyz_filtered = xyz[goodz,:]\n #print('pixels before filtering: ',h*w, \"after filtering:\" ,xyz_filtered.shape[0] )\n\n R_left_rectified_to_global, T_left_rectified_to_global = self.extrinsics_left_rectified_to_global_array[pair_index]\n xyz_global = numpy.dot(xyz_filtered, R_left_rectified_to_global.T) + T_left_rectified_to_global.T # TODO: combine this with the the multipilication by Q inside of reprojectImageTo3D above. Note that different filtering may be required.\n\n if dump_ply_files:\n save_ply(xyz_global, 'pair_'+str(left_index)+'_'+str(right_index)+'.ply')\n #xyz_global_array.append(xyz_global)\n xyz_global_array[pair_index] = xyz_global\n\n itt_resume()\n t1 = time()\n import threading\n threads = []\n for pair_index, (left_index,right_index) in enumerate(topologies[self.topology]):\n threads.append(threading.Thread(target=run_for_one_pair, args=(pair_index,left_index,right_index)))\n #run_for_one_pair(pair_index, left_index, right_index)\n run_in_parallel = True\n if run_in_parallel:\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n else:\n for thread in threads:\n thread.start()\n thread.join()\n\n xyz = numpy.vstack(xyz_global_array)\n t2 = time()\n dt = t2-t1 # seconds. \n itt_detach()\n\n return xyz, dt",
"def capture(env):\n viewMatrix = env._view_matrix\n projectionMatrix = env._proj_matrix\n\n near = 0.01\n far = 10\n width = env._width\n height = env._height\n\n img_arr = env._p.getCameraImage(\n width,\n height,\n viewMatrix,\n projectionMatrix,\n renderer=env._p.ER_BULLET_HARDWARE_OPENGL,\n )\n assert len(img_arr) == 5\n\n w = img_arr[0] # width of the image, in pixels\n h = img_arr[1] # height of the image, in pixels\n rgbBuffer = img_arr[2] # color data RGB\n depthBuffer = img_arr[3] # depth data\n segmentationBuffer = img_arr[4] # depth data\n\n return get_point_cloud(rgbBuffer, depthBuffer, viewMatrix, projectionMatrix)",
"def hand_recognition_depth(pipeline, frame_size: Tuple[int, int]):\n width, height = frame_size\n recogniser = DepthHandRecogniser(pipeline, width, height)\n depth_gui = DepthRecogniserGUI(recogniser, frame_size)\n\n while True:\n if not recogniser.get_frames():\n CAMERA_LOG.warn(\"Dropped frames.\")\n continue\n recogniser.segment_hand()\n depth_gui.draw_frames()\n # frames = stream.wait_for_frames()\n # depth_frame = frames.get_depth_frame()\n # colour_frame = frames.get_color_frame()\n # if not depth_frame or not colour_frame:\n # continue\n #\n # # Distance\n # distance = depth_frame.get_distance(320, 240)\n #\n # # Convert images to data points in np array\n # depth_image = np.asanyarray(depth_frame.get_data())\n # colour_image = np.asanyarray(colour_frame.get_data())\n #\n # cv.putText(\n # colour_image,\n # str(distance) + \" m\",\n # (0, 30),\n # cv.FONT_HERSHEY_SIMPLEX,\n # 1.0,\n # (255, 255, 255), 2, cv.LINE_AA)\n #\n # Apply colour map to depth image\n # (converts image to 8-bit per pixel first)\n # depth_colourmap = cv.applyColorMap(\n # cv.convertScaleAbs(depth_image, alpha=0.03),\n # cv.COLORMAP_JET)\n #\n # # Vertical Stack Image\n # images = np.vstack((colour_image, depth_colourmap))\n #\n # # Show cv window\n # cv.namedWindow(\"Real Sense\", cv.WINDOW_AUTOSIZE)\n # cv.imshow(\"Real Sense\", images)\n\n # Quit\n if cv.waitKey(1) & 0xFF == ord('q'):\n break\n\n recogniser.stream.stop()",
"def cam_read(filename): #Adapted from sintel_io.py from http://sintel.is.tue.mpg.de/depth\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n N = np.append(N, [[0,0,0,1]], axis=0)\n\n return M,N",
"def display_images(color_image, depth_image):\n # Switch color image channels\n color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)\n # rescale depth image\n depth_image = depth_image\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image,\n alpha=255/RSCamera.maximum_depth),\n cv2.COLORMAP_INFERNO)\n depth_colormap_dim = depth_colormap.shape\n color_colormap_dim = color_image.shape\n # print(np.max(depth_image), np.min(depth_image))\n # print(np.max(depth_colormap), np.min(depth_colormap))\n # If depth and color resolutions are different, resize color image to match depth image for display\n if depth_colormap_dim != color_colormap_dim:\n resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),\n interpolation=cv2.INTER_AREA)\n images = np.hstack((resized_color_image, depth_colormap))\n else:\n images = np.hstack((color_image, depth_colormap))\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', images)\n return cv2.waitKey(5)",
"def cam2numpy(self):\n nao_image = self.video.getImageRemote(self.subscriber)\n image_width = nao_image[0]\n image_height = nao_image[1]\n array = nao_image[6]\n frame = Image.frombytes(\"RGB\", (image_width, image_height), array)\n frame = np.array(frame)\n return frame",
"def main():\n\n #p = optparse.OptionParser(usage=\"Usage: %prog [ options ]\\n\"\n # \"This program lets the camera run in free running mode.\")\n options, args = argparse.run()\n loginit.run(options.verbosity)\n logger = logging.getLogger('main')\n\n #add_common_options(p)\n\n l = DC1394Library()\n global cam\n cam = handle_common_options(options, l)\n\n try:\n cam.start(interactive = True)\n except IOError:\n print 'error: cannot open stream' \n exit(1)\n\n dims = (9,6)\n pts_per_board = dims[0] * dims[1]\n nr_samples = 20\n\n pt_counts = np.zeros((nr_samples, 1), dtype = int) #pts per image\n\n frame = np.asarray(cam.current_image)\n model_pts = np.zeros((nr_samples * pts_per_board, 3), dtype = float)\n model_pts = model_pts.astype('float32')\n image_pts = np.zeros((nr_samples * pts_per_board, 2), dtype = float)\n image_pts = image_pts.astype('float32')\n i = 0\n\n while i < nr_samples:\n frame = np.asarray(cam.current_image)\n found, points = cv2.findChessboardCorners(frame, dims, flags=cv2.CALIB_CB_FAST_CHECK)\n if found and ((points.shape)[0] == pts_per_board):\n cv2.drawChessboardCorners(frame, (6,9), points, found)\n cv2.imshow(\"win2\", frame)\n cv2.waitKey(2)\n step = i * pts_per_board\n j = 0\n\n while j < pts_per_board:\n image_pts[step, 0] = points[j, 0, 0]\n image_pts[step, 1] = points[j, 0, 1]\n model_pts[step, 0] = float(j) / float(dims[0])\n model_pts[step, 1] = float(j) % float(dims[0])\n model_pts[step, 2] = 0.0\n step += 1\n j += 1\n\n pt_counts[i, 0] = pts_per_board\n cv2.waitKey(2)\n i += 1\n time.sleep(1)\n\n else:\n cv2.imshow(\"win2\", frame)\n cv2.waitKey(2)\n\n camera_matrix = np.array([\n [2.23802515e+03, 0.0, 5.89782959e+02], \n [0.0, 2.07124146e+03, 4.55921570e+02], \n [0.0, 0.0, 1.]\n ])\n dist_coeffs = np.zeros(4)\n\n np.save(\"image_pts.npy\", image_pts)\n np.save(\"model_pts.npy\", model_pts)\n\n success, intrinsic, distortion_coeffs, rot_est_vecs, transl_est_vecs = cv2.calibrateCamera(model_pts, image_pts, frame.shape, camera_matrix, dist_coeffs, flags=cv2.CALIB_USE_INTRINSIC_GUESS)\n\n np.save(\"intrinsic.npy\", intrinsic)\n np.save(\"distortion_coeffs.npy\", distortion_coeffs)\n np.save(\"calibration_rotation_vectors.npy\", rot_est_vecs)\n np.save(\"calibration_translation_vectors.npy\", transl_est_vecs)",
"def get_camera_state(self) -> GoProResp:",
"def render_views(self, cameras: PinholeCamera) -> ImageTensors:\n ray_dataset = RayDataset(\n cameras, self._min_depth, self._max_depth, self._ndc, device=self._device, dtype=self._dtype\n )\n ray_dataset.init_ray_dataset()\n idx0 = 0\n imgs: ImageTensors = []\n batch_size = 4096 # FIXME: Consider exposing this value to the user\n for height, width in zip(cameras.height.int().tolist(), cameras.width.int().tolist()):\n bsz = batch_size if batch_size != -1 else height * width\n img = zeros((height * width, 3), dtype=torch.uint8)\n idx0_camera = idx0\n for idx0 in range(idx0, idx0 + height * width, bsz):\n idxe = min(idx0 + bsz, idx0_camera + height * width)\n idxs = list(range(idx0, idxe))\n origins, directions, _ = ray_dataset[idxs]\n with torch_inference_mode():\n rgb_model = self._nerf_model(origins, directions) * 255.0\n img[idx0 - idx0_camera : idxe - idx0_camera] = rgb_model\n idx0 = idxe\n img = img.reshape(height, width, -1) # (H, W, C)\n imgs.append(img)\n return imgs",
"def ap_extract(img, trace, apwidth=8, skysep=3, skywidth=7, skydeg=0\n ,gain=1.68,rdnoise=4.9,optimal=True,rectified=False):\n\n #Follow Horne 1986 for optimal extraction\n\n if rdnoise <= 0:\n rdnoise = 1\n print(\"Enforcing minimum readnoise of 1 electron\")\n\n #note that if trace is of integer type then so will sumspec and other output which will give wrong results\n sumspec0 = np.zeros(len(trace))\n varspec0 = np.zeros(len(trace))\n skyspec0 = np.zeros(len(trace))\n\n if optimal:\n imgstrip = np.zeros((2*apwidth + 1, len(trace)))\n varstrip = np.zeros((2*apwidth + 1, len(trace)))\n skyvar = np.zeros((2*apwidth + 1, len(trace))) #to keep track of sky contribution in variance to pass to optimal extraction\n\n if rectified:\n itrace = np.repeat(np.round(np.median(trace)),len(trace)).astype('int')\n else:\n itrace = np.round(trace).astype('int')\n\n varimg = (rdnoise/gain)**2 + np.abs(img.copy())/gain # for the variance img; in data units\n skyimg = np.zeros_like(img) #holds the 2d sky estimate\n\n # first determine the sky image, and aperature summed spectra\n for i in range(0,len(itrace)):\n #-- first do the aperture flux\n # juuuust in case the trace gets too close to the edge; what about the sky aperture in this case??\n widthup = apwidth\n widthdn = apwidth\n if (itrace[i]+widthup > img.shape[0]):\n widthup = img.shape[0]-itrace[i] - 1\n if (itrace[i]-widthdn < 0):\n widthdn = itrace[i] - 1\n\n #y defines the sky pixels\n y = np.append(np.arange(itrace[i]-apwidth-skysep-skywidth, itrace[i]-apwidth-skysep),\n np.arange(itrace[i]+apwidth+skysep+1, itrace[i]+apwidth+skysep+skywidth+1))\n\n z = img[y,i]\n if (skydeg>0):\n \n if len(np.where(np.isfinite(z))[0]) > skydeg + 1:\n # based on fitting with outlier removal from astropy --- API subject to change...\n polymod = models.Polynomial1D(skydeg)\n sigmafitting = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),sigma_clip,niter=4,sigma=2.5)\n #defaults to 3 iterations, 3 sigma clipping; maybe makes these optional keywords to ap_extract\n \n # fit a polynomial to the sky in this column\n skyfit, polyout = sigmafitting(polymod,y,z) #can also add weights to this fitting, leaving it off for now\n \n ap = np.arange(itrace[i]-apwidth, itrace[i]+apwidth+1) # define the aperture in this column\n skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] = polyout(ap) # evaluate the polynomial across the aperture for sky estimate\n \n #add contribution to variance image from background subtraction, estimate for fitted background variance based on optimal extraction documentation in iraf -- approximate\n varimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] = varimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] + polyout(ap)/(gain * (len(z)-1))\n \n if optimal:\n skyvar[:,i] = polyout(ap)/(gain * (len(z)-1))\n else:\n skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] = np.nan\n print(\"Warning: empty data array in column {}, skipping sky subtraction here\".format(i))\n if optimal:\n skyvar[:,i] = np.nan\n \n elif (skydeg==0):\n skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] = np.nanmean(z)\n # more sky pixels should yield smaller sky error... 
# the gain factors are to follow poisson statistics then convert back to data units\n varimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] = varimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] + np.var(gain*z)/(gain**2 * len(z))\n\n if optimal:\n skyvar[:,i] = np.var(gain*z)/(gain**2 * len(z))\n\n elif (skydeg<0):\n skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i] = 0\n print(\"No background subtraction is being applied\")\n\n #resulting variance img within aperature is variance estimate for Flux - Background; depart from Horne 1986 to include background subtraction in variance estimate for summed 1d spectrum\n\n sumspec0[i] = (img[itrace[i]-widthdn:itrace[i]+widthup+1,i] - skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i]).sum()\n varspec0[i] = varimg[itrace[i]-widthdn:itrace[i]+widthup+1,i].sum()\n skyspec0[i] = np.median(skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i])*(widthup+widthdn + 1) # just to get idea for what the sky background is\n\n if optimal:\n imgstrip[:,i] = img[itrace[i]-widthdn:itrace[i]+widthup+1,i] - skyimg[itrace[i]-widthdn:itrace[i]+widthup+1,i]\n varstrip[:,i] = varimg[itrace[i]-widthdn:itrace[i]+widthup+1,i]\n\n if optimal:\n # should have keywords passed from call to ap_extract\n prof2d,spec1d,errspec = OptExtProfile(sumspec0,imgstrip,varstrip,skyvar,rdnoise,gain)\n out = (spec1d,errspec,skyspec0)\n else:\n out = (sumspec0,np.sqrt(varspec0),skyspec0)\n \n return out",
"def cameraList(self):\r\n var = (CameraInfoEx*10)()\r\n self.dll.PvCameraListEx(byref(var), 1, None, sizeof(CameraInfoEx))\r\n return var"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
 Calculates 3D positions based on a depth map and pixel coordinates. Points with depth larger than max_dist are not included.
|
def reproject_2d_points(points_2d, depth, max_dist, field_of_view):
h, w = depth.shape
center_x = w // 2
center_y = h // 2
focal_len = w / (2 * np.tan(field_of_view / 2))
points = []
for u, v in points_2d:
x = depth[v, u]
if x < max_dist:
y = (u - center_x) * x / focal_len
z = (v - center_y) * x / focal_len
points.append([x, y, z])
return np.array(points)
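
# A minimal usage sketch (not part of the dataset row): it assumes numpy is
# imported as np, that field_of_view is given in radians (consistent with the
# np.tan call above), and uses a synthetic depth map purely for illustration.
import numpy as np

# Synthetic 480x640 depth map, every pixel 5 m away (illustrative values only).
depth = np.full((480, 640), 5.0, dtype=np.float32)
points_2d = [(320, 240), (100, 50)]  # (u, v) pixel coordinates

# 90-degree field of view, expressed in radians.
points_3d = reproject_2d_points(points_2d, depth, max_dist=10.0, field_of_view=np.pi / 2)
print(points_3d.shape)  # (2, 3): one [x, y, z] row per pixel whose depth is below max_dist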
|
[
"def depth_to_local(depth, clip_planes, fov_deg):\n \"\"\" Determine the 'UV' image-space coodinates for each pixel.\n These range from (-1, 1), with the top left pixel at index [0,0] having\n UV coords (-1, 1).\n \"\"\"\n aspect_ratio = (depth.shape[1], depth.shape[0])\n #print (\"aspect ratio\" ,aspect_ratio)\n\n idx_grid = np.meshgrid(*[np.arange(ar) for ar in aspect_ratio])\n\n px_arr = np.stack(idx_grid, axis=-1) # Each pixel's index\n uv_arr = px_arr*[2/w for w in aspect_ratio]-1\n\n uv_arr[:, :, 1] *= -1 # Each pixel's UV coords\n\n \"\"\" Convert the depth mask values into per-pixel world-space depth\n measurements using the provided clip plane distances.\n \"\"\"\n z_depth = depth[:]\n \"\"\" Determine vertical & horizontal FOV in radians.\n Use the UV coordinate values and tan(fov/2) to determine the 'XY' direction\n vector for each pixel.\n \"\"\"\n vfov = np.radians(fov_deg)\n #hfov = np.radians(fov_deg*aspect_ratio[0]/aspect_ratio[1])\n hfov = 2*math.atan(math.tan(vfov/2) * (aspect_ratio[0]/aspect_ratio[1]))\n tans = np.array([np.tan(fov/2) for fov in (hfov, vfov)])\n px_dir_vec = uv_arr * tans\n \"\"\" Add Z coordinate and scale to the pixel's known depth. \"\"\"\n const_zs = np.ones((px_dir_vec.shape[0:2])+(1,))\n px_dir_vec = np.concatenate((px_dir_vec, const_zs), axis=-1)\n camera_offsets = px_dir_vec * np.expand_dims(z_depth, axis=-1)\n return camera_offsets",
"def depth2normal(depths, inv_intrinsics, nei=3):\n batch, _, height, width = depths.shape\n pts_3d_map = compute_3dpts_batch(depths, inv_intrinsics)\n\n # Shift the 3d pts map by nei (Default:3pixel) along 8 directions\n pts_3d_map_ctr = pts_3d_map[:, :, nei:-nei, nei:-nei]\n pts_3d_map_x0 = pts_3d_map[:, :, nei:-nei, 0:-(2 * nei)]\n pts_3d_map_y0 = pts_3d_map[:, :, 0:-(2 * nei), nei:-nei]\n pts_3d_map_x1 = pts_3d_map[:, :, nei:-nei, 2 * nei:]\n pts_3d_map_y1 = pts_3d_map[:, :, 2 * nei:, nei:-nei]\n pts_3d_map_x0y0 = pts_3d_map[:, :, 0:-(2 * nei), 0:-(2 * nei)]\n pts_3d_map_x0y1 = pts_3d_map[:, :, 2 * nei:, 0:-(2 * nei)]\n pts_3d_map_x1y0 = pts_3d_map[:, :, 0:-(2 * nei), 2 * nei:]\n pts_3d_map_x1y1 = pts_3d_map[:, :, 2 * nei:, 2 * nei:]\n\n # Generate difference between the central pixel and one of 8 neighboring pixels\n diff_x0 = pts_3d_map_ctr - pts_3d_map_x0\n diff_x1 = pts_3d_map_ctr - pts_3d_map_x1\n diff_y0 = pts_3d_map_y0 - pts_3d_map_ctr\n diff_y1 = pts_3d_map_y1 - pts_3d_map_ctr\n diff_x0y0 = pts_3d_map_x0y0 - pts_3d_map_ctr\n diff_x0y1 = pts_3d_map_ctr - pts_3d_map_x0y1\n diff_x1y0 = pts_3d_map_x1y0 - pts_3d_map_ctr\n diff_x1y1 = pts_3d_map_ctr - pts_3d_map_x1y1\n\n # Flatten the diff to a #pixle by 3 matrix\n pix_num = batch * (width - 2 * nei) * (height - 2 * nei)\n diff_x0 = diff_x0.permute(0, 2, 3, 1).reshape(pix_num, 3)\n diff_y0 = diff_y0.permute(0, 2, 3, 1).reshape(pix_num, 3)\n diff_x1 = diff_x1.permute(0, 2, 3, 1).reshape(pix_num, 3)\n diff_y1 = diff_y1.permute(0, 2, 3, 1).reshape(pix_num, 3)\n\n diff_x0y0 = diff_x0y0.permute(0, 2, 3, 1).reshape(pix_num, 3)\n diff_x0y1 = diff_x0y1.permute(0, 2, 3, 1).reshape(pix_num, 3)\n diff_x1y0 = diff_x1y0.permute(0, 2, 3, 1).reshape(pix_num, 3)\n diff_x1y1 = diff_x1y1.permute(0, 2, 3, 1).reshape(pix_num, 3)\n\n # Calculate normal by cross product of two vectors\n normals0 = normalize_l2(torch.cross(diff_x1, diff_y1)).unsqueeze(0)\n normals1 = normalize_l2(torch.cross(diff_x0, diff_y0)).unsqueeze(0)\n normals2 = normalize_l2(torch.cross(diff_x0y1, diff_x0y0)).unsqueeze(0)\n normals3 = normalize_l2(torch.cross(diff_x1y0, diff_x1y1)).unsqueeze(0)\n\n normal_vector = torch.sum(\n torch.cat((normals0, normals1, normals2, normals3), 0), dim=0)\n normal_vector = normalize_l2(normal_vector)\n\n normal_map = normal_vector.reshape(batch, (height - 2 * nei), (width - 2 * nei), 3)\n\n normal_map = normal_map.permute(0, 3, 1, 2)\n normal_map = F.pad(normal_map, (nei, nei, nei, nei), \"constant\", 0)\n\n return normal_map",
"def depth_to_pc(self, depth):\n rows, cols = depth.shape\n c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)\n valid = (depth > 0) & (depth < 255)\n #z = np.where(valid, depth / 256.0, np.nan)\n #x = np.where(valid, z * (c - cx) / fx, 0)\n #y = np.where(valid, z * (r - cy) / fy, 0)\n z = np.where(valid, depth, np.nan)\n x = np.where(valid, z * (c - self.cx) / self.fx, 0)\n y = np.where(valid, z * (r - self.cy) / self.fy, 0)\n return np.float32(np.dstack((x, y, z)))",
"def depth_value_extraction(self, dmap_list, pts_list):\n\n updated_pts = [[] for i in range(len(pts_list))]\n pts_depth = [[] for i in range(len(pts_list))]\n\n for idx in range(len(pts_list[0])): # Check all matched points\n depth = np.zeros(len(pts_list))\n valid = True\n for i in range(len(pts_list)): # Check depth of current point in each view\n if pts_list[i][idx] != []:\n (u,v) = pts_list[i][idx]\n neighborhood = tools.get_neighborhood(round(u), round(v), self.depth_neighborhood_radius, dmap_list[i])\n nonzero = neighborhood[np.nonzero(neighborhood)]\n count = len(nonzero)\n if count > 0: # and (max(nonzero) - min(nonzero)) < 100:\n depth[i] = sorted(nonzero)[count//2] #Take median value\n else:\n valid = False\n break\n if valid: # If there is valid depth information in all views we keep the point\n for i in range(len(pts_list)):\n pts_depth[i].append(depth[i])\n updated_pts[i].append(pts_list[i][idx])\n\n return pts_depth, updated_pts",
"def depth_to_points(depth, camera_clipping_planes,\n camera_field_of_view, pos_dict, rotation, tilt):\n # Get local offset from the camera of each pixel\n #print (\"shape of depth image\", depth.shape)\n #print (\"min max of depth image\", np.amin(depth),np.amax(depth))\n local_pts = depth_to_local(depth, camera_clipping_planes, camera_field_of_view)\n # Convert to world space\n # Use rotation & tilt to calculate rotation matrix.\n #rot = Rotation.from_euler('yx', (rotation, tilt), degrees=True)\n rot = Rotation.from_euler('yx', (360 - rotation,360- tilt), degrees=True)\n #rot = Rotation.from_euler('yx', (rotation,360- tilt), degrees=True)\n pos_to_list = lambda x: [x['x'], x['y'], x['z']]\n pos = pos_to_list(pos_dict)\n # Apply rotation, offset by camera position to get global coords\n #global_pts = np.matmul(local_pts, rot.as_matrix()) + pos\n global_pts = np.matmul(local_pts, rot.as_dcm()) + pos\n # Flatten to a list of points\n flat_list_pts = global_pts.reshape(-1, global_pts.shape[-1])\n return flat_list_pts",
"def get_3D_coordinates(filename, show_each_frame = False):\n \n #Define a list of 2D coordinates you want to locate\n colour_image_pixels_to_locate_list = [[880,555], [1440,200]]\n \n #Start a kinect (NEED TO CONNECT A KINECT or run a recording in kinect studio to make this command work, even though we are reading saved depth values)\n kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Depth)\n\n #Do a bunch of defines required for matching the colour coordinates to their depth later\n color2depth_points_type = _DepthSpacePoint* np.int(1920 * 1080)\n color2depth_points = ctypes.cast(color2depth_points_type(), ctypes.POINTER(_DepthSpacePoint))\n S = 1080*1920\n TYPE_CameraSpacePointArray = PyKinectV2._CameraSpacePoint * S\n csps1 = TYPE_CameraSpacePointArray()\n \n #load your saved depth data\n depthdatafile = open(\"DEPTH.\" + filename + \".pickle\", \"rb\")\n \n #make list to store the 3D positions in\n pixel_positions_3D_list = []\n \n #Iterate over each saved frame of depth data\n depth_file_not_finished = True\n while depth_file_not_finished == True:\n try:\n depthframe = pickle.load(depthdatafile) #each call loads a sucessive frame from a pickle file, so we need to do this once per frame\n \n three_D_pixel_positions_in_frame =[] # list to store the 3D pixel positions from one frame\n \n #Defines to allow colour pixel mapping to 3D coords to work correctly \n ctypes_depth_frame = np.ctypeslib.as_ctypes(depthframe.flatten())\n L = depthframe.size\n kinect._mapper.MapColorFrameToCameraSpace(L, ctypes_depth_frame, S, csps1)\n \n #Carry out certain actions if you want an image of where all the tracked points are in the depth data (makes program 20x slower)\n if show_each_frame == True:\n \n #Note the method on the line below, for finding the corrsponding depth pixel of a single tracked pixel in the colour image, is NOT what I am using to find the 3D position of a colour pixel\n kinect._mapper.MapColorFrameToDepthSpace(ctypes.c_uint(512 * 424), ctypes_depth_frame, ctypes.c_uint(1920 * 1080), color2depth_points)\n \n cut_down_depth_frame = depthframe.astype(np.uint8)\n cut_down_depth_frame = np.reshape(cut_down_depth_frame, (424, 512))\n \n #Iterate over the lists of pixel positions in the 2D colour image to locate\n for pixel in colour_image_pixels_to_locate_list:\n \n #find x and y in pixel position in the 2D colour image\n x = pixel[0]\n y = pixel[1]\n \n #Find 3D position of each pixel (relative to camera) using Colour_to_camera method, all measurements (x, y and z) in m\n x_3D = csps1[y*1920 + x].x\n y_3D = csps1[y*1920 + x].y\n z_3D = csps1[y*1920 + x].z\n pixel_position_3D = [x_3D, y_3D, z_3D]\n \n #if show_each_frame flag set, display the depth data and corresponding points you are reading\n if show_each_frame == True:\n \n try:\n \n #method below finds 2D depth pixel that corresponds to a 2D colour pixel, for use in the pop up images, to show you what points you are tracking. 
While it could be used to find 3D joint positions, IT IS NOT THE METHOD I USE OR RECOMMEND FOR FINDING 3D JOINT POSITIONS, as it gives you x and y in pixels not m (z is in mm)\n read_pos = x+y*1920 -1\n depth_image_corresponding_x = int(color2depth_points[read_pos].x)\n depth_image_corresponding_y = int(color2depth_points[read_pos].y)\n \n #plot a circle at the pixel in the depth frame that matches the corresponding pixel in the image frame\n cv2.circle(cut_down_depth_frame, (depth_image_corresponding_x,depth_image_corresponding_y), 5, (255, 0, 255), -1)\n \n #note that the value below is NOT used in this code, included just for reference\n corresponding_depth = depthframe[((depth_image_corresponding_y * 512) + depth_image_corresponding_x)]\n \n except OverflowError:\n #the SDK returns infinity for the depth of some positions, so we need to handle that\n #I choose to not find the corresponding pixel in the depth image, and so dont plot a circle there, in this case\n pass\n \n #Display annotated depth image if flag is set\n if show_each_frame == True:\n cv2.imshow('KINECT Video Stream', cut_down_depth_frame)\n \n #code to close window if escape is pressed, doesnt do anything in this program (as we keep calling for data to be displayed in the window) but included for reference\n key = cv2.waitKey(1)\n if key == 27: \n pass\n \n #add 3D positions found in this frame to an intermediate list\n three_D_pixel_positions_in_frame.append(pixel_position_3D)\n \n #add per frame lists of 3D position into a results list\n pixel_positions_3D_list.append(three_D_pixel_positions_in_frame)\n \n #close loop at end of file\n except EOFError:\n cv2.destroyAllWindows()\n depth_file_not_finished = False\n \n #return 3D joint position lists \n return pixel_positions_3D_list",
"def obj_depth2pts(objID, depth, mask, camera, view_matrix):\n obj_depth = gen_obj_depth(objID, depth, mask)\n cam_pts = np.asarray(transforms.depth_to_point_cloud(camera.intrinsic_matrix, obj_depth))\n if len(cam_pts) == 0:\n return\n else:\n world_pts = transforms.transform_point3s(cam_view2pose(view_matrix), cam_pts)\n return world_pts",
"def depth_to_points(self, depth):\n uv = self.flattened_pixel_locations_to_uv(\n np.arange(self.width * self.height))\n points = self.batch_project_pixel_to_3d_ray(\n uv, depth=depth)\n return points.reshape(self.height, self.width, 3)",
"def depth_image_to_pointcloud(self, image, depth_image, camera_K):\n data = []\n \n # convert each pixel into the corresponding 3D point, setting $Z$ based on the depth image.\n\n # Get locations of valid depth image points\n error_value = ((2.**16)-2) / 512.\n # Trust depth only up to 20 feet away\n valid_indices = np.argwhere(depth_image < error_value) # (N, 2)\n\n # Turn to homogenous coordinates\n # Flip u,v to v, u, 1 \n h_uv = np.concatenate([valid_indices[:, 1][:, None], valid_indices[:, 0][:, None],\n np.ones(valid_indices.shape[0])[:, None]], axis=1) # (N, 3)\n\n # Project out to 3D\n points_3d = np.matmul(np.linalg.inv(camera_K), h_uv.T).T \n\n # scale each point so that z is the same as the depth image\n scale_factors = depth_image[valid_indices[:, 0], valid_indices[:, 1]] / points_3d[:, 2] \n cloud_points = points_3d * np.tile(scale_factors, (3, 1)).T\n\n # attach RGB\n rgb = image[valid_indices[:, 0], valid_indices[:, 1]] / 255.\n xyz_rgb = np.concatenate([cloud_points, rgb], axis=1)\n\n return xyz_rgb",
"def depths(mask, normals):\n width, height, three = normals.shape\n assert three == 3\n m = dok_matrix((width*height*2, width*height), dtype=float)\n b = np.zeros(width*height*2, dtype=float)\n log.debug('maximal shape: %s', m.shape)\n row = 0\n coords = ConsistentBimap()\n for x in range(width):\n for y in range(height):\n if not mask[x,y]: continue\n elif not (mask[x+1,y] and mask[x,y+1] and mask[x-1,y] and mask[x,y-1]):\n continue\n else:\n # n_z (z(x+1, y) - z(x, y)) = -n_x\n m[row, coords[(x+1,y)]] = 1\n m[row, coords[(x,y)]] = -1\n b[row] = normals[x,y,X]/normals[x,y,Z]\n row += 1\n\n # n_z (z(x, y+1) - z(x, y)) = -n_y\n m[row, coords[(x,y+1)]] = 1\n m[row, coords[(x,y)]] = -1\n b[row] = normals[x,y,Y]/normals[x,y,Z]\n row += 1\n\n # Now we know how many pixels are used and we restrict the matrix to the\n # rows needed.\n m_p = dok_matrix((row+1, coords.i), dtype=float)\n\n for (x,y), v in m.items():\n try:\n m_p[x,y] = v\n except Exception as e:\n log.error('error at (%s, %s)', x, y)\n raise\n # normalization\n m_p[row,0] = 1\n m_p = m_p.tocsr()\n b = b[:row+1]\n log.debug('actual shape: %s', m_p.shape)\n s = lsqr(m_p, b, atol=1e-3, btol=1e-6, show=True)\n z_p = s[0]\n z_p = normalize(z_p)\n z = np.zeros((width, height))\n for row,(x,y) in coords.r.items():\n z[x,y] = z_p[row]\n log.debug('z(0,0) = %s', z[0,0])\n return z",
"def unproject_depth_map(image, inv_depth_image, K, mask=None):\n\n xyz = np.zeros([0, 3], dtype=np.float64)\n rgb = np.zeros([0, 3], dtype=np.float64) # values should be within (0, 1)\n\n \"\"\" YOUR CODE STARTS HERE \"\"\"\n f_x = K[0, 0]\n f_y = K[1, 1]\n c_x = K[0, 2]\n c_y = K[1, 2]\n\n x = np.linspace(0, image.shape[1] - 1, image.shape[1]).astype(np.int)\n y = np.linspace(0, image.shape[0] - 1, image.shape[0]).astype(np.int)\n xx, yy = np.meshgrid(x, y)\n xx = (xx - c_x) / inv_depth_image / f_x\n yy = (yy - c_y) / inv_depth_image / f_y\n\n if mask is not None:\n xx = xx * mask\n yy = yy * mask\n inv_depth_image = 1/inv_depth_image * mask\n else:\n inv_depth_image = 1/inv_depth_image\n\n xyz = np.dstack((xx, yy, inv_depth_image))\n xyz = xyz.reshape(-1, 3)\n rgb = image.reshape(-1, 3)\n\n \"\"\" YOUR CODE ENDS HERE \"\"\"\n\n return xyz, rgb",
"def extent_3d(self):\n\n minxy, maxxy = self.extent_2d()\n cs = self.cs\n xyz0 = cs.xyz_from_oriented((minxy[0], minxy[1], 0.0))\n xyz1 = cs.xyz_from_oriented((maxxy[0], minxy[1], 0.0))\n xyz2 = cs.xyz_from_oriented((maxxy[0], maxxy[1], 0.0))\n xyz3 = cs.xyz_from_oriented((minxy[0], maxxy[1], 0.0))\n\n \"\"\"\n xyz0 = cs.xyz_from_oriented((self.x0, self.y0, 0.0))\n xyz1 = cs.xyz_from_oriented((self.x0 + (self.nx - 1) * self.dx,\n self.y0,\n 0.0))\n xyz2 = cs.xyz_from_oriented((self.x0 + (self.nx - 1) * self.dx,\n self.y0 + (self.ny - 1) * self.dy, 0.0))\n xyz3 = cs.xyz_from_oriented((self.x0,\n self.y0 + (self.ny - 1) * self.dy,\n 0.0))\n \"\"\"\n\n minxyz = (min(xyz0[0], xyz1[0], xyz2[0], xyz3[0]),\n min(xyz0[1], xyz1[1], xyz2[1], xyz3[1]),\n min(xyz0[2], xyz1[2], xyz2[2], xyz3[2]))\n maxxyz = (max(xyz0[0], xyz1[0], xyz2[0], xyz3[0]),\n max(xyz0[1], xyz1[1], xyz2[1], xyz3[1]),\n max(xyz0[2], xyz1[2], xyz2[2], xyz3[2]))\n\n return minxyz, maxxyz",
"def seafloor_grid(depths, lat, lon):",
"def ComputeDepth(self, *args):\n return _Select3D.Select3D_SensitiveCircle_ComputeDepth(self, *args)",
"def point_cloud_to_panorama(points, v_res=0.42, h_res=0.35, v_fov=(-24.9, 2.0),\n d_range=(0, 100), y_fudge=3, side_range=(-20., 20.),\n fwd_range=(0.,40), height_range=(-2, 0.4)):\n # side_range = (-30., 30.)\n # fwd_range = (0., 60)\n # height_range = (-2, 0.4) #\n xi_points = points[:, 0]\n yi_points = points[:, 1]\n zi_points = points[:, 2]\n reflectance = points[:, 3]\n\n f_filt = np.logical_and(\n (xi_points > fwd_range[0]), (xi_points < fwd_range[1]))\n s_filt = np.logical_and(\n (yi_points > -side_range[1]), (yi_points < -side_range[0]))\n filter = np.logical_and(f_filt, s_filt)\n z_filt = np.logical_and((zi_points >= height_range[0]),\n (zi_points < height_range[1]))\n zfilter = np.logical_and(filter, z_filt)\n indices = np.argwhere(zfilter).flatten()\n print 'indice size'\n print indices.size\n\n x_points = xi_points[indices]\n print 'xi_points'\n print x_points\n y_points = yi_points[indices]\n z_points = zi_points[indices]\n r_points = reflectance[indices]\n r_max = max(r_points)\n z_max = max(z_points)\n r_min = min(r_points)\n z_min = min(z_points)\n\n # Projecting to 2D\n # x_points = points[:, 0]\n # y_points = points[:, 1]\n # z_points = points[:, 2]\n # r_points = points[:, 3]\n\n # d_points = np.sqrt(x_points ** 2 + y_points ** 2) # map distance relative to origin\n # print 'd_points size', len(d_points)\n d_points = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2) # abs distance\n # d_points = r_points\n # d_points = z_points\n\n # d_points = np.zeros(indices.size)\n # for i in range(indices.size):\n # d_points[i] = z_points[i]\n\n # We use map distance, because otherwise it would not project onto a cylinder,\n # instead, it would map onto a segment of slice of a sphere.\n\n # RESOLUTION AND FIELD OF VIEW SETTINGS\n v_fov_total = -v_fov[0] + v_fov[1]\n\n # CONVERT TO RADIANS\n v_res_rad = v_res * (np.pi / 180)\n h_res_rad = h_res * (np.pi / 180)\n\n # MAPPING TO CYLINDER\n de_points = np.sqrt(x_points ** 2 + y_points ** 2)\n x_img = np.arctan2(y_points, x_points) / h_res_rad\n y_img = -(np.arctan2(z_points, de_points) / v_res_rad)\n\n # THEORETICAL MAX HEIGHT FOR IMAGE\n d_plane = (v_fov_total / v_res) / (v_fov_total * (np.pi / 180))\n h_below = d_plane * np.tan(-v_fov[0] * (np.pi / 180))\n h_above = d_plane * np.tan(v_fov[1] * (np.pi / 180))\n y_max = int(np.ceil(h_below + h_above + y_fudge))\n\n # SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM\n x_min = -180.0 / h_res / 2\n x_img = np.trunc(-x_img - x_min).astype(np.int32)\n x_max = int(np.ceil(180.0 / h_res))\n\n y_min = -((v_fov[1] / v_res) + y_fudge)\n y_img = np.trunc(y_img - y_min).astype(np.int32)\n\n # CLIP DISTANCES\n d_points = np.clip(d_points, a_min=d_range[0], a_max=d_range[1])\n\n # CONVERT TO IMAGE ARRAY\n img = np.ones([y_max + 1, x_max + 1, 3], dtype=np.uint8)*255\n distance = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2)\n dis_max = max(distance)\n dis_min = min(distance)\n img[y_img, x_img, 0] = scale_to_255(distance, min=dis_min, max=dis_max)\n img[y_img, x_img, 1] = scale_to_255(z_points, min=z_min, max=z_max)\n img[y_img, x_img, 2] = scale_to_255(r_points, min=r_min, max=r_max)\n return img",
"def rgb_plane2rgb_world(imgDepth, camera_params):\n fx_rgb = camera_params['fx_rgb']\n fy_rgb = camera_params['fy_rgb']\n cx_rgb = camera_params['cx_rgb']\n cy_rgb = camera_params['cy_rgb']\n\n H, W = imgDepth.shape\n xx, yy = np.meshgrid(range(W), range(H))\n x3 = (xx - cx_rgb) * imgDepth / fx_rgb # здесь есть небольшие различия в значениях с матлабом\n y3 = (yy - cy_rgb) * imgDepth / fy_rgb\n z3 = imgDepth\n points3d = np.stack([x3, -y3, z3], axis=2)\n return points3d",
"def back_project_no_distortion(\n shot: pymap.Shot, pixel: List[float], depth: float\n) -> np.ndarray:\n K = shot.camera.get_K()\n K1 = np.linalg.inv(K)\n p = np.dot(K1, [pixel[0], pixel[1], 1])\n p *= depth / p[2]\n return shot.pose.transform_inverse(p)",
"def __get_projected_image_depth(self, projected_image, surface):\n image_depth = np.ones((self.height, self.width)) * np.inf\n p = self.orientation.dot(surface.edge_points3d[0] - self.position)\n n = self.orientation.dot(surface.normal)\n t = p.dot(n)\n\n for i in xrange(self.height):\n for j in xrange(self.width):\n if not np.allclose(projected_image[i, j], 0):\n d = np.array([j - self.half_width, i - self.half_height, self.focal])\n d /= np.linalg.norm(d)\n image_depth[i, j] = t / d.dot(n)\n\n return image_depth",
"def initial_superpixel_cluster(self, superpixel_center, superpixel_seed_index, pixels, space_map, norm_map):\n # Reshape depth from (3,3,1) into (3,3)\n shape = self.depth.shape[0:2]\n depth = self.depth.reshape(shape)\n mask1 = pixels != superpixel_seed_index\n mask2 = depth <= 0.05\n mask = mask1 | mask2\n\n [col, row] = np.meshgrid(\n np.arange(self.im_width), np.arange(self.im_height))\n col = np.ma.array(col, mask=mask) - superpixel_center[0]\n row = np.ma.array(row, mask=mask) - superpixel_center[1]\n\n diff = np.ma.multiply(col, col) + np.ma.multiply(row, row)\n max_dist = np.max(diff)\n\n # Reshape depth from mxn into (m-1)x(n-1)\n pixel_depths = depth[:-1, :-1][~mask[:-1, :-1]].reshape(-1, 1)\n valid_depth_num = pixel_depths.shape[0]\n pixel_positions = space_map[:-1, :-1][~mask[:-1, :-1]].reshape(-1, 3)\n pixel_norms = norm_map[~mask[:-1, :-1]].reshape(-1, 3)\n return pixel_depths, pixel_norms, pixel_positions, max_dist, valid_depth_num"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
 Generates a valid spawn point. If the scene is not basic23, the valid spawn is the same as a valid target.
|
def valid_spawn(scene):
if scene == 'basic23':
r = np.random.rand()
if r < 1/5:
target_x = 4.2 - np.random.rand() * 1
target_y = 4.7 - np.random.rand() * 9
elif r < 2/5:
target_x = 10.2 - np.random.rand() * 1
target_y = 4.7 - np.random.rand() * 9
elif r < 3/5:
target_x = 16.2 - np.random.rand() * 1
target_y = 4.7 - np.random.rand() * 9
elif r < 4/5:
target_x = 21.1
target_y = 4.7 - np.random.rand() * 9
else:
target_x = 20.9 - np.random.rand() * 21.9
target_y = 1 - np.random.rand() * 2.6
return np.array([target_x, target_y])
else:
return valid_trgt(scene)
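
# An illustrative call (assumptions: numpy imported as np, and for scenes other
# than 'basic23' a valid_trgt helper must be defined elsewhere; the seed is only
# there to make this sketch reproducible).
import numpy as np

np.random.seed(0)               # reproducible draw for this sketch only
spawn = valid_spawn('basic23')  # samples one of the five hand-tuned regions above
print(spawn)                    # e.g. array([x, y]) inside the basic23 layout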
|
[
"def getSpawnPoint(self):\n return random.choice(self.spawnPoints)",
"def rdm_spawn(self):\n\n spawned = False #sprite hasn't spawned\n while not spawned:\n\n self.tile_y = random.randrange(1,14) #randomely sets y postion on grid\n self.tile_x = random.randrange(1,14) #randomely sets x postion on grid\n\n #check for wall\n if self.level.frame[self.tile_y][self.tile_x] != 'X':\n #check for another spawned sprite\n if self.level.frame[self.tile_y][self.tile_x] != 'O': \n\n #place 'spawned' token on grid\n self.level.frame[self.tile_y][self.tile_x] == 'O'\n\n self.y = self.tile_y * TILESIZE #set y position on screen\n self.x = self.tile_x * TILESIZE #set x position on screen\n\n spawned = True #ends loop",
"def _spawn_blocker(self, transform):\n # static object transform\n shift = 0.9\n x_ego = self._reference_waypoint.transform.location.x\n y_ego = self._reference_waypoint.transform.location.y\n x_cycle = transform.location.x\n y_cycle = transform.location.y\n x_static = x_ego + shift * (x_cycle - x_ego)\n y_static = y_ego + shift * (y_cycle - y_ego)\n\n spawn_point_wp = self.ego_vehicles[0].get_world().get_map().get_waypoint(transform.location)\n\n self.transform2 = carla.Transform(carla.Location(x_static, y_static,\n spawn_point_wp.transform.location.z + 0.3))\n\n static = CarlaActorPool.request_new_actor('static.prop.vendingmachine', self.transform2)\n static.set_simulate_physics(enabled=False)\n\n return static",
"def random_spawn(self):\n self.turtle = Turtle(self._instance_number)\n\n self.rand_pos()\n\n self.turtle.spawn(self.x_pos, self.y_pos, self.theta)\n print(self.turtle.get_name())",
"def spawn_tile(self):\n empty_squares = get_empty_squares(self.get_board())\n chosen_square = empty_squares[random.randint(0, len(empty_squares) - 1)]\n next_num = 2\n if random.random() < self.four_chance:\n next_num = 4\n self.get_board()[chosen_square[0]][chosen_square[1]] = next_num\n if len(empty_squares) == 1 and not has_valid_move(self.get_board()):\n return False\n return True;",
"def spawn_poison(self):\n \n valid = False\n \n #gives the new x-coordinate and y-coordinate of the new poison on the grid.\n while not valid:\n \n new_loc_x = random.randint(0, const['def_board'] - 1)\n \n new_loc_y = random.randint(0, const['def_board'] - 1)\n \n new_loc = [new_loc_x, new_loc_y]\n \n \n if not self.p_snake.pos.count(new_loc) > 0:\n \n valid = True\n \n self.pois_coord = new_loc",
"def spawn_monster(region_lvl, lvl):\n\n difficulty = np.random.choice(2, 1, p=[0.65, 0.35])[0]\n\n name = random.choice(list(gv.monster_name_dict[str(region_lvl)].keys()))\n\n monster_name = random.choice(gv.monster_name_dict[str(region_lvl)][name][str(difficulty + 1)]) + \" \" + name\n\n monster = Monster(name=monster_name, difficulty=difficulty, region_lvl=region_lvl, lvl=lvl)\n\n return monster",
"def _try_spawn_ego_vehicle_at(self, transform):\n print('try spawn ego vehicle at mai hai ham')\n\n vehicle = None\n # Check if ego position overlaps with surrounding vehicles\n overlap = False\n for idx, poly in self.vehicle_polygons[-1].items():\n poly_center = np.mean(poly, axis=0)\n ego_center = np.array([transform.location.x, transform.location.y])\n dis = np.linalg.norm(poly_center - ego_center)\n if dis > 8:\n continue\n else:\n overlap = True\n break\n\n if not overlap:\n vehicle = self.world.try_spawn_actor(self.ego_bp, transform)\n\n if vehicle is not None:\n self.ego = vehicle\n return True\n\n return False",
"def random_spawn_test(self):\n collection = []\n for i in range(self._instance_number):\n collection.append(Turtle(i))\n\n self.rand_pos()\n\n collection[i].spawn(self.x_pos, self.y_pos, self.theta)\n print(collection[i].get_name())",
"def material_spawn_test(material):\n physics_client = p.connect(p.DIRECT)\n particle_size = 0.2\n half_extents = 0.1\n spawned_particles = pi.spawn_material_block(\n [0, 0, 0], [1, 1, 1], material, {'particle size': particle_size})\n\n spawned_positions = set()\n for particles in spawned_particles:\n spawned_positions.add(tuple(particles.get_position()))\n\n expected_positions = set()\n for x in range(int(1/particle_size)):\n for y in range(int(1/particle_size)):\n for z in range(int(1/particle_size)):\n expected_positions.add(tuple(\n [x * particle_size+half_extents,\n y * particle_size+half_extents,\n z * particle_size+half_extents]))\n\n p.disconnect()\n return expected_positions == spawned_positions",
"def random_point(boundary):\n\tpass",
"def _try_spawn_random_walker_at(self, transform):\n print('try spawn random walker mai hai ham')\n walker_bp = random.choice(\n self.world.get_blueprint_library().filter('walker.*'))\n # set as not invencible\n if walker_bp.has_attribute('is_invincible'):\n walker_bp.set_attribute('is_invincible', 'false')\n walker_actor = self.world.try_spawn_actor(walker_bp, transform)\n\n if walker_actor is not None:\n walker_controller_bp = self.world.get_blueprint_library().find('controller.ai.walker')\n walker_controller_actor = self.world.spawn_actor(\n walker_controller_bp, carla.Transform(), walker_actor)\n # start walker\n walker_controller_actor.start()\n # set walk to random point\n walker_controller_actor.go_to_location(\n self.world.get_random_location_from_navigation())\n # random max speed\n # max speed between 1 and 2 (default is 1.4 m/s)\n walker_controller_actor.set_max_speed(1 + random.random())\n return True\n return False",
"def _spawn_actor(self, grid_world):\n new_id = grid_world.largest_id + 1\n # Brute force this, keep coming up with new random position until\n # finally adds successfuly.\n x_dim, y_dim = grid_world.grid.grid_dim\n rand_location = (random.randint(0, x_dim - 1),\n random.randint(0, y_dim - 1))\n actor = self.actor_producer(new_id, rand_location)\n while not grid_world.add_actor(actor, rand_location):\n rand_location = (random.randint(0, x_dim - 1),\n random.randint(0, y_dim - 1))\n actor = self.actor_producer(new_id, rand_location)",
"def spawn_vehicle(self) -> None:\n\n random.choice(self.access_points).generate()",
"def spawnEnemy(self):\n newUrchin = UrchinSprite()\n newUrchin.setCoordinates(self.coordinates[0], self.coordinates[1])\n newUrchin.setRandomDirection()\n BlackHoleSprite.chooseNextBlackHoleToSpawn()",
"def is_valid_position(self, piece):\r\n for (x, y) in piece.get_template():\r\n if x < 0 or x > 9 or y > 19 or \\\r\n (0 <= x <= 9 and 0 <= y <= 19 and self.grid[y][x]):\r\n return False\r\n return True",
"def spawn(self, pose_coordinates):\n pass",
"def spawn(self, tile_y, tile_x):\n self.tile_y = tile_y #y position on grid\n self.tile_x = tile_x #x position on grid\n self.level.frame[self.tile_y][self.tile_x] == 'O' #token for presence of spawned sprite\n self.y = self.tile_y * TILESIZE #y position on screen\n self.x = self.tile_x * TILESIZE #x position on screen",
"def generate_target(client, max_target_distance, scene=None, invalid_prob=0.0):\n\n is_valid = True\n if scene is not None:\n r = np.random.rand()\n if r < invalid_prob:\n target = invalid_trgt(scene)\n is_valid = False\n else:\n target = valid_trgt(scene)\n\n else:\n pos = client.simGetGroundTruthKinematics().position\n x = (2 * np.random.rand() - 1) * max_target_distance + pos.x_val\n y = (2 * np.random.rand() - 1) * max_target_distance + pos.y_val\n target = np.array([x, y])\n return target, is_valid"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
 Generates a new goal target for the agent to reach. If the scene string is None, a random position relative to the current position of the UAV is used. Otherwise, a random valid or invalid target is drawn from predefined areas.
|
def generate_target(client, max_target_distance, scene=None, invalid_prob=0.0):
is_valid = True
if scene is not None:
r = np.random.rand()
if r < invalid_prob:
target = invalid_trgt(scene)
is_valid = False
else:
target = valid_trgt(scene)
else:
pos = client.simGetGroundTruthKinematics().position
x = (2 * np.random.rand() - 1) * max_target_distance + pos.x_val
y = (2 * np.random.rand() - 1) * max_target_distance + pos.y_val
target = np.array([x, y])
return target, is_valid
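
# A hedged usage sketch, assuming an AirSim-style `client` whose
# simGetGroundTruthKinematics() matches the call above, plus the
# valid_trgt/invalid_trgt helpers referenced by the function; none of these are
# defined in this row.

# `client` is hypothetical here: an already-connected AirSim-style client.
# With scene=None the target is sampled in a square around the UAV's current position.
target, is_valid = generate_target(client, max_target_distance=20.0)

# With a scene string, the target is drawn from the predefined valid/invalid areas,
# becoming invalid with probability invalid_prob.
target, is_valid = generate_target(client, max_target_distance=20.0,
                                   scene='basic23', invalid_prob=0.2)
print(target, is_valid)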
|
[
"def valid_spawn(scene):\n if scene == 'basic23':\n r = np.random.rand()\n if r < 1/5:\n target_x = 4.2 - np.random.rand() * 1\n target_y = 4.7 - np.random.rand() * 9\n elif r < 2/5:\n target_x = 10.2 - np.random.rand() * 1\n target_y = 4.7 - np.random.rand() * 9\n elif r < 3/5:\n target_x = 16.2 - np.random.rand() * 1\n target_y = 4.7 - np.random.rand() * 9\n elif r < 4/5:\n target_x = 21.1\n target_y = 4.7 - np.random.rand() * 9\n else:\n target_x = 20.9 - np.random.rand() * 21.9\n target_y = 1 - np.random.rand() * 2.6\n\n return np.array([target_x, target_y])\n else:\n return valid_trgt(scene)",
"def node_choose_enemy_target(caller, raw_string, **kwargs):\n text = \"Choose an enemy to target.\"\n action_dict = kwargs[\"action_dict\"]\n\n combathandler = _get_combathandler(caller)\n\n _, enemies = combathandler.get_sides(caller)\n\n options = [\n {\n \"desc\": target.get_display_name(caller),\n \"goto\": (\n _step_wizard,\n {**kwargs, **{\"action_dict\": {**action_dict, **{\"target\": target}}}},\n ),\n }\n for target in enemies\n ]\n options.extend(_get_default_wizard_options(caller, **kwargs))\n return text, options",
"def set_new_goal(self):\n rospy.loginfo(\"Defining a goal position...\")\n \n if(self.static_goal and self.testing and self.gentests):\n x_y = np.random.uniform(low = -0.1, high = 0.1, size = 2)\n z = np.random.uniform(low = -0.05, high = 0.1, size = 1)\n perturb = np.concatenate([x_y,z],axis=0)\n self.goal_pos = self.static_goal_pos\n self.goal_pos = self.goal_pos + perturb\n elif(self.static_goal and self.testing):\n self.goal_pos = self.static_goal_pos\n elif(self.static_goal):\n self.goal_pos = self.static_goal_pos\n elif(self.testing and self.gentests):\n while(True):\n x_y = np.random.uniform(low = -0.4, high = 0.4, size = 2)\n z = np.random.uniform(low = 0, high = 0.4, size = 1)\n self.goal_pos = np.concatenate([x_y,z],axis=0) \n if(np.linalg.norm(self.goal_pos)<0.4 and np.linalg.norm(self.goal_pos)>0.1):\n break\n else:\n index = np.random.randint(low=0,high=len(self.goal_pos_list)-1)\n self.goal_pos = self.goal_pos_list[index]\n \n sphere_pose = Pose()\n sphere_pose.position.x = self.goal_pos[0]\n sphere_pose.position.y = self.goal_pos[1]\n sphere_pose.position.z = self.goal_pos[2]\n self.config_request.model_state.pose = sphere_pose\n self.set_model_state(self.config_request)",
"def random_agent(board):\n n_legal = board.legal_moves.count()\n random_pick = random.randint(0,n_legal-1)\n cmd = str(list(board.legal_moves)[random_pick])\n cmd_AI = chess.Move.from_uci(cmd)\n print('###############\\nRandom AI moves:\\n',board.lan(cmd_AI))\n return cmd_AI",
"def handle_random_target(req):\n\n # Random Location selector\n rand_index = rand(0, 5)\n rand_x, rand_y = TARGET_POSE[rand_index]\n print(f'The Target Location is x: {rand_x}, y: {rand_y}')\n return RandomTargetResponse(rand_x, rand_y)",
"def SetGoal(self,Tgoal,randomize=True):\n print 'randomizing blocks, might take a couple of seconds...'\n if not randomize:\n self.Tgoal = Tgoal\n return\n \n with self.env:\n self.Tgoal = None\n while self.Tgoal is None:\n for gmodel in self.gmodels:\n gmodel.target.SetTransform(Tgoal)\n minextents = array([-0.1,-0.2,0])\n maxextents = array([0.2,0.2,0])\n invalidgrasp = False\n for igmodel in range(len(self.gmodels)-1,-1,-1):\n gmodel = self.gmodels[igmodel]\n target=gmodel.target\n T = eye(4)\n target.SetTransform(T)\n ab = target.ComputeAABB()\n while True:\n T = array(Tgoal)\n target.SetTransform(T)\n validgrasps,validindices=gmodel.computeValidGrasps(returnnum=1)\n if len(validgrasps) == 0:\n print 'no valid goal grasp for target %s'%gmodel.target\n invalidgrasp = True\n break\n\n T[0:3,3] += (maxextents-minextents)*random.rand(3)+minextents\n T[2,3] += 0.001-(ab.pos()[2]-ab.extents()[2])\n if linalg.norm(Tgoal[0:3,3]-T[0:3,3]) < 0.1:\n continue\n target.SetTransform(T)\n if not self.env.CheckCollision(target):\n # have to check all previously moved targets still maintain their grasps\n success = True\n for igmodel2 in range(igmodel,len(self.gmodels)):\n validgrasps,validindices=self.gmodels[igmodel2].computeValidGrasps(returnnum=1)\n if len(validgrasps) == 0:\n success = False\n break\n if success:\n break\n \n if not invalidgrasp:\n self.Tgoal = Tgoal",
"def randomTarget(longitude,\n latitude,\n altitude,\n target_label=None,\n coords_offset=0.0000,\n **kwds):\n\n color = randomColor()\n letter = random.choice(gs.LETTERS)\n params = {\n 'size': None,\n 'orientation': random.random() * 360,\n 'longitude': longitude,\n 'latitude': latitude,\n 'altitude': altitude,\n 'letter': letter,\n 'color': color,\n 'font_color': randomColor(ignore=color),\n }\n params.update(kwds)\n if target_label is None:\n target_label = WRG.next()\n target, _, extra_params, shape = TARGET_CLASSES[target_label]\n params.update(extra_params)\n\n return target(**params), target_label, gs.LETTERS.index(letter), shape",
"def node_choose_enemy_recipient(caller, raw_string, **kwargs):\n text = \"Choose an enemy as a recipient.\"\n action_dict = kwargs[\"action_dict\"]\n\n combathandler = _get_combathandler(caller)\n _, enemies = combathandler.get_sides(caller)\n\n options = [\n {\n \"desc\": target.get_display_name(caller),\n \"goto\": (\n _step_wizard,\n {**kwargs, **{\"action_dict\": {**action_dict, **{\"recipient\": target}}}},\n ),\n }\n for target in enemies\n ]\n options.extend(_get_default_wizard_options(caller, **kwargs))\n return text, options",
"def random_agent(env):\n start_state = AirplaneState(randint(0, env.grid_size), randint(0, env.grid_size),\n randint(10, env.max_height), 0, randint(0, 7), randint(env.min_speed, env.max_speed))\n goal_state = AirplaneState(randint(0, env.grid_size), randint(0, env.grid_size),\n randint(10, env.max_height), 0, randint(0, 7), randint(env.min_speed, env.max_speed))\n name = ''.join(choice(string.ascii_uppercase + string.digits) for _ in range(6))\n return AirplaneAgent(start_state, goal_state, name)",
"def create_target(self):\n random_target = random.randint(1 ,3)\n \n # TODO: Decide what type of target to create and append it to the list\n \n if random_target == 1:\n target = Orange_Target()\n target.velocity.dx = random.uniform(2,3)\n target.velocity.dy = random.uniform(-2, -3)\n target.center.y = random.uniform(500,700)\n \n elif random_target ==2:\n target = Number_Target()\n target.velocity.dx = random.uniform(1,1.5)\n target.velocity.dy = random.uniform(-1, -1.5)\n target.center.y = random.uniform(500,700)\n \n elif random_target == 3:\n target = Blue_Target()\n target.velocity.dx = random.uniform(2,3)\n target.velocity.dy = random.uniform(-2, -3)\n target.center.y = random.uniform(500,700)\n\n self.targets.append(target)",
"def get_adventure(self, *args, **kwargs) -> str:\n\n try:\n actor: str = args[0]\n except IndexError:\n actor = \"TP\"\n\n if actor is None:\n actor = \"TP\"\n\n story: str = random.choice(self._adventures)\n\n story = story.replace(\"<ACTOR>\", actor)\n\n return story",
"def generate_random_node(self):\n if np.random.random_sample() > self.goal_sample_rate:\n x = np.random.uniform(self.sample_space.x_min, self.sample_space.x_max)\n y = np.random.uniform(self.sample_space.y_min, self.sample_space.y_max)\n z = np.random.uniform(self.sample_space.z_min, self.sample_space.z_max)\n node = Node(np.array((x, y, z)))\n else:\n node = self.goal_node\n\n return node",
"def move_to(self):\n #self.find_wall()\n \n t = self.find_best_way()\n if t:\n click(t)\n else:\n click(random.choice(locations))",
"def do_move(self, world, friendly_unit, enemy_units):\n\n initial_translation = 11\n nums_turns_since_kill = 20\n print(self.outbound)\n\n # Initialize initial quadrant\n if self.turn_count == 0:\n if friendly_unit.position == (3, 3):\n self.initial_quadrant = 1\n if friendly_unit.position == (26, 3):\n self.initial_quadrant = 2\n if friendly_unit.position == (3, 26):\n self.initial_quadrant = 3\n if friendly_unit.position == (26, 26):\n self.initial_quadrant = 4\n\n self.turn_count += 1\n\n # if unit is dead, stop making moves.\n if friendly_unit.status == 'DISABLED':\n print(\"Turn {0}: Disabled - skipping move.\".format(str(self.turn_count)))\n self.target = None\n self.outbound = True\n return\n\n # HARD CODE THE INITIAL TURNS TO GET A GOOD POSITION\n if self.turn_count <= 4*(initial_translation - 1):\n if self.initial_quadrant == 1:\n self.outbound = True\n self.target = world.position_to_tile_map[(3, 3 + initial_translation)]\n if self.turn_count == initial_translation:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 1:\n self.target = world.position_to_tile_map[(3 + initial_translation, 3 + initial_translation)]\n if self.turn_count == (2 * initial_translation)-1:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 2:\n self.outbound = False\n self.target = world.util.get_closest_friendly_territory_from(friendly_unit.position,\n friendly_unit.snake)\n if self.turn_count == (4 * initial_translation)-1:\n self.initial_ai_moves += 1\n\n if self.initial_quadrant == 2:\n self.outbound = True\n self.target = world.position_to_tile_map[(26, 3 + initial_translation)]\n if self.turn_count == initial_translation:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 1:\n self.target = world.position_to_tile_map[(26 - initial_translation, 3 + initial_translation)]\n if self.turn_count == (2 * initial_translation)-1:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 2:\n self.outbound = False\n self.target = world.util.get_closest_friendly_territory_from(friendly_unit.position,\n friendly_unit.snake)\n if self.turn_count == (4 * initial_translation)-1:\n self.initial_ai_moves += 1\n\n if self.initial_quadrant == 3:\n self.outbound = True\n self.target = world.position_to_tile_map[(3, 26 - initial_translation)]\n if self.turn_count == initial_translation:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 1:\n self.target = world.position_to_tile_map[(3 + initial_translation, 26 - initial_translation)]\n if self.turn_count == (2 * initial_translation) - 1:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 2:\n self.outbound = False\n self.target = world.util.get_closest_friendly_territory_from(friendly_unit.position,\n friendly_unit.snake)\n if self.turn_count == (4 * initial_translation) - 1:\n self.initial_ai_moves += 1\n\n if self.initial_quadrant == 4:\n self.outbound = True\n self.target = world.position_to_tile_map[(26 - initial_translation, 26)]\n if self.turn_count == initial_translation:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 1:\n self.target = world.position_to_tile_map[(26 - initial_translation, 26 - initial_translation)]\n if self.turn_count == (2 * initial_translation)-1:\n self.initial_ai_moves += 1\n elif self.initial_ai_moves == 2:\n self.outbound = False\n self.target = world.util.get_closest_friendly_territory_from(friendly_unit.position,\n friendly_unit.snake)\n if self.turn_count == (4 * initial_translation)-1:\n self.initial_ai_moves += 1\n\n else:\n print(self.turns_since_kill)\n 
self.turns_since_kill += 1\n # LOOK FOR KILL, WHEN KILL RETURN HOME, IF NO KILL, THEN NEVER GO HOME ):\n if self.has_killed is False:\n # Just assume killed and run away\n if self.turns_since_kill == nums_turns_since_kill - 1:\n self.has_killed = True\n self.turns_since_kill = 0\n self.outbound = False\n # If there arent any bodies to take, go to closest territory\n if world.util.get_closest_enemy_body_from(friendly_unit.position, None) is None:\n self.outbound = True\n self.target = world.util.get_closest_enemy_territory_from(friendly_unit.position,\n friendly_unit.snake)\n else:\n self.outbound = True\n self.target = world.util.get_closest_enemy_body_from(friendly_unit.position, friendly_unit.snake)\n\n else:\n self.target = world.util.get_closest_friendly_territory_from(friendly_unit.position, None)\n if friendly_unit.position in friendly_unit.territory:\n self.has_killed = False\n self.outbound = True\n self.outbound = False\n\n # if unit reaches the target point, reverse outbound boolean and set target back to None\n # if self.target is not None and friendly_unit.position == self.target.position:\n # self.outbound = not self.outbound\n # self.target = None\n #\n\n # # if outbound and no target set, set target as the closest capturable tile at least 1 tile away\n # from your territory's edge.\n if self.outbound and self.target is None:\n self.target = world.position_to_tile_map((14, 14))\n #\n # # else if inbound and no target set, set target as the closest friendly tile\n elif not self.outbound and self.target is None:\n self.target = world.util.get_closest_friendly_territory_from(friendly_unit.position, None)\n #\n # # set next move as the next point in the path to target\n # Construct the set to avoid\n avoid = set()\n avoid.union(friendly_unit.territory)\n avoid.union(friendly_unit.snake)\n # If going outbound, try to avoid own territory\n if self.outbound:\n next_move = world.path.get_shortest_path(friendly_unit.position, self.target.position, avoid)[0]\n else:\n next_move = world.path.get_shortest_path(friendly_unit.position, self.target.position,\n friendly_unit.snake)[0]\n\n # move!\n friendly_unit.move(next_move)\n self.units_from_territory_edge += 1\n print(\"Turn {0}: currently at {1}, making {2} move to {3}.\".format(\n str(self.turn_count),\n str(friendly_unit.position),\n 'outbound' if self.outbound else 'inbound',\n str(self.target.position)\n ))",
"def move_target(self):\n old_loc = self.target\n new_locations = TARGET_MOVE_VECTORS + np.array(self.target)\n np.random.shuffle(new_locations)\n new_locations = list(map(tuple, new_locations))\n for new_loc in new_locations:\n if self.in_bounds(new_loc):\n self.target = new_loc\n self.last_move = (old_loc, new_loc)\n debug_print(f'[LANDSCAPE]: Target moved: {self.last_move}', 7)\n break",
"def set_goal(self):\n rospy.logdebug(\"Setting goal\")\n\n # Create goal:\n goal = MoveBaseGoal()\n\n # Set random goal:\n goal.target_pose.header.frame_id = \"map\"\n goal.target_pose.header.stamp = rospy.Time.now()\n goal.target_pose.pose.position.x = self.x\n goal.target_pose.pose.position.y = self.y\n goal.target_pose.pose.orientation.w = 1.0\n rospy.logdebug(f\"goal: {goal.target_pose.pose.position.x, goal.target_pose.pose.position.y}\")\n self.move_base.send_goal(goal, self.goal_status)",
"def place_agent(self):\n if self.initial_position is None:\n indx = np.random.randint(0, len(self.possible_locations))\n self.position = self.possible_locations[indx]\n else:\n self.position = np.copy(self.initial_position)",
"def target(self,p,target_option):\n log(\"MState target\",3)\n if not self.time == \"Night\":\n log(\"{} couldn't target {}: Not Night\".format(p,target_option))\n return False\n\n # Check if the player is represented as an object or a string\n try:\n player = self.getPlayer(p)\n except Exception as e:\n log(\"Couldn't find target from {}: {}\".format(p,e))\n return False\n try:\n target_number = ord(target_option)-ord('A')\n if target_number == len(self.players):\n target = self.null\n elif target_number == None:\n target = None\n else:\n target = self.players[target_number]\n player.target = target\n except Exception as e:\n log(\"{} failed to target {}: {}\".format(player.id, target_option, e))\n return False\n\n if player.role == \"MILKY\" and player.target == player:\n self.mainComm.send(\"Ewwww please don't milk yourself in front of me\", player.id)\n player.target = None\n return True\n\t\t\t\n self.mainComm.send(\"It is done, targeted {}\".format(target_option),player.id)\n\n if type(target) == Player:\n target_id = target.id\n target_role = target.role\n else:\n target_id = \"_\"\n target_role = \"_\"\n\n self.record(' '.join([\"TARGET\",player.id,player.role,target_id,target_role]))\n # Check if Night is over\n self.__checkToDay()\n return True",
"def pickNewTarget(consoleip):\n #pick a random console and random control from that console\n targetconsole = random.choice(players)\n targetsetup = currentsetup[targetconsole]\n targetctrlid = random.choice(targetsetup['controls'].keys())\n targetcontrol = targetsetup['controls'][targetctrlid]\n targetname = targetcontrol['name']\n targetdef = targetcontrol['definition']\n targettimeout = currenttimeout\n if 'scalefactor' in targetdef:\n targettimeout *= targetdef['scalefactor']\n targetinstruction = ''\n #pick a new target based on the control type and current value\n ctrltype = targetcontrol['type']\n if 'value' in targetdef:\n curval = targetdef['value']\n else:\n curval=''\n if ctrltype == 'button':\n targetval=1\n targetinstruction = controls.getButtonAction(targetname)\n elif ctrltype == 'toggle':\n if curval == 0:\n targetval=1\n else:\n targetval=0\n targetinstruction = controls.getToggleAction(targetname, targetval)\n elif ctrltype == 'selector':\n targetrange = range(targetdef['min'],targetdef['max']+1)\n targetval = getChoice(targetrange, curval)\n targetinstruction = controls.getSelectorAction(targetname, targetrange, targetval, curval)\n elif ctrltype == 'colour':\n targetrange = targetdef['values']\n targetval = getChoice(targetrange, curval)\n targetinstruction = controls.getColourAction(targetname, targetval)\n elif ctrltype in ['words', 'verbs']:\n targetrange = targetdef['pool']\n targetval=getChoice(targetrange, curval)\n if 'list' in targetdef:\n if targetdef['list']=='passwd':\n targetinstruction = controls.getPasswdAction(targetname, targetval)\n elif targetdef['list']=='verbs' or ctrltype == 'verbs':\n targetinstruction = controls.getVerbListAction(targetname, targetval)\n elif ctrltype == 'verbs':\n targetinstruction = controls.getVerbListAction(targetname, targetval)\n if targetinstruction=='':\n targetinstruction = controls.getWordAction(targetname, targetval)\n elif ctrltype == 'pin':\n finished=False\n while not finished:\n newpin=''\n for i in range(4):\n newpin += str(random.choice(range(10)))\n if newpin != curval:\n finished=True\n targetval=newpin\n targetinstruction = controls.getPinAction(targetname, targetval)\n else:\n print(\"Unhandled type: \" + ctrltype)\n #Now we have targetval and targetinstruction for this consoleip, store and publish it\n console[consoleip]['instructions']=targetinstruction\n console[consoleip]['target']={\"console\": targetconsole, \"control\": targetctrlid, \"value\": targetval, \"timestamp\": time.time(), \"timeout\": targettimeout}\n print(\"Instruction: \" + consoleip + '/' + targetctrlid + ' - ' + ctrltype + ' (was ' + str(curval) + ') ' + str(targetinstruction))\n #update game stats\n playerstats[consoleip]['instructions']['total'] += 1\n playerstats[targetconsole]['targets']['total'] += 1\n #publish!\n client.publish('clients/' + consoleip + '/timeout', str(targettimeout))\n client.publish('clients/' + consoleip + '/instructions', str(targetinstruction))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate an SPNNeuron class with the given number of gaussians.
|
def create_spn_neuron(n_gaussians: int):
class SPNNeuron(nn.Module):
def __init__(self, in_features):
"""
Initialize the SPNNeuron.
Args:
in_features: Number of input features.
                n_gaussians: Number of pairwise independence mixtures per scope pair
                    (captured from the enclosing create_spn_neuron call).
"""
# Init
super(SPNNeuron, self).__init__()
# Create random sequence of scopes
scopes = np.random.permutation(in_features)
sums = []
            # Pair up two consecutive (random) scopes; assumes in_features is even
for i in range(0, in_features, 2):
scope_1 = scopes[i]
scope_2 = scopes[i + 1]
                # Create n_gaussians pairwise Gaussian mixtures from these two scopes
mvs = []
for _ in range(n_gaussians):
# TODO: MVG are currently not trainable
# mv = MultivariateGaussian(n_vars=2, scope=[scope_1, scope_2])
# mvs.append(mv)
g1 = GaussianNode(scope=scope_1)
g2 = GaussianNode(scope=scope_2)
prod = ProductNode([g1, g2])
mvs.append(prod)
sumnode = SumNode(children=mvs)
sums.append(sumnode)
self.root = ProductNode(children=sums)
def forward(self, x):
x = self.root(x)
return x
return SPNNeuron
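# Usage sketch (hypothetical, not part of the original snippet): assumes GaussianNode,
# ProductNode and SumNode come from the surrounding SPN library, accept scalar scopes as
# used above, and that in_features is even.
#   SPNNeuron = create_spn_neuron(n_gaussians=4)
#   neuron = SPNNeuron(in_features=16)
#   y = neuron(x)  # x: a batch of 16-dimensional inputs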
|
[
"def create_network(self, neurons_input=1, neurons_hidden=0):\n\t\t\n\t\tself.rate = 0.01\t#Learning rate\n\t\tself.weights_input = []\n\t\tself.weights_hidden = []\n\t\tself.weights_output = []\n\t\tself.neurons_input = neurons_input\n\t\tself.neurons_hidden = neurons_hidden\n\n\t\tif neurons_input > 1:\n\t\t\tneurons_output = 1\n\t\telse:\n\t\t\tneurons_output = 0\n\t\tself.neurons_output = neurons_output\n\n\t\t# set random starting weights\n\t\tfor i in range(neurons_input):\n\t\t\tself.weights_input.append(randint(-1,1))\n\t\tfor i in range(neurons_hidden):\n\t\t\tfor j in range(neurons_input*neurons_hidden):\n\t\t\t\tself.weights_hidden.append(randint(-1,1))\n\t\tfor i in range(neurons_output):\n\t\t\tfor j in range(neurons_hidden):\n\t\t\t\tself.weights_output.append(randint(-1,1))",
"def generate_class_b(num_samples=10):\n\n class_b = []\n for i in range(0, num_samples):\n random_item = (random.normalvariate(0.0, 0.5),\n random.normalvariate(-0.5, 0.5),\n -1.0)\n class_b.append(random_item)\n\n return class_b",
"def createNeurons(self, verbose=False):\n\t\t# Depending on the set_reduction_method, we use different algorithms to calculate prototypes\n\t\tif self.set_reduction_method == \"means\":\n\t\t\tprint(\"Calculating centers for Gaussian function by means...\")\n\t\t\tself.prototypes = Cluster.byMeans(self.training_set, number_of_clusters=self.cluster_count,\n\t\t\t\t\t\t\t\t\t\t\t class_header=self.class_header, verbosity=0)\n\t\telif self.set_reduction_method == \"medoids\":\n\t\t\tprint(\"Calculating centers for Gaussian function by medoids...\")\n\t\t\tself.prototypes = Cluster.byMedoids(self.training_set, self.cluster_count, self.class_header, verbosity=0)\n\n\t\telif self.set_reduction_method == \"condensed\":\n\t\t\tprint(\"Calculating centers for Gaussian function using condensed nearest neighbor...\")\n\t\t\tself.prototypes = NearestNeighbor.condensedNearestNeighbor(self.training_set, self.class_header)\n\n\t\telse:\n\t\t\tprint(\"'%s' is an invalid set reduction method, please check it and try again.\" % self.set_reduction_method)\n\t\t\tsys.exit()\n\n\t\tif not self.regression:\n\t\t\tprint(\"Generating output layer of size %d with sigmoid activation functions...\" % self.output_count) if verbose else None\n\t\t\tself.output_layer = FFNetwork(len(self.prototypes),\n\t\t\t\t\t\t\t\t\t\t [self.output_count, 'sigmoid'],\n\t\t\t\t\t\t\t\t\t\t self.training_set,\n\t\t\t\t\t\t\t\t\t\t class_header=self.class_header,\n\t\t\t\t\t\t\t\t\t\t learning_rate=self.learning_rate,\n\t\t\t\t\t\t\t\t\t\t use_momentum=self.use_momentum,\n\t\t\t\t\t\t\t\t\t\t regression=self.regression)\n\t\telse:\n\t\t\tprint(\"Generating output layer with a single linear activation function for regression...\") if verbose else None\n\t\t\tself.output_layer = FFNetwork(len(self.prototypes),\n\t\t\t\t\t\t\t\t\t\t [self.output_count, 'linear'],\n\t\t\t\t\t\t\t\t\t\t self.training_set,\n\t\t\t\t\t\t\t\t\t\t class_header=self.class_header,\n\t\t\t\t\t\t\t\t\t\t learning_rate=self.learning_rate,\n\t\t\t\t\t\t\t\t\t\t use_momentum=self.use_momentum,\n\t\t\t\t\t\t\t\t\t\t regression=self.regression)\n\n\t\tprint(\"Generating widths for basis functions using nearest neighbor proximity...\") if verbose else None\n\t\tsigma_list = self.findSigma()\n\n\t\t# for every point in prototype list, create a neuron and store that point and sigma in said neuron\n\t\tprint(\"Generating layer of Gaussian basis functions of size %d...\" % len(self.prototypes)) if verbose else None\n\t\tfor i in range(len(self.prototypes)):\n\t\t\tself.function_layer.append(RBFNeuron(self.prototypes.iloc[i], sigma_list[i], self.class_header))\n\n\t\tprint(\"\\nTRAINING NEURONS ON TRAINING DATA OF %d ENTRIES\" % len(self.training_set)) if verbose else None\n\t\tself.training_set.apply(lambda row: self.train(row), axis=1)",
"def increment_classes(self, n=10):\n\n in_features = self.net.fc.in_features # size of each input sample\n out_features = self.net.fc.out_features # size of each output sample\n weight = self.net.fc.weight.data\n\n self.net.fc = nn.Linear(in_features, out_features+n)\n self.net.fc.weight.data[:out_features] = weight",
"def n_neuron(self):\n pass",
"def generate_random_particle(_id, input_size, neurons):\n position = []\n speed = []\n n_neurons = sum(neurons)\n n_weights = input_size * neurons[0]\n for i in range(len(neurons) - 1):\n n_weights = n_weights + neurons[i]*neurons[i+1] \n total_n_values = n_weights + (2* n_neurons) # give the PSO the possibility to select the activation functions and bias, subtract one if the activation function is not needed for the last neuron \n position = 2 * rand.random_sample(total_n_values) - 1\n speed = np.zeros(total_n_values)\n return Particle(_id, position, speed, n_weights, n_neurons)",
"def createNN():\r\n \r\n nn = createNearestNeighbor(allData,k=5,metric=\"Cosine\")\r\n g = Network()\r\n for cancer in range(len(cancerNames)):\r\n for nodeNumber in range(startingPositions[cancer],startingPositions[cancer+1]):\r\n g.add_node(nodeNumber,color=colors[cancer])\r\n\r\n for i in range(nn.shape[0]):\r\n for j in range(nn.shape[1]):\r\n if(nn[i][j]):\r\n g.add_edge(i,j)\r\n\r\n g.show(\"basic.html\")",
"def gan_target(n):\r\n generator_fake = np.ones((n,1))\r\n generator_real = np.zeros((n,1))\r\n discriminator_fake = np.zeros((n,1))\r\n discriminator_real = np.ones((n,1))\r\n return [generator_fake,generator_real,discriminator_fake,discriminator_real]",
"def generate_class_a(num_samples=5):\n\n class_a = []\n\n for i in range(0, num_samples):\n random_item_one = (random.normalvariate(-1.5, 1),\n random.normalvariate(0.5, 1),\n 1.0)\n random_item_two = (random.normalvariate(1.5, 1),\n random.normalvariate(0.5, 1),\n 1.0)\n class_a.append(random_item_one)\n class_a.append(random_item_two)\n\n return class_a",
"def gen_noise_param(n):\n #for now just have stdev=1 for every node\n return np.ones(n)*0.1",
"def __init__(self,rate):\r\n self.numInputs = 784\r\n self.rate = rate\r\n self.bias = random.random() - 0.5\r\n self.weights = numpy.random.random(self.numInputs) - 0.5",
"def create(initSampleCount=..., initSeedCount=..., pointDistribution=...) -> retval:\n ...",
"def make_spawner(self, num_inputs):\n # Add input instructions.\n input_instrs = [InputInstruction(i) for i in range(num_inputs)]\n all_atom_gens = self.atom_generators + input_instrs\n # Create spawner\n if self.keep_linear:\n self.spawner = LinearSpawner(all_atom_gens)\n else:\n self.spawner = Spawner(all_atom_gens)\n if self.verbose > 1:\n print('Creating Spawner with following atom generators:')\n print(self.spawner.atom_generators)",
"def generate_MultiRing(nClass, nSamples):\r\n # imports\r\n c = nClass\r\n n = nSamples\r\n # Determine class labels\r\n tau = np.linspace(0, 1, c+1)\r\n labels = np.zeros((1, nSamples))\r\n # Uniform distribution to randomly assign class labels\r\n dist = np.random.uniform(0, 1, (1, nSamples))\r\n # Distribute class labels\r\n for j in range(c):\r\n l_idx = np.where((tau[j] <= dist[0, :]) & (dist[0, :] < tau[j+1]))[0]\r\n temp = np.multiply(j, np.ones((1, len(l_idx))))\r\n labels[0, l_idx] = temp\r\n # Generate parameters of Gamma distribution\r\n a = np.power(np.linspace(1, c, c), 3)\r\n b = np.multiply(2, np.ones((1, c)))\r\n # Generate angle from uniform distribution\r\n angle = np.multiply(2*math.pi, np.random.uniform(0, 1, (1, n)))\r\n radius = np.zeros((1, n))\r\n for j in range(c):\r\n l_idx = np.where(labels[0, :] == j)[0]\r\n radius[0, l_idx] = np.random.gamma(a[j], b[0, j], len(l_idx))\r\n # Generate data samples\r\n data = np.zeros((2, n))\r\n data[0, :] = np.multiply(radius, np.cos(angle))\r\n data[1, :] = np.multiply(radius, np.sin(angle))\r\n\r\n # Plotting\r\n plotting.plot_Data(data, labels, c)\r\n\r\n return data, labels",
"def g05_graph(n, seed=0):\n\n graph = nx.gnp_random_graph(n, 0.5, seed)\n\n return graph",
"def makeG(self):\n count = 0\n count1 = 0\n \n #changed here\n self.previousNeuronStatusList=[False for i in range(len(self.neuronCheckList))]\n \n self.win=GraphWin(\"Neurons\",self.l,self.h,autoflush=False)\n\n for ob in self.constructList:\n if ob not in self.storageList[0]:\n count1 += 1\n a = ob\n if not isinstance(a,list):\n a = [a]\n GraphicObjectType = 0\n if len(a) != 1:\n GraphicObjectType = 2\n for i in range(len(a)):\n count += 1\n self.storageList[0].append(a[i])\n if(GraphicObjectType==0):\n r = 5\n else:\n r = 3\n circ = Circle(self.assignLoc(count, count1, GraphicObjectType, len(a), i),r)\n self.storageList[1].append(circ)\n for circ in self.storageList[1]:\n circ.setFill(\"black\")\n circ.draw(self.win)\n for i in range(len(self.synapseConstList)):\n a=self.synapseConstList[i]\n pre = a.pre\n post = a.post\n preCoords = self.storageList[1][self.storageList[0].index(pre)].getCenter()\n postCoords = self.storageList[1][self.storageList[0].index(post)].getCenter()\n l = Line(preCoords,postCoords)\n if(a.weight<0):\n l.setFill(\"blue\")\n else:\n l.setFill(\"green\")\n l.draw(self.win)\n self.win.update()",
"def create_population(self, count):\n\n pop = []\n for _ in range(count):\n # Create a random network.\n network = Network(self.nn_param_choices)\n network.create_random()\n\n # Add the network to our population.\n pop.append(network)\n\n return pop",
"def star_neuron(wing_number=3,\n node_on_each_wings=4,\n spherical=False,\n length=10):\n nodes_list = []\n root = Node()\n root.r = 1.\n root.node_type = 1\n root.xyz = np.array([0, 0, 0], dtype=float)\n nodes_list.append(root)\n\n for i in range(0):\n soma = Node()\n soma.r = .2\n soma.node_type = 1\n soma.xyz = np.array([0, 0, 0], dtype=float)\n nodes_list.append(soma)\n root.add_child(soma)\n soma.parent = root\n\n angle = 2 * np.pi/wing_number\n for j in range(wing_number):\n rand_vec = np.random.randn(3)\n rand_vec = rand_vec/np.sqrt(sum(rand_vec**2))\n for i in range(node_on_each_wings):\n node = Node()\n node.r = .2\n node.node_type = 2\n if spherical:\n x = rand_vec[0] * length * (i+1)\n y = rand_vec[1] * length * (i+1)\n z = rand_vec[2] * length * (i+1)\n else:\n x = np.sin(j*angle) * length * (i+1)\n y = np.cos(j*angle) * length * (i+1)\n z = 0.\n node.xyz = np.array([x, y, z], dtype=float) # +0*np.random.rand(3)\n if i == 0:\n root.add_child(node)\n node.parent = root\n nodes_list.append(node)\n else:\n nodes_list[-1:][0].add_child(node)\n node.parent = nodes_list[-1:][0]\n nodes_list.append(node)\n neuron = Neuron(input_format='only list of nodes', input_file=nodes_list)\n return neuron",
"def generate_gaussian(num_examples=100):\n\n # Set parameters for each Gaussian in the mixture\n gaussians = [\n ([0.0, .00], .02 * np.eye(2), 200),\n ([.35, .55], .03 * np.eye(2), 200),\n ([0.0, 1.2], .04 * np.eye(2), 200),\n ([-1., 1.4], 1.0 * np.eye(2), 400),\n ]\n\n # Generate dataset\n examples = []\n for class_idx, (mu, sigma, count) in enumerate(gaussians):\n # Sample class from Gaussian\n class_examples = np.random.multivariate_normal(mu, sigma, count)\n\n # Add each example to the list\n n_labeled = 0\n for x in class_examples:\n x_dict = {'x_{}'.format(i+1): x_i for i, x_i in enumerate(x)}\n\n # Only label MAX_LABELED per class\n if n_labeled < MAX_LABELED:\n x_dict['z'] = class_idx\n n_labeled += 1\n else:\n x_dict['z'] = UNLABELED\n\n examples.append(x_dict)\n\n random.shuffle(examples)\n\n df = pd.DataFrame(examples)\n\n return df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the model for a given tag.
|
def get_model_by_tag(tag: str, device) -> nn.Module:
# Select model
if tag.lower() == "spn":
model = SPNNet(
in_features=28 * 28 * ARGS.n_labels,
n_labels=ARGS.n_labels,
n_mv=ARGS.n_gaussians,
).to(device)
else:
raise Exception("Invalid network: %s" % tag)
return model
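# Usage sketch (hypothetical, not part of the original snippet): assumes ARGS, SPNNet and
# torch are defined/imported in the surrounding module.
#   model = get_model_by_tag("spn", torch.device("cuda" if ARGS.cuda else "cpu"))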
|
[
"def get_model_by_tag(\n model_name: str,\n tag_name: str,\n tag_value: str,\n aml_workspace: Workspace = None\n) -> AMLModel:\n try:\n # Validate params. cannot be None.\n if model_name is None:\n raise ValueError(\"model_name[:str] is required\")\n if tag_name is None:\n raise ValueError(\"tag_name[:str] is required\")\n if tag_value is None:\n raise ValueError(\"tag[:str] is required\")\n if aml_workspace is None:\n aml_workspace = get_current_workspace()\n\n # get model by tag.\n model_list = AMLModel.list(\n aml_workspace, name=model_name,\n tags=[[tag_name, tag_value]], latest=True\n )\n\n # latest should only return 1 model, but if it does,\n # then maybe sdk or source code changed.\n should_not_happen = (\"Found more than one model \"\n \"for the latest with {{tag_name: {tag_name},\"\n \"tag_value: {tag_value}. \"\n \"Models found: {model_list}}}\")\\\n .format(tag_name=tag_name, tag_value=tag_value,\n model_list=model_list)\n no_model_found = (\"No Model found with {{tag_name: {tag_name} ,\"\n \"tag_value: {tag_value}.}}\")\\\n .format(tag_name=tag_name, tag_value=tag_value)\n\n if len(model_list) > 1:\n raise ValueError(should_not_happen)\n if len(model_list) == 1:\n return model_list[0]\n else:\n print(no_model_found)\n return None\n except Exception:\n raise",
"def get_tag_detail(self, tag_name):\n return Tag.objects(name=tag_name).first()",
"def get_tag(self, tag):\n if tag not in self.tags: return\n return self.tags[tag]",
"def get_model_from_name(self, search):\n models = [model for model, name in self.get_models()\n if name.endswith('.'+name) or name == search]\n if not models:\n raise CommandError(\"Unknown model: %s\" % search)\n if len(models)>1:\n raise CommandError(\"Ambiguous model name: %s\" % search)\n return models[0]",
"def get_model(model_name):\n if model_name in ['video', 'image', 'file']:\n return apps.get_model(app_label='courses', model_name=model_name)\n\n return apps.get_model(app_label='courses', model_name='text')",
"def _get_model(self, model_name):\n\n return self.registry.get(model_name)",
"def get_model(self):\n model = self.kwargs.get('model', None)\n if model:\n return model\n\n app_label = self.kwargs.get(self.app_label_url_kwarg, None)\n model_name = self.kwargs.get(self.model_name_url_kwarg, None)\n\n if not app_label or not model_label:\n raise AttributeError('Generic vote view must be called with '\n 'app_label and model_label ')\n\n model = get_model(app_label, model_name)\n if not model:\n raise AttributeError('Model %s.%s does not exist' % (\n app_label, model_name))\n\n return model",
"def model_matching_tag(tag_text, model_class, current_user, matching_property=None):\n filter_against = matching_property or model_class.tag_property_name\n models = model_class.objects.filter(**{filter_against: tag_text})\n\n if len(models) is 1:\n return models[0]\n\n elif len(models) is 0:\n model = model_class()\n setattr(model, filter_against, tag_text)\n model.save(current_user)\n return model\n\n else:\n return None",
"def get_model(model_pk):\n return Model.objects.get(pk=model_pk)",
"def tag(self, tag):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.tag\", \r\n self._object._eco_id, tag)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)",
"def by_tag(tag):\n return _ohlcfield_lookup[tag.lower()]",
"def get_latest_model(\n model_name: str,\n tag_name: str = None,\n tag_value: str = None,\n aml_workspace: Workspace = None\n) -> AMLModel:\n try:\n # Validate params. cannot be None.\n if model_name is None:\n raise ValueError(\"model_name[:str] is required\")\n\n if aml_workspace is None:\n print(\"No workspace defined - using current experiment workspace.\")\n aml_workspace = get_current_workspace()\n\n model_list = None\n tag_ext = \"\"\n\n # Get lastest model\n # True: by name and tags\n if tag_name is not None and tag_value is not None:\n model_list = AMLModel.list(\n aml_workspace, name=model_name,\n tags=[[tag_name, tag_value]], latest=True\n )\n tag_ext = f\"tag_name: {tag_name}, tag_value: {tag_value}.\"\n # False: Only by name\n else:\n model_list = AMLModel.list(\n aml_workspace, name=model_name, latest=True)\n\n # latest should only return 1 model, but if it does,\n # then maybe sdk or source code changed.\n\n # define the error messages\n too_many_model_message = (\"Found more than one latest model. \"\n f\"Models found: {model_list}. \"\n f\"{tag_ext}\")\n\n no_model_found_message = (f\"No Model found with name: {model_name}. \"\n f\"{tag_ext}\")\n\n if len(model_list) > 1:\n raise ValueError(too_many_model_message)\n if len(model_list) == 1:\n return model_list[0]\n else:\n print(no_model_found_message)\n return None\n except Exception:\n raise",
"def get_model(data, labels, params):\n\t\tif params['model_type'] == 'single': \n\t\t\treturn SingleModel(data, labels, params) \n\t\telse:\n\t\t\treturn EnsembleModel(data, labels, params)",
"def from_tag(cls, tag: str) -> \"Release\":\n resp = requests.get(f\"{cls.class_url}/{tag}\")\n if resp.status_code != 200:\n error(f\"Failed to fetch release {tag}: {resp.status_code} - {resp.json()['message']}\")\n breakpoint()\n exit(1)\n return cls.from_api(resp.json())",
"def get_model(self, model_id) -> Model:\n return self._get_single(Entity.Model, model_id)",
"def get_model(self, index):\n\n return self._models[index]",
"def lookup_variant(self, tag):\n for variant in self.variants():\n if self.serialize(variant) == tag:\n return variant",
"def get_Model(self):\n return self.GetStringDescriptor(StringDescriptor.Model)",
"def lookup_model(cls, model_name):\n return cls.driver.Model._decl_class_registry[model_name.capitalize()]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run the MNIST experiment.
|
def main():
log_file = os.path.join(ARGS.result_dir, ARGS.experiment_name, "log.txt")
print("Result dir: %s", ARGS.result_dir)
print("Log file: %s", log_file)
# Setup logging in base_dir/log.txt
setup_logging(level=ARGS.log_level, filename=log_file)
logger.info(" -- MNIST Multilabel -- Started ")
tstart = time.time()
try:
if not ARGS.cuda:
# Set number of CPU threads
torch.set_num_threads(ARGS.njobs)
# Create and run experiment
run_multilabel_mnist(ARGS)
except Exception as e:
logger.exception("Experiment crashed.")
logger.exception("Exception: %s", str(e))
# Measure time
tstr = time_delta_now(tstart)
logger.info(" -- MNIST -- Finished, took %s", tstr)
|
[
"def mnist():\n output_header(\"Running the MNIST Classifier Pipeline.\")\n pipeline = MnistPipeline()\n pipeline.execute_pipeline()",
"def test_keras_mnist():\n data = fetch('mnist')\n check(data, (60000, 28*28), (10000, 28*28))",
"def get_mnist():\n mndata = MNIST('./data/')\n train_x, train_y = mndata.load_training()\n test_x, test_y = mndata.load_testing()\n print(\"Loaded MNIST\")\n return train_x, train_y, test_x, test_y",
"def mnist_augment():\n output_header(\"Running the MNIST Classifier Pipeline with Augmented Data.\")\n pipeline = MnistShiftPipeline()\n pipeline.execute_pipeline()",
"def initializeMNISTData():\n # Initialize the MNIST dataset\n print()\n print(\"Initializing MNIST dataset...\")\n mnistData = MNISTData()\n return mnistData",
"def load_mnist():\r\n\r\n print('Loading train data...')\r\n train_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST('mnist/', \r\n train=True, \r\n download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor()\r\n ])),\r\n shuffle=True,)\r\n\r\n train_input = []\r\n train_label = []\r\n \r\n cnt = 0\r\n for batch, label in tqdm(train_data):\r\n train_input.append(batch.squeeze().numpy().reshape(784,))\r\n train_label.append(label.numpy())\r\n cnt += 1\r\n if cnt == 1300: break\r\n\r\n print('Loading test data...')\r\n test_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST('mnist/', \r\n train=False, \r\n download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor()\r\n ])),\r\n shuffle=True,)\r\n\r\n test_input = []\r\n test_label = []\r\n \r\n for batch, label in tqdm(test_data):\r\n test_input.append(batch.squeeze().numpy().reshape(784,))\r\n test_label.append(label.numpy())\r\n\r\n return np.array(train_input), np.array(train_label), np.array(test_input), np.array(test_label)",
"def import_mnist(preprocess=True):\n print(\"Downloading MNIST data...\", end='')\n from keras.datasets import mnist\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\n if(preprocess):\n X_train = pre_process(X_train)\n X_test = pre_process(X_test)\n print(\"done.\")\n return X_train, y_train, X_test, y_test, X_train.shape[0]",
"def train_mnist(data_dir, num_epochs, use_fake_data=False):\n # Load a dataset.\n tf.logging.info(\"Loading MNIST into memory.\")\n examples, labels = mnist.load_mnist(\n data_dir,\n num_epochs=num_epochs,\n batch_size=64,\n flatten_images=True,\n use_fake_data=use_fake_data)\n\n # Build an MLP. The model's layers will be added to the LayerCollection.\n tf.logging.info(\"Building model.\")\n layer_collection = lc.LayerCollection()\n loss, accuracy = build_model(examples, labels, 10, layer_collection)\n\n # Fit model.\n minimize(loss, accuracy, layer_collection, 1)",
"def preprocess_mnist(test, img_rows=28, img_cols=28, n_samples=None):\n print(\"\\nLoading mnist.\")\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, 10)\n y_test = keras.utils.to_categorical(y_test, 10)\n\n if n_samples:\n x_train = x_train[:n_samples]\n y_train = y_train[:n_samples]\n x_test = x_test[:n_samples]\n y_test = y_test[:n_samples]\n else:\n if test:\n x_train = x_train[:TEST_SIZE]\n y_train = y_train[:TEST_SIZE]\n x_test = x_test[:TEST_SIZE]\n y_test = y_test[:TEST_SIZE]\n\n num_classes = 10\n data_format = 'channels_last'\n\n # # swap channels\n # x_train = np.zeros((x_train.shape[0], img_rows, img_cols, 1))\n # x_train = np.rollaxis(x_train, 3, 1)\n # x_test = np.zeros((x_test.shape[0], img_rows, img_cols, 1))\n # x_test = np.rollaxis(x_test, 3, 1)\n # data_format = \"channels_first\"\n # input_shape = (1, img_rows, img_cols)\n\n print('x_train shape:', x_train.shape, '\\nx_test shape:', x_test.shape)\n return x_train, y_train, x_test, y_test, input_shape, num_classes, data_format",
"def fashion_mnist_client():\n test_df = pd.read_csv('fashion-mnist_test.csv')\n train_df = pd.read_csv('fashion-mnist_train.csv')\n y_train = one_hot_encode(np.array(train_df['label'])).astype(np.float64)\n del train_df['label']\n x_train = np.array(train_df, dtype=np.float64)\n y_test = one_hot_encode(np.array(test_df['label'])).astype(np.float64)\n del test_df['label']\n x_test = np.array(test_df, dtype=np.float64)\n\n # Normalize the data\n x_train /= 255.0\n x_test /= 255.0\n\n x_layer = layers.Input(784, False)\n b1_layer = layers.Input(28, True)\n b1_layer.randomize()\n W1_layer = layers.Input([28, 784], True)\n W1_layer.randomize()\n linear1_layer = layers.Linear(x_layer, W1_layer, b1_layer)\n relu_layer = layers.ReLU(linear1_layer)\n b2_layer = layers.Input(10, True)\n b2_layer.randomize()\n W2_layer = layers.Input([10, 28], True)\n W2_layer.randomize()\n linear2_layer = layers.Linear(relu_layer, W2_layer, b2_layer)\n y_layer = layers.Input(10, False)\n softmaxce_layer = layers.SoftmaxCrossEntropy(linear2_layer, y_layer)\n\n net = network.Network()\n net.add(x_layer)\n net.add(W1_layer)\n net.add(b1_layer)\n net.add(linear1_layer)\n net.add(relu_layer)\n net.add(W2_layer)\n net.add(b2_layer)\n net.add(linear2_layer)\n net.add(softmaxce_layer)\n\n start = time.perf_counter()\n\n for epoch in range(10):\n count_training = 0\n total_loss_training = 0\n for i in range(60000): # 1 epoch of training\n y_layer.set(torch.from_numpy(y_train[i]))\n if net.forward(x_train[i]) == np.argmax(y_train[i]):\n count_training = count_training + 1\n total_loss_training = total_loss_training + net.get_output().item()\n net.backward()\n net.step(.01)\n\n count_testing = 0\n total_loss_testing = 0\n for i in range(10000): # 1 epoch of testing\n y_layer.set(torch.from_numpy(y_test[i]))\n if net.forward(x_test[i]) == np.argmax(y_test[i]):\n count_testing = count_testing + 1\n total_loss_testing = total_loss_testing + net.get_output().item()\n print('Epoch ' + str(epoch+1) + ', Training acc: ' + str(count_training/60000 * 100) + '%, Training loss: ' + str(total_loss_training/60000)\n + ', Testing acc: ' + str(count_testing/10000 * 100) + '%, Testing loss: ' + str(total_loss_testing/10000))\n end = time.perf_counter()\n print('total training time: ' + str(end - start) + 'sec')",
"def download_mnist():\n logging.info(\"Generate mnist_x.npy and mnist_y.npy\")\n (x, y), _ = tf.keras.datasets.mnist.load_data()\n np.save('mnist_x', x)\n np.save('mnist_y', y)",
"def train_network_on_mnist(\n layers, params, subset_size=None, output_file=None, noise=None):\n # Load MNIST data\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(\"mnist-data\", one_hot=True)\n X_train, y_train = mnist.train.images, mnist.train.labels\n X_valid, y_valid = mnist.validation.images, mnist.validation.labels\n if subset_size is not None:\n X_train, y_train = X_train[-subset_size:], y_train[-subset_size:]\n X_valid, y_valid = X_valid[-subset_size:], y_valid[-subset_size:]\n X_train = np.reshape(X_train, (-1, 28, 28, 1))\n X_valid = np.reshape(X_valid, (-1, 28, 28, 1))\n features_shape = (28, 28, 1)\n # Create a neural network, train it and measure time\n neural_network = NeuralNetwork(features_shape, layers, params)\n t0 = time.time()\n neural_network.train({ \"train\": X_train, \"valid\": X_valid })\n t1 = time.time()\n print(\"Duration: %.1fs\" % (t1 - t0))\n # Visualize results on a few random samples of the validation set\n if output_file is not None:\n try:\n import matplotlib.pyplot as pyplot\n import matplotlib.image as imglib\n except ImportError:\n print(\"Module 'matplotlib' not found. Skipping visualization.\")\n return\n num_samples = 40\n input_indices = np.random.choice(X_valid.shape[0], num_samples)\n inputs = X_valid[input_indices]\n # Add gaussian noise with given standard deviation\n if noise is not None:\n inputs = np.maximum(0, np.minimum(1, inputs +\n np.random.normal(scale=noise, size=inputs.shape)))\n outputs = neural_network.calculate_output(inputs)\n num_image_cols = 5\n num_image_rows = 8\n col_spacing = 40\n row_spacing = 10\n image = np.zeros((num_image_rows * (28 + row_spacing) - row_spacing,\n num_image_cols * (2*28 + col_spacing) - col_spacing))\n for i in range(num_samples):\n col = i % num_image_cols\n row = i % num_image_rows\n x = col * (2 * 28 + col_spacing)\n y = row * (28 + row_spacing)\n image[y:y+28,x:x+28] = inputs[i].reshape((28, 28))\n image[y:y+28,x+28:x+2*28] = outputs[i].reshape((28, 28))\n imglib.imsave(output_file, image,\n format=os.path.splitext(output_file)[1][1:], cmap=\"Greys\")",
"def mnist_model(inputs, mode):\r\n # Input Layer\r\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\r\n # MNIST images are 28x28 pixels, and have one color channel\r\n inputs = tf.reshape(inputs, [-1, 28, 28, 1])\r\n data_format = 'channels_last'\r\n\r\n if tf.test.is_built_with_cuda():\r\n # When running on GPU, transpose the data from channels_last (NHWC) to\r\n # channels_first (NCHW) to improve performance.\r\n # See https://www.tensorflow.org/performance/performance_guide#data_formats\r\n data_format = 'channels_first'\r\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\r\n\r\n # Convolutional Layer #1\r\n # Computes 32 features using a 5x5 filter with ReLU activation.\r\n # Padding is added to preserve width and height.\r\n # Input Tensor Shape: [batch_size, 28, 28, 1]\r\n # Output Tensor Shape: [batch_size, 28, 28, 32]\r\n conv1 = tf.layers.conv2d(\r\n inputs=inputs,\r\n filters=32,\r\n kernel_size=[5, 5],\r\n padding='same',\r\n activation=tf.nn.relu,\r\n data_format=data_format)\r\n\r\n # Pooling Layer #1\r\n # First max pooling layer with a 2x2 filter and stride of 2\r\n # Input Tensor Shape: [batch_size, 28, 28, 32]\r\n # Output Tensor Shape: [batch_size, 14, 14, 32]\r\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2,\r\n data_format=data_format)\r\n\r\n # Convolutional Layer #2\r\n # Computes 64 features using a 5x5 filter.\r\n # Padding is added to preserve width and height.\r\n # Input Tensor Shape: [batch_size, 14, 14, 32]\r\n # Output Tensor Shape: [batch_size, 14, 14, 64]\r\n conv2 = tf.layers.conv2d(\r\n inputs=pool1,\r\n filters=64,\r\n kernel_size=[5, 5],\r\n padding='same',\r\n activation=tf.nn.relu,\r\n data_format=data_format)\r\n\r\n # Pooling Layer #2\r\n # Second max pooling layer with a 2x2 filter and stride of 2\r\n # Input Tensor Shape: [batch_size, 14, 14, 64]\r\n # Output Tensor Shape: [batch_size, 7, 7, 64]\r\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2,\r\n data_format=data_format)\r\n\r\n ########################################\r\n #Matthew Bitter Edit - May 7th, 2018\r\n ########################################\r\n #Adding an additional convolution to model to improve flexbility in learning\r\n conv3 = tf.layers.conv2d(\r\n inputs=pool2,\r\n filters=64,\r\n kernel_size=[5, 5],\r\n padding='same',\r\n activation=tf.nn.relu,\r\n data_format=data_format)\r\n\r\n\r\n # Flatten tensor into a batch of vectors\r\n # Input Tensor Shape: [batch_size, 7, 7, 64]\r\n # Output Tensor Shape: [batch_size, 7 * 7 * 64]\r\n pool2_flat = tf.reshape(conv3, [-1, 7 * 7 * 64])\r\n\r\n # Dense Layer\r\n # Densely connected layer with 1024 neurons\r\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\r\n # Output Tensor Shape: [batch_size, 1024]\r\n dense = tf.layers.dense(inputs=pool2_flat, units=1024,\r\n activation=tf.nn.relu)\r\n\r\n # Add dropout operation; 0.6 probability that element will be kept\r\n dropout = tf.layers.dropout(\r\n inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))\r\n\r\n # Logits layer\r\n # Input Tensor Shape: [batch_size, 1024]\r\n # Output Tensor Shape: [batch_size, 10]\r\n logits = tf.layers.dense(inputs=dropout, units=10)\r\n return logits",
"def run_iris_test_case(num_iterations = 300, learning_rate = 0.01):\n layer_dims = [5,3,1]\n X_train, X_test, y_train, y_test = get_train_test_iris()\n test_pred, _ = train_network(X_train, y_train, X_test, y_test, layer_dims, learning_rate, num_iterations)\n return test_pred",
"def load_mnist_images():\n (mnist_x, mnist_y), (mnist_x_test, mnist_y_test) = mnist.load_data()\n\n mnist_x, mnist_x_test = np.reshape(mnist_x, (-1, 28, 28, 1)), np.reshape(mnist_x_test, (-1, 28, 28, 1))\n\n # Scale everything to 0-1\n mnist_x, mnist_x_test, = normalize_0_1([mnist_x, mnist_x_test])\n\n return (mnist_x, transform_to_one_hot(mnist_y, depth=10)), (mnist_x_test, transform_to_one_hot(mnist_y_test, depth=10))",
"def get_and_process_MNIST_data(self):\n\n #mndata = MNIST() \n #self.train_images, self.train_labels = mndata.load_training() \n self.train_images, self.train_labels = np.reshape(mndata.train_images(),(60000,784)), mndata.train_labels()\n self.train_images, self.train_labels = self.train_images[:500], self.train_labels[:500] \n print(np.shape(self.train_images)) \n print(np.shape(self.train_labels)) \n ## labeling the pixels back \n self.train_images, self.train_labels = np.array([[1 if p > 0.5 else -1 for p in i] for i in self.train_images]), np.array(self.train_labels)\n \n ### i need to change the below code so it iterate through the matrix properly \n #self.train_images, self.train_labels = np.array([[1 if p > 0.5 else -1 for p in i] for i in self.train_images), np.array(self.train_labels)\n side_length = int(np.sqrt(self.train_images.shape[1]))\n self.orig_train_images = copy.deepcopy(self.train_images.reshape((self.train_images.shape[0], side_length, side_length)))\n self.noisy_train_images = np.zeros((self.train_images.shape[0], side_length, side_length))\n for im in np.arange(self.train_images.shape[0]):\n random_inds = random.sample(range(1, self.train_images.shape[1]), int(0.02 * self.train_images.shape[1]))\n self.train_images[im, random_inds] = np.where(self.train_images[im, random_inds] == -1, 1, -1)\n self.noisy_train_images[im, :, :] = self.train_images[im, :].reshape(side_length, side_length)\n self.side_length = side_length",
"def main(_):\n # Fix directories\n if tf.gfile.Exists(FLAGS.log_dir):\n tf.gfile.DeleteRecursively(FLAGS.log_dir)\n tf.gfile.MakeDirs(FLAGS.log_dir)\n\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n n_samples = mnist.train.num_examples\n\n # Start training\n print \"Starting Session\"\n with tf.Session() as sess:\n # Instantiate Network\n vae = VAE()\n\n # Create a saver\n saver = tf.train.Saver(tf.all_variables())\n\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n\n # Run through the epochs\n for epoch in range(FLAGS.epochs):\n avg_cost = 0.\n total_batch = n_samples / FLAGS.batch_size\n\n # Loop over batches\n for i in range(total_batch):\n batch_x, _ = mnist.train.next_batch(FLAGS.batch_size)\n cost, _ = sess.run([vae.loss_val, vae.train_op], feed_dict={vae.X: batch_x})\n avg_cost += cost / (n_samples * FLAGS.batch_size)\n\n # Display step\n if epoch % FLAGS.display_step == 0:\n print \"Epoch:\", epoch, \" \" * 4, \"Average Cost:\", avg_cost\n checkpoint_path = os.path.join(FLAGS.log_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, epoch)\n\n # Generate Reconstructed Pictures\n if FLAGS.z_dim > 2:\n x_sample = mnist.test.next_batch(FLAGS.batch_size)[0]\n x_reconstruct = vae.reconstruct(sess, x_sample)\n\n plt.figure(figsize=(8, 12))\n for i in range(5):\n plt.subplot(5, 2, 2 * i + 1)\n plt.imshow(x_sample[i+10].reshape(28, 28), vmin=0, vmax=1)\n plt.title(\"Test input\")\n plt.colorbar()\n plt.subplot(5, 2, 2 * i + 2)\n plt.imshow(x_reconstruct[i+10].reshape(28, 28), vmin=0, vmax=1)\n plt.title(\"Reconstruction\")\n plt.colorbar()\n plt.tight_layout()\n plt.show()\n else:\n nx = ny = 20\n x_values = np.linspace(-3, 3, nx)\n y_values = np.linspace(-3, 3, ny)\n canvas = np.empty((28 * ny, 28 * nx))\n for i, yi in enumerate(x_values):\n for j, xi in enumerate(y_values):\n z_mu = np.tile(np.array([[xi, yi]]), (FLAGS.batch_size, 1))\n x_mean = vae.generate(sess, z_mu)\n canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)\n\n plt.figure(figsize=(8, 10))\n Xi, Yi = np.meshgrid(x_values, y_values)\n plt.imshow(canvas, origin=\"upper\")\n plt.tight_layout()\n plt.show()",
"def main(args):\n mnist = load_mnist(val_seed=123)\n\n if not args.no_training:\n train_network(mnist, 1, args.outdir)\n train_network(mnist, 2, args.outdir)\n\n model1 = load_network(1, args.outdir)\n model2 = load_network(2, args.outdir)\n encoder1, decoder1 = split_network(model1)\n encoder2, decoder2 = split_network(model2)\n\n create_montage(mnist, model1, model2, args.outdir)\n create_scatter(mnist, encoder1, decoder1, args.outdir)\n do_experiment_on_model1_rules(decoder1, args.outdir)\n do_experiment_on_model2_rules(mnist, encoder2, decoder2, args.outdir)",
"def display_mnist_image(image: np.ndarray):\n plt.imshow(image.reshape(28, 28))\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests whether ``put_value_into`` is working as intended.
|
def test__put_value_into():
for input_value, defaults, expected_output in (
(None, False, {'value': None}),
(None, True, {'value': None}),
('a', False, {'value': 'a'}),
(12.6, False, {'value': 12.6}),
(6, False, {'value': 6}),
):
data = put_value_into(input_value, {}, defaults)
vampytest.assert_eq(data, expected_output)
|
[
"def test__put_type_into(input_value, defaults):\n return put_type_into(input_value, {}, defaults)",
"def test_put_value():\n runner = CliRunner()\n testPut = runner.invoke(commands, ['put', 'key1', 'value1'])\n assert testPut.exit_code == 0\n assert testPut.output == '{\"key1\": \"value1\"}\\n\\n'",
"def test_core_save_stored_value_v1(self):\n pass",
"def test_map_value():\n\tvalues = []\n\n\tfor key_list, value_list in zip(MAP_KEYS, MAP_VALUES):\n\t\tdata = {}\n\n\t\tfor key, value in zip(key_list, value_list):\n\t\t\tdata[key] = value\n\n\t\tvalues.append(data)\n\n\tbackup_and_restore(\n\t\tlambda context: put_values(lib.SET, \"key\", values),\n\t\tNone,\n\t\tlambda context: check_values(lib.SET, \"key\", values)\n\t)",
"def test_setitem(self):\n\n st_struct = struct.WritableObjectProxy()\n st_struct['hi'] = True\n\n assert 'hi' in st_struct\n assert st_struct['hi'] is True",
"def assume_working_value_setting(self, setting, value, type_, source=\"direct\",\n desired_value=None):\n setting.set_value(value, source)\n self.assertEqual(setting.value, desired_value if desired_value is not None else value)\n self.assert_(isinstance(setting.value, type_))",
"def test_list_value():\n\tbackup_and_restore(\n\t\tlambda context: put_values(lib.SET, \"key\", LIST_VALUES),\n\t\tNone,\n\t\tlambda context: check_values(lib.SET, \"key\", LIST_VALUES)\n\t)",
"def test_set_saves_value(self):\n RedisGateway.set_value_for_key('fake_key', 'fake_value')\n\n self.assertEquals(\n RedisGateway.fetch_value_for_key('fake_key'),\n 'fake_value',\n )",
"def test_core_get_stored_value_v1(self):\n pass",
"def test_put(self):\n s = IntegralStuff(8)\n o = IntegralStuff(4)\n res = s.put(o)\n self.assertEqual(s.units, 12)\n self.assertEqual(o.units, 0)\n self.assertIs(res, s)\n\n s = IntegralStuff(8)\n o = IntegralStuff(4)\n s += o\n self.assertEqual(s.units, 12)\n self.assertEqual(o.units, 0)",
"def test_put(self):\n self._test(\n \"put foo in bar\",\n [\"You put the foo in the bar.\"],\n [\"Test Player puts a foo in a bar.\"])\n self.assertIdentical(self.player.location, self.location)\n self.assertIdentical(self.object.location, self.container)\n self.assertIdentical(self.container.location, self.location)\n self.assertEquals(list(self.containerContainer.getContents()), [self.object])",
"def test_put_in():\n print('Testing put_in()')\n\n alist = [0,1,2,4]\n result = funcs.put_in(alist,3)\n introcs.assert_equals(None,result)\n introcs.assert_equals([0,1,2,3,4],alist)\n\n result = funcs.put_in(alist,-1)\n introcs.assert_equals(None,result)\n introcs.assert_equals([-1,0,1,2,3,4],alist)\n\n result = funcs.put_in(alist,2)\n introcs.assert_equals(None,result)\n introcs.assert_equals([-1,0,1,2,2,3,4],alist)\n\n result = funcs.put_in(alist,0)\n introcs.assert_equals(None,result)\n introcs.assert_equals([-1,0,0,1,2,2,3,4],alist)\n\n alist = []\n result = funcs.put_in(alist,0)\n introcs.assert_equals(None,result)\n introcs.assert_equals([0],alist)\n\n result = funcs.put_in(alist,1)\n introcs.assert_equals(None,result)\n introcs.assert_equals([0,1],alist)\n\n alist = ['a','aa','ab','b','ce']\n result = funcs.put_in(alist,'aab')\n introcs.assert_equals(None,result)\n introcs.assert_equals(['a','aa','aab','ab','b','ce'],alist)",
"def test__put_position_into(position, defaults):\n return put_position_into(position, {}, defaults)",
"def step_add_return_value(context: dict) -> None:\n context.expected_output = context.fake.pydict()\n context.mocked_function.return_value = context.expected_output",
"def test_config_put(self):\n pass",
"def can_set_value(self):\n raise NotImplementedError",
"def put_if_empty(self, key, value):\n if self.has_data_for_key(key):\n return False\n self.put_data(key, value)\n return True",
"def test__put_target_id_into():\n for input_, defaults, expected_output in (\n (0, False, {}),\n (0, True, {'target_id': None}),\n (1, False, {'target_id': '1'}),\n ):\n data = put_target_id_into(input_, {}, defaults)\n vampytest.assert_eq(data, expected_output)",
"def test__put_permissions_into():\n for input_value, defaults, expected_output in (\n (None, True, {'permissions': []}),\n ((TeamMemberPermission.admin, ), False, {'permissions': [TeamMemberPermission.admin.value]}),\n ):\n data = put_permissions_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the user list.
|
def __init__(self):
self._user_list = []
self._current_user = None
|
[
"def __init__(\n self, xdata: Optional[XData] = None, name=\"DefaultDict\", appid=\"EZDXF\"\n ):\n self._xlist = XDataUserList(xdata, name, appid)\n self._user_dict: dict[str, Any] = self._parse_xlist()",
"def initialize_users_table(self):\n self.execute_queries(queryutils.sql.INIT_USERS[self.dbtype])",
"def load_users(self):\n\n self.users.load()",
"def initialize_user():\n flask.g.user = readit.User(flask.session.get('session_key', None))\n flask.g.user.user_id = flask.session.get('user_id', None)",
"def init_users():\n admin = AuthUser(username='admin')\n # Setting and encrypting the hardcoded password.\n admin.set_and_encrypt_password('password', salt='123')\n # Persisting users for this request.\n g.users = {'admin': admin}",
"def setMainRoomUserListStore(self):\n self._mainRoomUserListStore.clear()\n for u in self._userStore:\n if u.userChatRoom == ROOM_IDS.MAIN_ROOM:\n self._mainRoomUserListStore.append([\"A\", u.userName])\n elif u.userChatRoom != ROOM_IDS.OUT_OF_THE_SYSTEM_ROOM:\n self._mainRoomUserListStore.append([\"M\", u.userName])",
"def __read_users(self):\n\n path = os.path.join(self.cwd,'data/users')\n available_users = os.listdir(path)\n if len(available_users)>0:\n for user_id in available_users:\n if user_id == 'README.md':\n continue\n #assuming the user data was stored in JSON format\n with open(os.path.join(path,user_id),'r') as file:\n user_data = json.load(file)\n user = UserProfile(user_data['id'],user_data['name'], user_data['email'], \n user_data['password'], user_data['timeline'])\n user.init_friends(user_data['friends'])\n user.init_my_groups(user_data['my_groups'])\n user.init_joined_groups(user_data['joined_groups'])\n user.init_my_pages(user_data['my_pages'])\n user.init_followed_pages(user_data['followed_pages'])\n user.init_events(user_data['my_events'])\n self.users[user_id.split('.')[0]]=user",
"def populateList(self):\n self.send(\"USR ,\")",
"def add_authorized_users(\n self, users_list: Sequence[int], no_auth_state: Optional[State] = None\n ):\n self.no_auth_state = no_auth_state\n self.users_list = users_list",
"def __init__(self, user_name, user_email, user_password):\n self.user_name = user_name\n self.user_email = user_email\n self.user_password = user_password\n self.bucket_lists = {}",
"def initalize_user_tables(db):\n \n from shotglass2.users.models import init_db as users_init_db \n users_init_db(db)",
"def test_list_all_users(self):\n pass",
"def __init__(self, user):\n super(UserItemData, self).__init__()\n self._user = user",
"def initialization(self):\n self.user_menu = component.Component(security.get_user())\n if security.get_user():\n if self.default_board_id:\n self.select_board(self.default_board_id)\n else:\n self.select_last_board()\n return self",
"def __init__(self, usernames):\n super(Oplist, self).__init__(usernames)\n for entry in self.acl:\n entry['level'] = MINECRAFT_OP_CODE",
"async def user_list():\n print(\"!!!!usrlist!!!! Scanning Servers and nicknames as requested\")\n await bot.say(\"A list of the users in the servers has been logged into the bot console.\")\n print(\"\\nLog datetime: \" + current_datetime)\n print(\"----------------------\")\n for server in bot.servers:\n for member in server.members:\n print(\n \"server: {0} | user: {1.name} | user_id: {1.id} | role: {1.top_role} | role_id: {1.top_role.id}\".format(\n server, member,\n member))",
"def __init__(self, params):\n super(Users, self).__init__()\n self.params = params",
"def initialize_user_profile(sender, request, user, **kwargs):\n authz_token = get_authz_token(request)\n user_profile_client_pool.initializeUserProfile(authz_token)\n log.debug(\"initialized user profile for {}\".format(user.username))",
"def initialize_options(self):\n # Each user option must be listed here with their default value."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the user that is currently logged in.
|
def current_user(self, user):
self._current_user = user
|
[
"def set_user(self, user):\n self.user = user",
"def set_auth_user(self, user):\n patcher = mock.patch('sndlatr.auth.get_current_user')\n self.get_user_mock = patcher.start()\n self.get_user_mock.return_value = user\n self.addCleanup(patcher.stop)",
"def set_new_user(self):\n self.current_user = random.choice(self.hosts)",
"def set_user_id(self,user_id):\n self.user_id = user_id",
"def set_admin_user(self, user, password):\n\n self.user = user\n self.password = password",
"def set_user_name(self, user_name): \n self.user_name = user_name",
"def _set_anonymous_user(self):\n _request_ctx_stack.top.keystone_user = self.Anonymous()",
"def set_for_user(self, user_id: Union[UUID, str], token: str) -> NoReturn:\n raise NotImplementedError()",
"def login(self, user):\n self.set_secure_cookie(\"user_id\", str(user.key().id()))",
"def user_default(self, user_default: ConfigNodePropertyString):\n\n self._user_default = user_default",
"def login(self, userid):\n self.session['auth.userid'] = userid",
"def user_login(user):\n session['user'] = user.username",
"def useridentifier(self, useridentifier):\n self._useridentifier = useridentifier",
"def initialize_user():\n flask.g.user = readit.User(flask.session.get('session_key', None))\n flask.g.user.user_id = flask.session.get('user_id', None)",
"def user_id(self, user_id: ConfigNodePropertyString):\n\n self._user_id = user_id",
"def running_user(self, running_user):\n\n self._running_user = running_user",
"def set_user_obj(influencer):\n if not influencer.blog_url:\n return False\n blog_url = influencer.blog_url.strip('http://').strip('www.').rstrip('/')\n prof = UserProfile.objects.filter(blog_page__icontains=blog_url)\n if prof.exists():\n print \"Influencer %s has a user %s blog %s\" % (influencer.blog_url, prof, prof[0].blog_page)\n influencer.shelf_user = prof[0].user\n influencer.save()\n return True\n return False",
"def setUserName(*args, **kwargs):\n \n pass",
"def set_user(self, user: str = 'sdss', password: str = None) -> None:\n\n default_user = self._custom_config.get('default_username', None)\n default_pass = self._custom_config.get('default_userpass', None)\n user = default_user or user\n password = default_pass or password\n log.debug(f'Setting user {user}. '\n '{\"No password specified.\" if not password else \"Password specified.\"}')\n self.user = User(user)\n if not self.user.validated and user and password:\n self.user.validate_user(password)\n\n if not self.user.validated:\n raise BrainError(f'Could not validate default user {user}!')\n else:\n log.debug(f'Validated user {user}')\n\n if not self.user.validated:\n log.warning(f'User {user} is not validated. Check your netrc credentials '\n 'or validate your user with config.set_user(username, password)')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the FAM user list.
|
def user_list(self):
return self._user_list
|
[
"def getUserList(self):\n\n url = self.url.replace('/ClientSettings.do', '/FTPSettings.do', 1)\n self._openPath(url)\n data = self._parseForJsVarPart('tableData0')\n # data['rows'] - list of users\n # There is other information in tableData0, but all we want is the client rows for now\n for user in iter(data['rows']):\n # user[0] is a list containing ['UserID', '', 'disabled']\n # user[1] is \"username\"\n # user[2] is \"Full Name\" (Primary User)\n # user[3] is \"(javaScript stuff to show/change password\"\n # user[4] is \"accessLevel\" (/)\n userID = user[0][0]\n userName = user[1]\n name = user[2]\n accessLevel = user[4]\n self.users[userName] = (userID, name, accessLevel)\n return self.users.keys()",
"def get_users(self):\n res = self.getuserslist()\n # convert to user object\n return [WithingsUser.create(u) for u in res['users']]",
"def list(self, subcmd):\n\n for user in self.db.get_users():\n print(user.name)",
"def list(cls, include_deleted=False):\n data = cls.api._get('users/list',\n include_deleted='1' if include_deleted else '0')\n return [cls._parse(u) for u in data['users']]",
"def get_userlist(self, room):\n users = \"\"\n with Chat.lock:\n for user in room.users:\n users += \" * {}\".format(user)\n if user == self.name:\n users += \" (** this is you)\\n\"\n else:\n users += \"\\n\"\n users += \"end of list.\"\n return users",
"def getAllUsers(self):\n self.cursor.execute(\"select * from LmsUser ORDER BY LmsUserID\")\n res = self.cursor.fetchall()\n return res",
"def get_user_all_action():\n token = request.args.get('token')\n validate_token(token)\n data = User.get_user_all()\n return response_ok_list(data)",
"def listUsers():\n exec_get_all('SELECT username FROM users')",
"def list_users(self, instance, limit=None, marker=None):\r\n return instance.list_users(limit=limit, marker=marker)",
"def get_users_list():\n users_list = []\n path = f\"{os.environ['HOMEDRIVE']}/Users\"\n for in_dir in os.listdir(path=path):\n user_path = os.path.join(path, in_dir)\n if not os.path.isfile(user_path):\n users_list.append(user_path)\n return users_list",
"def getMemberList(self):\n memberList = list(users.find({\"account\": {\"$in\": self.registry.call(\"getMemberList\")}}, users.anonymous_info if self.get('rules').get(\"anonymous\") else users.public_info))\n return memberList",
"def get_users() -> list:\n ans = DatabaseConnector.get_values(\"SELECT * FROM user ORDER BY registry_date DESC \")\n\n return ans",
"def show_all_users():\n current_user_role = get_jwt_identity()['role']\n url = app.config[\"USERS_URL\"]\n user_service = UserProxyAccess(url)\n response = user_service.get_all_users()\n return jsonify(response), 200",
"def get_logged_in_users_list(user):\n t = TwitterUser(user.access_token, user.access_token_secret)\n lists = t.get_user_lists()\n res_lists = filter(lambda x:x if '_sees' in x['name'] else None, lists)\n return res_lists",
"def get_all():\n return list(User.objects.all())",
"def list_users(cluster):\n return AuthenticationTests.get_user_data(cluster).keys()",
"def get_users(self):\r\n sql = \"SELECT * FROM user WHERE auth <> 'root' LIMIT \" + str(self.user_per_page) + \" OFFSET \" + str(self.offset)\r\n self.cur.execute(sql)\r\n data = self.cur.fetchall()\r\n return data",
"def ipmiUserList():\n logging.debugv(\"functions/linux.py->ipmiUserList()\", [])\n\n cmd = locations.IPMITOOL + ' -I open user list | awk \\'{print $1\" \"$2}\\''\n logging.debug(cmd)\n\n users = os.popen(cmd)\n users.readline()\n choices = []\n for line in users.readlines():\n (id, user) = line.split()\n choices += [(id, user)]\n return choices",
"def get_all_users(self):\n return self.byte_decode_search_results(\n self.search(\n self.domain_base, \"(&(objectCategory=person)(objectClass=user))\"\n )\n )",
"def getUserList(self):\n \"\"\"Sets allowed days for the user\n server expects only the days that are allowed, sorted in ascending order\"\"\"\n # result\n result = 0\n message = \"\"\n userList = []\n\n try:\n # init store\n timekprUStore = timekprUserStore()\n # check if we have this user\n userList = timekprUStore.getSavedUserList(self._timekprConfig.getTimekprConfigDir())\n except Exception as unexpectedException:\n # logging\n log.log(cons.TK_LOG_LEVEL_INFO, \"Unexpected ERROR (%s): %s\" % (misc.whoami(), str(unexpectedException)))\n\n # result\n result = -1\n message = msg.getTranslation(\"TK_MSG_CONFIG_LOADER_USERLIST_UNEXPECTED_ERROR\")\n\n # result\n return result, message, userList"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the user list to the value passed in.
|
def user_list(self, user_list):
self._user_list = user_list
|
[
"def set_AssignedToUser(self, value):\n super(ListIncidentsInputSet, self)._set_input('AssignedToUser', value)",
"def modify_users(self, user_list):\n return self.user_manager.modify_objects(user_list)",
"def populateList(self):\n self.send(\"USR ,\")",
"def update_user_list():\n\n users_ = bot.client.api_call('users.list')\n users = json.loads(users_.decode('utf8'))['members']\n\n for user in users:\n id_ = user['id']\n name = user['name']\n\n user_obj = session.query(User).get(id_)\n if user_obj is None:\n user_obj = User(id=id_, name=name)\n session.add(user_obj)\n\n else:\n user_obj.name = name\n\n session.commit()",
"def _add_user_to_list(self, user):\n self._user_list.append(user)",
"def change_users(nodehandle, vesselname, userkeylist):\n assert_str(vesselname)\n assert_list_of_str(userkeylist)\n \n _do_signed_call(nodehandle[0], nodehandle[1], 'ChangeUsers', vesselname, '|'.join(userkeylist))",
"def setInternalList(self, lst):\n\n self.genomeList = lst",
"def set_list(self, quick_list):\n self.quick_list = quick_list",
"def __set_list_value(self, prop, val):\n\t\tif isinstance(val, str):\n\t\t\tif val != \"\":\n\t\t\t\tprop.append(val)\n\t\telif isinstance(val, list):\n\t\t\tif val:\n\t\t\t\tprop.extend([x.strip() for x in val])\n\t\telse:\n\t\t\traise TypeError(\"Expected string, got %r instead\" % type(val))",
"def serviceusers_list(self, serviceusers_list: ConfigNodePropertyArray):\n\n self._serviceusers_list = serviceusers_list",
"def save_User(self):\n User.User_lst.append(self)",
"def __init__(self):\n self._user_list = []\n self._current_user = None",
"def add_user(self, user_id, ranking):\n if type(ranking) is not list:\n ranking = list(ranking)\n self.lists[user_id] = ranking",
"def set_UserID(self, value):\n super(SearchPhotosInputSet, self)._set_input('UserID', value)",
"def SetValue(self, *args):\n return _TColStd.TColStd_ListNodeOfListOfReal_SetValue(self, *args)",
"def set(self, *value):\n del self[0:len(self)]\n for item in value:\n self.append(item)",
"def SetValue(self, *args):\n return _TColStd.TColStd_ListNodeOfListOfInteger_SetValue(self, *args)",
"def SetAclUserIds(self, value):\n valid_input = True\n if not isinstance(value, list):\n valid_input = False\n else:\n for acl_value in value:\n if not (isinstance(acl_value, basestring) and acl_value):\n valid_input = False\n break\n\n if not valid_input:\n raise endpoints.BadRequestException(\n 'ACL user IDs must be non-empty strings.')\n\n self._acl_user_ids = value",
"def setMainRoomUserListStore(self):\n self._mainRoomUserListStore.clear()\n for u in self._userStore:\n if u.userChatRoom == ROOM_IDS.MAIN_ROOM:\n self._mainRoomUserListStore.append([\"A\", u.userName])\n elif u.userChatRoom != ROOM_IDS.OUT_OF_THE_SYSTEM_ROOM:\n self._mainRoomUserListStore.append([\"M\", u.userName])",
"def set_use(self,v):\n _ldns.ldns_key_list_set_use(self,v)\n #parameters: ldns_key_list *,bool,\n #retvals: "
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a user to the list.
|
def _add_user_to_list(self, user):
self._user_list.append(user)
|
[
"def adduser(self, nick):\n # add user\n if not self.users.has_key(nick):\n i = GtkListItem(nick)\n i.show()\n self.list.append_items([i])\n self.users[nick] = i\n if len(self.users) == 1:\n # select the user if it's the first / only in the list\n self.setselection(nick)\n i.connect(\"button-press-event\", self.item_handler, nick)",
"def add_user(self, user: User) -> None:\n\t\tpass",
"def save_user (self):\n User.user_list.append(self)",
"def add_users(self, user_list):\n return self.user_manager.add_objects(user_list)",
"def add_user(self, username, vec):\n self.__add_row_to_data(username, vec)\n self.__save_current_user_data()\n self.build_annoy_index()",
"async def add_user(self, user):\n await self._user_queue.put(user)",
"def add_user(self):\n db.session.flush()\n db.session.add(self)\n db.session.commit()",
"def save_User(self):\n User.User_lst.append(self)",
"def add_user(self, user):\n if not user.id in self.queue:\n self.queue.append(user.id)",
"def add_user(self, user_id, ranking):\n if type(ranking) is not list:\n ranking = list(ranking)\n self.lists[user_id] = ranking",
"def add(cls, user):\n cls.users[user['id']] = CachedUser(user)",
"def add_user(self, username, user_ip):\n #TODO check validation if user already in the chatroom\n self.users[user_ip] = username\n return True",
"def add_user(self, version=None):\n # since id is auto-generated, need to grab the most recent added and simply increment\n # if this is the first user added, start at 1\n if not self.users:\n u_id = 1\n else:\n # otherwise, get the length of the dict (num of keys) & our new user_id is +1\n u_id = len(self.users) + 1\n\n new_user = User(version)\n new_user.id = u_id\n # user_id as key and obj as val in graph's users dict\n self.users[u_id] = new_user\n self.total_users += 1",
"def add_user(self, id, name):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\n\t\tcursor.execute(\"insert into users values (%s, %s, %s, %s);\", (id,name, '{}', '{}'))\n\t\tconnection.commit()\n\t\tconnection.close()",
"def add_user(self, group: str, user: User):\n self.groups[group].users.add(user)",
"def add_user(self, user_uid: Union[str, UUID]):\n self.session.checked_post(self._path() + \"/users/{}\".format(user_uid),\n {'role': MEMBER, 'actions': []})\n return True",
"def add_user(self, request, pk=None):\n body = request.data\n org = Organization.objects.get(pk=pk)\n user = User.objects.get(pk=body['user_id'])\n\n org.add_member(user)\n\n return JsonResponse({'status': 'success'})",
"async def add(self, ctx, user: discord.User):\n\n session = self.bot.Session()\n trainer = session.query(Trainer) \\\n .filter(Trainer.id == user.id).one_or_none()\n\n if trainer is not None:\n # The user already has permission\n await ctx.send(embed=discord.Embed(\n description=f'{user.mention} is already a trainer.',\n color=discord.Color.orange()\n ))\n else:\n # Add the user to the trainers list\n session.add(Trainer(id=user.id))\n session.commit()\n\n await ctx.send(embed=discord.Embed(\n description=f'{user.mention} has been added as a trainer!',\n color=discord.Color.green()\n ))\n\n session.close()",
"def input_and_create_user(self):\n print(\"Please input username!\")\n users.append(user.User(input()))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
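Taken together, the user_list getter, the user_list setter, and _add_user_to_list in the three records above read like accessors of one FAM driver class. A minimal, self-contained sketch of how they could fit together follows; the FAM class name, the @property decorators, and the stand-in User type are assumptions inferred from the snippets, not part of the source.

class User:
    # Hypothetical stand-in for the real user model (assumption for illustration).
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name


class FAM:
    def __init__(self):
        self._user_list = []

    @property
    def user_list(self):
        # Retrieve the FAM user list.
        return self._user_list

    @user_list.setter
    def user_list(self, user_list):
        # Set the user list to the value passed in.
        self._user_list = user_list

    def _add_user_to_list(self, user):
        # Add a user to the list.
        self._user_list.append(user)


fam = FAM()
fam._add_user_to_list(User("Alice"))
print([str(u) for u in fam.user_list])  # ['Alice']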
Show the menu for registering a new user to the system.
|
def _show_registration_menu(self):
# register the user
self._register_user()
|
[
"def create_user_form():\n \n\n return render_template(\"/create-user.html\" )",
"def show_add_form():\n return render_template(\"add_user.html\")",
"def create_new_user(self):\n name = get_param('What is your name?', self.screen)\n address = get_param('What is your street address?', self.screen)\n city = get_param('What city do you live in?', self.screen)\n state = get_param('What state do you live in?', self.screen)\n zipcode = get_param('What is your zipcode?', self.screen)\n phone = get_param('What is your phone number?', self.screen)\n\n try:\n self.current_user = generate_new_customer(name, address, city, state, zipcode, phone)\n self.user_name = name\n self.logged_in_menu()\n except:\n self.unlogged_in_menu()",
"def show_main_menu(self):\n\n # Display a welcome message\n print(\"\"\" \n ___ \n /'___\\ \n /\\ \\__/ __ ___ ___ \n \\ \\ ,__\\/'__`\\ /' __` __`\\ \n \\ \\ \\_/\\ \\L\\.\\_/\\ \\/\\ \\/\\ \\ \n \\ \\_\\\\ \\__/.\\_\\ \\_\\ \\_\\ \\_\\\\\n \\/_/ \\/__/\\/_/\\/_/\\/_/\\/_/ \n \"\"\")\n\n # Prompt user to register, login, or exit the F.A.M until they choose a valid option.\n while True:\n print(\"\\n Family Appointed Moderator\")\n print(\"----------------------------------------\")\n print(\n \"1 - Register new user\\n\"\n \"2 - Login\\n\"\n \"3 - Exit\\n\"\n )\n\n try:\n choice = int(input(\"Enter your choice: \"))\n except ValueError:\n print(\"\\nInvalid choice. Please try again.\")\n continue\n\n if choice == 3:\n return\n elif choice > 3 or choice < 0:\n print(\"\\nInvalid choice. Please try again.\")\n else:\n input_map = {\n 1: self._register_user,\n 2: self._login_user,\n }\n\n # Catch any string values\n try:\n operation = input_map[choice]\n except ValueError:\n print(\"Invalid choice. Please try again.\")\n continue\n\n # Move to the actions menu after a user is logged in or registered\n if operation():\n try:\n self._show_actions_menu()\n except UserIsLockedError as e:\n print(e)",
"def reg_user_window(self):\n global REG_WINDOW\n REG_WINDOW = AddUserWindow(self.server, self.database)",
"def press_Registrar():\n\n usuario = app.entry(\"Usuario\")\n hashPassword = secure.hashingPassword(app.entry(\"Contrasenia\"))\n\n # Creamos el usuario con su nombre, password hasheada y su salt.\n # Cuando realice el login, se actualizaran los datos IP, Puerto, estadoUser,...\n if api.create_User(usuario, hashPassword):\n app.infoBox(\"Registrado con exito\", \"Te registraste como: \"+usuario)\n else:\n app.errorBox(\"Error en el registro\", \"Hubo un error a la hora de registrarse \")",
"def click_create_user(self):\n element = Helper.find_element_by_id(self.driver, self.create_user_button_id)\n if element is not None:\n element.click()",
"def register_user_page_menu_bar(title, route):\n p = Menu(title=title, route=route)\n app.plugin_menu_bar.append(p)",
"def register():\n \n return render_template('register.html')",
"def add_user(self,group,**user_info):\n ### choose menu\n if group == '.':\n self.left_menu(u\"ユーザ管理(自グループ)\")\n else:\n self.left_menu(u\"ユーザ管理(他グループ)\")\n self._selenium.select_from_list_by_label(\"policy_group_id\", group)\n\n self._selenium.wait_until_page_contains_element(u\"//input[@value='ユーザの追加']\")\n self._selenium.click_button(u\"//input[@value='ユーザの追加']\")\n self._selenium.input_text(\"user_name\",user_info['name'])\n self._selenium.input_text(\"password1\",user_info['password'])\n self._selenium.input_text(\"password2\",user_info['password'])\n self._selenium.select_from_list(\"privilege_group_id\",user_info[\"privilege\"])\n policy = ''\n if 'policy' in user_info:\n policy = user_info['policy']\n if policy == '*':\n self._selenium.execute_javascript(\"change_all_check_box(document.FORM1.policy_id, true)\")\n else:\n for entry in [x.strip() for x in policy.split(',')] :\n self._selenium.select_checkbox(u\"//label[contains(.,'%s')]/../input\" % entry)\n #\n self._selenium.click_button(u\"//button[contains(.,'追加')]\")\n self._selenium.wait_until_page_contains_element(u\"//span[contains(.,'ユーザを追加しました')]\")\n BuiltIn().log(\"Added user `%s`\" % user_info['name'])",
"def registration():\r\n form = RegistrationForm()\r\n return render_template(\"registration.html\", form = form)",
"def showUserManagemet(self):\n\n self.cleanWorkspace()\n self.userManagementWidget = UserManagementWidget()\n self.window.layoutUserManagement.addWidget(\n self.userManagementWidget)\n controllerUserManagement = ControllerUserManagement(self.userManagementWidget)",
"def display_signup_form():\n\n return render_template('/signup.html')",
"def register():\n return render_template('dashboard/register.html', tagname = 'register')",
"def add_new_user (username, password, title):\n storage_format = f\"{username}|{password}|{title}\"\n append_new_line(users_credentials, storage_format)",
"def registered_users():\n users = User.query.all()\n return render_template(\n 'admin/registered_users.html', users=users)",
"def get(self):\n self.render(\"account-create.html\", \"account\", check_reg=False)",
"def input_and_create_user(self):\n print(\"Please input username!\")\n users.append(user.User(input()))",
"def adduser(self, nick):\n # add user\n if not self.users.has_key(nick):\n i = GtkListItem(nick)\n i.show()\n self.list.append_items([i])\n self.users[nick] = i\n if len(self.users) == 1:\n # select the user if it's the first / only in the list\n self.setselection(nick)\n i.connect(\"button-press-event\", self.item_handler, nick)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the actions menu and take input from the user.
|
def _show_actions_menu(self):
while True:
# Check if a user is locked, if so exit out of the actions menu
if self.current_user.can_lock_account():
raise UserIsLockedError("Your account is locked. We have logged you out")
print(f"\nLogged in as {self.current_user.name}\n")
# options:
print("Actions menu:\n"
"----------------\n"
"1 - View budgets\n"
"2 - Record transaction\n"
"3 - View transactions by budget\n"
"4 - View bank account details\n"
"5 - Logout\n"
)
try:
option = int(input("Please enter the number your selection: "))
except ValueError:
print("Invalid choice. Please try again.")
continue
# option 5 = LOGOUT, back to main menu
if option == 5:
return
else:
# performs the action selected by the user.
self._perform_action(option)
|
[
"def display_menu(self):\n print(\"~~~~~~~~~~~~MENU~~~~~~~~~~~~\")\n self.user_choice = self.utils.ask_choices(self.menu_choices)\n print(\"\")",
"def show_main_menu(self):\n\n # Display a welcome message\n print(\"\"\" \n ___ \n /'___\\ \n /\\ \\__/ __ ___ ___ \n \\ \\ ,__\\/'__`\\ /' __` __`\\ \n \\ \\ \\_/\\ \\L\\.\\_/\\ \\/\\ \\/\\ \\ \n \\ \\_\\\\ \\__/.\\_\\ \\_\\ \\_\\ \\_\\\\\n \\/_/ \\/__/\\/_/\\/_/\\/_/\\/_/ \n \"\"\")\n\n # Prompt user to register, login, or exit the F.A.M until they choose a valid option.\n while True:\n print(\"\\n Family Appointed Moderator\")\n print(\"----------------------------------------\")\n print(\n \"1 - Register new user\\n\"\n \"2 - Login\\n\"\n \"3 - Exit\\n\"\n )\n\n try:\n choice = int(input(\"Enter your choice: \"))\n except ValueError:\n print(\"\\nInvalid choice. Please try again.\")\n continue\n\n if choice == 3:\n return\n elif choice > 3 or choice < 0:\n print(\"\\nInvalid choice. Please try again.\")\n else:\n input_map = {\n 1: self._register_user,\n 2: self._login_user,\n }\n\n # Catch any string values\n try:\n operation = input_map[choice]\n except ValueError:\n print(\"Invalid choice. Please try again.\")\n continue\n\n # Move to the actions menu after a user is logged in or registered\n if operation():\n try:\n self._show_actions_menu()\n except UserIsLockedError as e:\n print(e)",
"def do_action_for_input(self, user_input):\n try:\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()\n except Exception:\n print(\"Try again\")",
"def show_main_menu(self):\n while True:\n menu_msg = (\"\\nPlease select an action \"\n \"\\n1---Withdraw\"\n \"\\n2---Deposit\"\n \"\\n3---Check balance\"\n \"\\n4---Edit account details\"\n \"\\n5---Log out and exit\")\n print(menu_msg)\n\n choices = {'1': self.user_account.withdraw,\n '2': self.user_account.deposit,\n '3': self.user_account.print_account_balance,\n '4': self.user_account.edit_account_menu,\n '5': quit}\n\n user_choice = choices.get(input())\n if user_choice is not None:\n user_choice()\n else:\n print(\"Invalid choice. Please try again: \")",
"def main_menu(message=None):\n clear()\n print(\"WORK LOG\\n========\\n\")\n if message:\n print(message+\"\\n\")\n else:\n print(\"What would you like to do?\\n\")\n print(\"(A)dd a task\")\n if Task.select().count() != 0:\n print(\"(V)iew all tasks\")\n print(\"(S)earch for a task\")\n print(\"(Q)uit\")\n return input(\"> \")",
"def openmenu(self):\r\n\r\n # getting the name of pet from user input by using get\r\n petName = self.petnameEntry.get()\r\n user_pet = Action(petName)\r\n print(\"I am your pet,\", petName)\r\n\r\n # a label for better design\r\n window = tk.Toplevel(root)\r\n w = Label(window, text=\"What would you like \\nto do with your pet?\")\r\n w.pack()\r\n\r\n # menu buttons including feed, play, Vet, sleep, clean\r\n btFeed = Button(window, text=\"Feed\", command=lambda: user_pet.eat())\r\n btFeed.pack(pady=3)\r\n btPlay = Button(window, text=\"Play\", command=lambda: user_pet.play())\r\n btPlay.pack(pady=3)\r\n btVet = Button(window, text=\"Vet\", command=lambda: user_pet.see_vet())\r\n btVet.pack(pady=3)\r\n btSleep = Button(window, text=\"Sleep\", command=lambda: user_pet.sleep())\r\n btSleep.pack(pady=3)\r\n btClean = Button(window, text=\"Clean\", command=lambda: user_pet.\r\n clean_up())\r\n btClean.pack(pady=3)\r\n root.mainloop()",
"def display_transition_menu():\r\n print(\"### Transition Menu ###\\n\"\r\n \"Welcome to transition menu! \\n\"\r\n \"what would you like to do?\\n\"\r\n \"1. save audio\\n\"\r\n \"2. change audio\\n\"\r\n \"Please choose your preference from 1 or 2:\")",
"def display_menu(self) -> None:\n self.quit_button.display()\n self.restart_button.display()\n self.undo_move_button.display()\n self.save_log_button.display()",
"def user_menu(game, user_action):\n player = game.player\n if 'inventory' in user_action:\n print(player.inventory)\n elif 'score' in user_action:\n print(f\"Your current score is: {player.stats['point']}\")\n else:\n print(\"No menu option found\")\n return True",
"def menu(self):\n #Database menu options\n menu = {'1':self.select_all,'2':self.add_student,'3':self.update_student,'4':self.delete_student,'5':self.search,'0':self.close_app}\n while not self.user_exit: #while the user has not closed the application\n print(\"\\n\\n\")\n #print menu\n selection = input(\"Please select one of the following:\\n\\tDisplay all students: 1\\n\\tAdd a student to the database: 2\\n\\tUpdate an existing student's information: 3\\n\\tDelete a student: 4\\n\\tSearch for students with a given Major, GPA, or Faculty Advisor: 5\\n\\tExit the application: 0\\n\").strip()\n if not selection in menu:# if invalid input given\n print(\"Invalid input given, please try again\")\n continue\n #launch selected menu item\n menu[selection]()",
"def main():\n menu()",
"def handle_employee_menu():\n print(\"\"\"\n Welcome\n What would you like to do:\n (1) List students\n (2) View students details\n (0) Exit CcMS\n \"\"\")\n option = input(\"Your choice: \")\n return option",
"def click_menu(self):\n pass",
"def prompt_for_action():\n while True:\n print()\n print(\"What would you like to do?\")\n print()\n print(\" A = add an item to the inventory.\")\n print(\" R = remove an item from the inventory.\")\n print(\" C = generate a report of the current inventory levels.\")\n print(\" O = generate a report of the inventory items to re-order.\")\n print(\" Q = quit.\")\n print()\n action = input(\"> \").strip().upper()\n if action == \"A\": return \"ADD\"\n elif action == \"R\": return \"REMOVE\"\n elif action == \"C\": return \"INVENTORY_REPORT\"\n elif action == \"O\": return \"REORDER_REPORT\"\n elif action == \"Q\": return \"QUIT\"\n else:\n print(\"Unknown action!\")",
"def search_menu(self):\n clr_screen() \n \n print (misc.SEARCH_MENU)\n\n for key in sorted(misc.search_menu):\n print (misc.search_menu[key])\n\n print('\\n')\n choice = input(\"Please select:\")\n\n if choice == '1':\n self.search_by_range_date()\n self.main_menu()\n elif choice == '2': \n self.find_by_time()\n self.main_menu()\n elif choice == '3':\n self.find_by_string()\n self.main_menu()\n elif choice == '4': \n self.find_by_pattern()\n self.main_menu()\n elif choice == '5': \n print (\"return to main menu\")\n self.main_menu()\n else: \n misc.option_error()\n self.main_menu()",
"def generate_menu():\r\n print('\\nPlease choose from the following menu:',\r\n\r\n '\\n\\n1. Display all U.S. States in Alphabetical order along',\r\n 'with the Capital, State Population, and Flower',\r\n\r\n '\\n2. Search for a specific state and display the appropriate Capital',\r\n 'name, State Population, and an image of the associated State Flower.',\r\n\r\n '\\n3. Provide a Bar graph of the top 5 populated States showing the overall population.',\r\n\r\n '\\n4. Update the overall state population for a specific state.',\r\n\r\n '\\n5. Exit the program.')",
"def run(self):\n current_ind = 0\n while True:\n num_printed = self._display_search_results(current_ind)\n if (num_printed is None) or (current_ind + num_printed == len(self.search_res)):\n print(\n '\\nPlease select the action that you would like to take:\\n'\n '\\t[#] Enter the number corresponding to the question that you would like to perform an action on\\n'\n '\\t[r] Return to the main menu'\n )\n selection = select_from_menu(self.valid_inputs + ['r'])\n else:\n current_ind += num_printed\n print(\n '\\nPlease select the action that you would like to take:\\n'\n '\\t[#] Enter the number corresponding to the question that you would like to perform an action on\\n'\n '\\t[m] See more search results\\n'\n '\\t[r] Return to the main menu'\n )\n selection = select_from_menu(self.valid_inputs + ['m', 'r'])\n if selection != 'm':\n break\n if selection != 'r':\n QuestionAction(self.db_manager, self.user_id, self.search_res[int(selection) - 1]).run()",
"def main_menu(self):\n\n print(\"Welcome to MASTERMIND\")\n while True:\n\n # print menu and get some input data\n menu_options, options_dic = self.print_menu()\n\n # get proper user input\n i = self.menu_input(menu_options)\n\n # interpret input based on options given in print_menu\n d = options_dic[i]\n if d == 'new game':\n self.new_game()\n elif d == 'continue game':\n self.continue_game()\n elif d == 'save game':\n self.save_game()\n elif d == 'load game':\n self.load_game()\n elif d == 'see stats':\n self.statistics()\n elif d == 'change config':\n self.change_configurations()\n elif d == 'quit':\n os.system('clear')\n print(\"Goodbye!\")\n exit()\n\n os.system('clear')",
"def show_start_menu(): # The startup menu\n print('MAIN MENU')\n print('\\t1. Start a new game.')\n accepted_answers = ['1', 'q']\n save = find_save()\n if save is not None:\n print('\\t2. Continue from existing save.')\n accepted_answers.append('2')\n print('\\tq. Quit.\\n')\n answer = input('Choose your desired option: ')\n while answer not in accepted_answers:\n answer = input('You have entered an invalid option. Please try again: ')\n globals.clear_screen()\n if answer is '1':\n return None\n elif answer is 'q':\n exit_program()\n else:\n return save"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
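The try/int/range-check loop in _show_actions_menu above also appears in _login_user and show_main_menu further down. A small standalone helper capturing that shared pattern, written only as an illustration and not present in the original code:

def prompt_menu_choice(prompt, valid_choices):
    # Keep prompting until the user enters an integer contained in valid_choices.
    while True:
        try:
            choice = int(input(prompt))
        except ValueError:
            print("Invalid choice. Please try again.")
            continue
        if choice in valid_choices:
            return choice
        print("Invalid choice. Please try again.")


# The actions menu above accepts options 1-5, so a call could look like:
# option = prompt_menu_choice("Enter your choice: ", range(1, 6))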
Perform an action based on the option selected by a user.
|
def _perform_action(self, option):
if option == 1:
self.current_user.view_budgets()
elif option == 2:
self.current_user.record_transaction()
elif option == 3:
self.current_user.view_transactions()
elif option == 4:
self.current_user.view_bank_details()
else:
print("Please enter a valid option.")
|
[
"def chooseAction(self, choice):\n\n option = 'undefinedMethod'\n\n #If the choice is a valid option, prepare to run the corresponding method\n try:\n option = self._options[choice]\n except Exception:\n pass\n\n #Run the corresponding method. Based on:\n #https://www.pydanny.com/why-doesnt-python-have-switch-case.html\n method = getattr(self, option, 'invalid')\n if method == 'invalid':\n method = getattr(self, 'reprompt')\n return method()",
"def do_action_for_input(self, user_input):\n try:\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()\n except Exception:\n print(\"Try again\")",
"def run(self):\n valid_inputs = ['1', '2', '3', 'r']\n selection = select_from_menu(valid_inputs)\n if selection == '1':\n self._answer_question()\n elif selection == '2':\n self._list_answers()\n elif selection == '3':\n self._try_adding_vote(self.question_data, self.user_id)",
"def process_and_display(self, user_choice):\n\n if user_choice in ('Saturate',):\n self.show_saturated(no_toggle=True)\n elif user_choice in ('Background_only',):\n self.show_background_only(no_toggle=True)\n elif user_choice in ('Tails_trimmed', 'Tails trimmed'):\n self.show_tails_trimmed(no_toggle=True)\n elif user_choice in ('Original',):\n self.show_original()\n else:\n print('Chosen option seems to be not implemented!')",
"def process_options(self):\n data = self.receive()\n if 'option_selected' in data.keys() and 1 <= data['option_selected'] <= 6: # validates a valid option selected\n option = data['option_selected']\n if option == 1:\n self._send_user_list()\n elif option == 2:\n self._save_message(data)\n elif option == 3:\n self._send_messages()\n elif option == 4:\n self._create_chat(data)\n elif option == 5:\n self._join_chat(data)\n elif option == 6:\n self._disconnect_from_server()\n else:\n print(\"The option selected is invalid\")",
"def user_choose_action(pok):\n blank_text()\n choice_text = myfont.render(\"Battle (b)\",True,BLACK)\n blit(choice_text, text_blit_pos)\n update_text()",
"def handle_employee_menu():\n print(\"\"\"\n Welcome\n What would you like to do:\n (1) List students\n (2) View students details\n (0) Exit CcMS\n \"\"\")\n option = input(\"Your choice: \")\n return option",
"def call_option(self, input_number):\n if input_number == 1:\n self.add_new_contact()\n elif input_number == 2:\n self.modify_existing_contact()\n elif input_number == 3:\n self.delete_contact()\n elif input_number == 4:\n self.display_all_contacts()\n elif input_number == 5:\n self.search_by_attribute()\n elif input_number == 6:\n quit()",
"def user_menu(game, user_action):\n player = game.player\n if 'inventory' in user_action:\n print(player.inventory)\n elif 'score' in user_action:\n print(f\"Your current score is: {player.stats['point']}\")\n else:\n print(\"No menu option found\")\n return True",
"def select_action(self, observation):",
"def __do_step_choose_opp(self):\r\n params = self._prepare_values_to_be_rendered()\r\n params.instruction = \"Want to play against human or machine?\"\r\n params.options.update({\r\n Commands.CHOOSE_OPP_AS_HUMAN: \"Human\",\r\n Commands.CHOOSE_OPP_AS_MACHINE: \"Machine\",\r\n })\r\n self._gui.print_screen(params)\r\n\r\n input = self._read_input()\r\n\r\n if not self._handle_common_inputs(input, params.options):\r\n if input == Commands.CHOOSE_OPP_AS_HUMAN:\r\n self._state.game.machine = None\r\n self._state.activity = States.CHOOSE_PLAYER_ORDER\r\n self._state.feedback = \"You have chosen to play \" + \\\r\n \"against another human.\"\r\n\r\n elif input == Commands.CHOOSE_OPP_AS_MACHINE:\r\n self._state.activity = States.CHOOSE_MACHINE_STRATEGY\r\n self._state.feedback = \"You have chosen to play \" + \\\r\n \"against the machine.\"",
"def on_todo_action(self, option):\n if option > -1:\n if option == ACTION_DONE_STATE:\n self.todo_file.mark_todo(self.todo_file.todo_position)\n elif option == ACTION_EDIT:\n self.window.show_input_panel(\"Edit Todo\", self.todo_file.get_line(self.todo_file.todo_position), self.on_edit_todo, None, self.on_cancel)\n elif option == ACTION_DELETE:\n self.todo_file.delete_todo(self.todo_file.todo_position)\n elif option == ACTION_MOVE:\n self.window.show_quick_panel(MOVE_OPTIONS, self.on_move_action)\n else:\n pass",
"def run(self):\n valid_inputs = ['1', 'r']\n selection = select_from_menu(valid_inputs)\n if selection != 'r':\n self._try_adding_vote(self.answer_data, self.user_id)",
"def player_choose(self) -> None:\n print(\"(1) Rock\\n(2) Paper\\n(3) Scissors\")\n self.human_choice = OPTIONS[int(input(\"Enter the number of your choice: \")) - 1]",
"def manipulate():\r\n manipulate_menu = \"\" \\\r\n \"| Choose a Class to Manipulate |\" \\\r\n \"\\n(1) User\" \\\r\n \"\\n(2) Group\"\r\n print(manipulate_menu)\r\n admin_choice = input(\"Enter here: \")\r\n\r\n if admin_choice == \"1\":\r\n manipulate_user()\r\n elif admin_choice == \"2\":\r\n manipulate_group()\r\n else:\r\n print(\"\\n| Re-enter the option |\\n\")",
"def get_user_choice():\n\n return input('Your choice: ')",
"def response_action(self, request, queryset):\n\n # There can be multiple action forms on the page (at the top\n # and bottom of the change list, for example). Get the action\n # whose button was pushed.\n try:\n action_index = int(request.POST.get('index', 0))\n except ValueError:\n action_index = 0\n\n # Construct the action form.\n data = request.POST.copy()\n data.pop(helpers.ACTION_CHECKBOX_NAME, None)\n data.pop(\"index\", None)\n\n # Use the action whose button was pushed\n try:\n data.update({'action': data.getlist('action')[action_index]})\n except IndexError:\n # If we didn't get an action from the chosen form that's invalid\n # POST data, so by deleting action it'll fail the validation check\n # below. So no need to do anything here\n pass\n\n action_form = self.action_form(data, auto_id=None)\n action_form.fields['action'].choices = self.get_action_choices(request)\n\n # If the form's valid we can handle the action.\n if action_form.is_valid():\n action = action_form.cleaned_data['action']\n select_across = action_form.cleaned_data['select_across']\n func = self.get_actions(request)[action][0]\n\n # Get the list of selected PKs. If nothing's selected, we can't\n # perform an action on it, so bail. Except we want to perform\n # the action explicitly on all objects.\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n if not selected and not select_across:\n # Reminder that something needs to be selected or nothing will\n # happen\n msg = _(\"Items must be selected in order to perform \"\n \"actions on them. No items have been changed.\")\n messages.add_message(request, messages.WARNING, msg)\n return None\n\n if not select_across:\n # Perform the action only on the selected objects\n queryset = queryset.filter(pk__in=selected)\n\n response = func(self, request, queryset)\n\n # Actions may return an HttpResponse-like object, which will be\n # used as the response from the POST. If not, we'll be a good\n # little HTTP citizen and redirect back to the changelist page.\n if isinstance(response, HttpResponseBase):\n return response\n else:\n return HttpResponseRedirect(request.get_full_path())\n else:\n msg = _(\"No action selected.\")\n messages.add_message(request, messages.WARNING, msg)\n return None",
"def option_activated(self, *args, **kwargs):\n commands.command_use_item(self.game, self.options[self.selected], self.director.main_game_scene)\n super().option_activated(*args, **kwargs)",
"def _on_option_clicked(self, *_):\n self.variable.set(True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
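The if/elif chain in _perform_action above maps option numbers to current_user methods. A dictionary-dispatch variant of the same logic, sketched with the method names taken from that snippet and intended to behave identically, including the fallback message:

def _perform_action(self, option):
    # Map each menu option to the bound method that handles it.
    actions = {
        1: self.current_user.view_budgets,
        2: self.current_user.record_transaction,
        3: self.current_user.view_transactions,
        4: self.current_user.view_bank_details,
    }
    action = actions.get(option)
    if action is None:
        print("Please enter a valid option.")
    else:
        action()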
Select a user from the user list to log in.
|
def _login_user(self):
# Display list of users and prompt an input
print("\n---- Login Menu ----")
for user in self.user_list:
print(f"{self.user_list.index(user) + 1} - {user}")
# Exit if the last option is chosen
choice_exit = len(self.user_list) + 1
print(f"{choice_exit} - Back to main menu")
valid_users = range(1, len(self.user_list) + 1)
choice = 0
while True:
try:
choice = int(input("Choose a user by entering the id: "))
except ValueError:
print("\nInvalid choice. Please try again.")
continue
# Loop until a valid user is selected
if choice in valid_users:
break
elif choice == choice_exit:
return False
else:
print("\nPlease enter a valid option")
# Set current user to selected user
self.current_user = self.user_list[choice - 1]
return True
|
[
"def sign_in(self, lb, patients):\n # Check that something is selected\n if lb.curselection() is None or len(lb.curselection()) == 0:\n return\n patient = patients[lb.curselection()[0]]\n self.window.email = patient\n # Swap view to patient\n logging.info(\"Admin Logged in as \" + patient)\n self.window.swap_view(\"Patient\")",
"def setselection(self, nick):\n # if type(nick) == type([]) ?\n self.name = nick\n i = self.users[nick]\n i.select()\n self.targetlabel.set_text(nick)",
"def user_login(user):\n session['user'] = user.username",
"def login(self):\n userName = raw_input('Login: ')\n if self.existUser(userName):\n user = self.getUser(userName)\n rawI = raw_input('Password: ')\n if rawI == user.pw:\n self.currentUser = user\n else:\n logging.info(\"Incorrect password!\")\n self.login()\n else:\n logging.info(\"User does not exist.\")\n self.login()",
"def _get_user(self, name, list_users):\n for user in list_users:\n print(\"get user method {} {}\".format(name, user.screen_name))\n if user.screen_name.lower() == name.lower():\n return user",
"def make_active_users_selectable(self, user=None):\n active_users = HypothesisUtils().get_active_users()\n most_recently_active_user = active_users[0][0]\n select = ''\n for active_user in active_users:\n if user is not None and active_user[0] == user:\n option = '<option selected value=\"%s\">%s (%s)</option>'\n else:\n option = '<option value=\"%s\">%s (%s)</option>'\n option = option % (active_user[0], active_user[0], active_user[1])\n select += option\n select = \"\"\"<select class=\"stream-active-users\" name=\"active_users\" \n onchange=\"javascript:show_user()\">\n <option>choose</option>\n %s\n </select>\"\"\" % (select)\n if user==None:\n return most_recently_active_user, select, active_users\n else:\n return user, select, active_users",
"def set_new_user(self):\n self.current_user = random.choice(self.hosts)",
"def adduser(self, nick):\n # add user\n if not self.users.has_key(nick):\n i = GtkListItem(nick)\n i.show()\n self.list.append_items([i])\n self.users[nick] = i\n if len(self.users) == 1:\n # select the user if it's the first / only in the list\n self.setselection(nick)\n i.connect(\"button-press-event\", self.item_handler, nick)",
"def get_single_user():",
"def login(user):\n login_user(user)\n identity_changed.send(current_app._get_current_object(),\n identity=Identity(user.email))",
"def get_users_choices():\n users = [(user.id, user.login) for user in db_queries.get_all_users()]\n return users",
"def admin_users():\n users = User.select()\n return render_template('users.html', users=users)",
"def test_select_user(self):\n values = (\n \"\"\"\n 'password', 'pointer@gmail.com', 'false', 'false',\n NULL, NULL, now(), NULL\n \"\"\"\n )\n insert_user_row(values)\n data = select_user_row('pointer@gmail.com')\n date = datetime.datetime.today().strftime('%Y-%m-%d')\n self.assertEqual(data[0][0], 1)\n self.assertEqual(data[0][1], 'password')\n self.assertEqual(data[0][2], 'pointer@gmail.com')\n self.assertEqual(data[0][3], False)\n self.assertEqual(data[0][4], False)\n self.assertEqual(data[0][5], None)\n self.assertEqual(data[0][6], None)\n self.assertEqual(str(data[0][7]), date)\n self.assertEqual(data[0][8], None)",
"def login(mList):\n print(\"Logging in system\\n\")\n username = input(\"Enter your Mod user name:\")\n passwd = getpass.getpass()\n\n return authMod(username, passwd, mList)",
"def actionAccountChoose(self, address):\n\n\t\ttry:\n\t\t\tUser.getUser(self.getDataDirectory(), address)\n\t\texcept KeyError:\n\t\t\tsys.stderr.write(\"No account %s\\n\" % address)\n\t\t\treturn 1\n\n\t\tconfig.set(\"account.current\", address)",
"def input_and_create_user(self):\n print(\"Please input username!\")\n users.append(user.User(input()))",
"def test_list_user_logins_users(self):\r\n user_id = None # Change me!!\r\n\r\n r = self.client.list_user_logins_users(user_id)",
"def save_login(self):\n User.user_list.append(self)",
"def find_user(user): \n user.find_by_username()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
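One detail in _login_user above: user_list.index(user) is re-evaluated inside the loop, which rescans the list on every iteration and returns the first match if two users compare equal. enumerate gives the position directly; a sketch of just the listing portion, with the rest of the method assumed unchanged:

def _print_login_menu(user_list):
    # Listing loop from _login_user rewritten with enumerate (illustration only).
    print("\n---- Login Menu ----")
    for index, user in enumerate(user_list, start=1):
        print(f"{index} - {user}")
    print(f"{len(user_list) + 1} - Back to main menu")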
Show the main menu with options to register a new user, log in, or exit the FAM application.
|
def show_main_menu(self):
# Display a welcome message
print("""
___
/'___\
/\ \__/ __ ___ ___
\ \ ,__\/'__`\ /' __` __`\
\ \ \_/\ \L\.\_/\ \/\ \/\ \
\ \_\\ \__/.\_\ \_\ \_\ \_\\
\/_/ \/__/\/_/\/_/\/_/\/_/
""")
# Prompt user to register, login, or exit the F.A.M until they choose a valid option.
while True:
print("\n Family Appointed Moderator")
print("----------------------------------------")
print(
"1 - Register new user\n"
"2 - Login\n"
"3 - Exit\n"
)
try:
choice = int(input("Enter your choice: "))
except ValueError:
print("\nInvalid choice. Please try again.")
continue
if choice == 3:
return
elif choice > 3 or choice < 0:
print("\nInvalid choice. Please try again.")
else:
input_map = {
1: self._register_user,
2: self._login_user,
}
# Catch any string values
try:
operation = input_map[choice]
except ValueError:
print("Invalid choice. Please try again.")
continue
# Move to the actions menu after a user is logged in or registered
if operation():
try:
self._show_actions_menu()
except UserIsLockedError as e:
print(e)
|
[
"def _show_registration_menu(self):\n\n # register the user\n self._register_user()",
"def main_menu(self) -> None:\n logger.info(\"logged in as GP\")\n while True:\n Parser.print_clean(\"You're currently viewing main menu options for GP {}.\".format(self.username))\n self.print_information()\n option_selection = Parser.selection_parser(\n options={\"A\": \"View/Edit availability\", \"M\": \"Manage bookings\", \"V\": \"View/Start appointment\",\n \"U\": \"update your profile\", \"--logout\": \"Logout\"})\n\n if option_selection == \"--logout\":\n # Quitting is required for logout to ensure all personal data is cleared from session\n logger.info(\"User Logged Out\")\n Parser.print_clean(\"Logging you out...\")\n Parser.user_quit()\n\n elif option_selection == \"A\":\n self.edit_availability()\n elif option_selection == \"M\":\n self.manage_bookings()\n elif option_selection == \"V\":\n self.view_appointment()\n elif option_selection == \"U\":\n self.edit_information()",
"def show_main_menu(self):\n while True:\n menu_msg = (\"\\nPlease select an action \"\n \"\\n1---Withdraw\"\n \"\\n2---Deposit\"\n \"\\n3---Check balance\"\n \"\\n4---Edit account details\"\n \"\\n5---Log out and exit\")\n print(menu_msg)\n\n choices = {'1': self.user_account.withdraw,\n '2': self.user_account.deposit,\n '3': self.user_account.print_account_balance,\n '4': self.user_account.edit_account_menu,\n '5': quit}\n\n user_choice = choices.get(input())\n if user_choice is not None:\n user_choice()\n else:\n print(\"Invalid choice. Please try again: \")",
"def display_menu(self):\n print(\"~~~~~~~~~~~~MENU~~~~~~~~~~~~\")\n self.user_choice = self.utils.ask_choices(self.menu_choices)\n print(\"\")",
"def _show_actions_menu(self):\n while True:\n # Check if a user is locked, if so exit out of the actions menu\n if self.current_user.can_lock_account():\n raise UserIsLockedError(\"Your account is locked. We have logged you out\")\n\n print(f\"\\nLogged in as {self.current_user.name}\\n\")\n\n # options:\n print(\"Actions menu:\\n\"\n \"----------------\\n\"\n \"1 - View budgets\\n\"\n \"2 - Record transaction\\n\"\n \"3 - View transactions by budget\\n\"\n \"4 - View bank account details\\n\"\n \"5 - Logout\\n\"\n )\n\n try:\n option = int(input(\"Please enter the number your selection: \"))\n except ValueError:\n print(\"Invalid choice. Please try again.\")\n continue\n # option 5 = LOGOUT, back to main menu\n if option == 5:\n return\n else:\n # performs the action selected by the user.\n self._perform_action(option)",
"def main_menu(self):\n selected_exit = False\n while not selected_exit:\n choice = self.view.main_menu_wrapper(self.directory_model)\n if choice is 1:\n # user wants to update the routes\n self.update_routes()\n elif choice is 2:\n # user wants to exit the program\n selected_exit = True\n else:\n # user has selected a route by entering its name\n self.route_selected(choice)\n print(\"\\n\")",
"def menu(self):\n #Database menu options\n menu = {'1':self.select_all,'2':self.add_student,'3':self.update_student,'4':self.delete_student,'5':self.search,'0':self.close_app}\n while not self.user_exit: #while the user has not closed the application\n print(\"\\n\\n\")\n #print menu\n selection = input(\"Please select one of the following:\\n\\tDisplay all students: 1\\n\\tAdd a student to the database: 2\\n\\tUpdate an existing student's information: 3\\n\\tDelete a student: 4\\n\\tSearch for students with a given Major, GPA, or Faculty Advisor: 5\\n\\tExit the application: 0\\n\").strip()\n if not selection in menu:# if invalid input given\n print(\"Invalid input given, please try again\")\n continue\n #launch selected menu item\n menu[selection]()",
"def show_start_menu(): # The startup menu\n print('MAIN MENU')\n print('\\t1. Start a new game.')\n accepted_answers = ['1', 'q']\n save = find_save()\n if save is not None:\n print('\\t2. Continue from existing save.')\n accepted_answers.append('2')\n print('\\tq. Quit.\\n')\n answer = input('Choose your desired option: ')\n while answer not in accepted_answers:\n answer = input('You have entered an invalid option. Please try again: ')\n globals.clear_screen()\n if answer is '1':\n return None\n elif answer is 'q':\n exit_program()\n else:\n return save",
"def main(self) -> dict:\n\n questions = [\n Checkbox(\n name=\"main\",\n message=\"SELECT AN OPTION:\",\n choices=[\"ADMIN\", \"PLAY\", \"EXIT\"])\n ]\n\n return prompt(questions)",
"def main():\n\n root = tk.Tk()\n app = login.Authentication(root)\n app.mainloop()",
"def main():\n menu()",
"def home(self):\n self.window.show_view(Menu())",
"def menu(self):\n self.parent.switch_screen(\"Menu\")",
"def handle_employee_menu():\n print(\"\"\"\n Welcome\n What would you like to do:\n (1) List students\n (2) View students details\n (0) Exit CcMS\n \"\"\")\n option = input(\"Your choice: \")\n return option",
"def _login_user(self):\n # Display list of users and prompt an input\n print(\"\\n---- Login Menu ----\")\n for user in self.user_list:\n print(f\"{self.user_list.index(user) + 1} - {user}\")\n\n # Exit if the last option is chosen\n choice_exit = len(self.user_list) + 1\n print(f\"{choice_exit} - Back to main menu\")\n\n valid_users = range(1, len(self.user_list) + 1)\n choice = 0\n while True:\n try:\n choice = int(input(\"Choose a user by entering the id: \"))\n except ValueError:\n print(\"\\nInvalid choice. Please try again.\")\n continue\n\n # Loop until a valid user is selected\n if choice in valid_users:\n break\n elif choice == choice_exit:\n return False\n else:\n print(\"\\nPlease enter a valid option\")\n\n # Set current user to selected user\n self.current_user = self.user_list[choice - 1]\n return True",
"def display_menu(self) -> None:\n self.quit_button.display()\n self.restart_button.display()\n self.undo_move_button.display()\n self.save_log_button.display()",
"def displayLogin(self):\n self.l1.setVisible(False)\n self.l2.setVisible(False)\n self.l3.setVisible(False)\n self.logl1.setVisible(True)\n self.adminl1.setVisible(False)\n\n self.adminUsername.setVisible(True)\n self.adminPassword.setVisible(True)\n self.log.setVisible(True)\n\n self.lRecharge.setVisible(False)\n self.bRecharge.setVisible(False)\n self.moneyBox.setVisible(False)\n\n self.username.setVisible(False)\n self.name.setVisible(False)\n self.surname.setVisible(False)\n self.bCreateAccount.setVisible(False)\n\n self.lAddDevice.setVisible(False)\n self.username2.setVisible(False)\n self.bAddDevice.setVisible(False)",
"def attach_users_menu(menu_base, screen_def): \n users_menu = Menu(menu_base) \n menu_base.add_cascade(label=\"Users\", menu=users_menu) \n \n users_menu.add_command(label=\"All Users\",\n command=lambda :\n display_window(screens.get_screen_def('UserList')))\n \n attach_usersutil_menu(users_menu, screen_def)",
"def show_login():\n\n # form = LoginForm()\n\n return render_template(\"login.html\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
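A note on show_main_menu above: the except ValueError around input_map[choice] cannot fire there, because looking up a missing integer key raises KeyError, and a choice of 0 slips past the check choice > 3 or choice < 0. A sketch of the selection step using dict.get, which sidesteps the exception type entirely (the surrounding loop is assumed unchanged):

def choose_operation(choice, register_user, login_user):
    # Selection step from show_main_menu using dict.get instead of try/except.
    input_map = {
        1: register_user,
        2: login_user,
    }
    operation = input_map.get(choice)
    if operation is None:
        # Covers 0 and any other unmapped number without relying on an exception.
        print("Invalid choice. Please try again.")
    return operation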
isAnswer should be True if you are setting the title to a chosen answer.
|
def setTitle(self, newTitle, isAnswer = True):
if not self.chosenAnswer:
self.title["text"] = newTitle
self.backBtn.show()
if isAnswer:
self.chosenAnswer = True
|
[
"def test_showanswer_answered(self):\n # Can not see \"Show Answer\" when student answer is wrong\n answer_wrong = CapaFactory.create(\n showanswer=SHOWANSWER.ANSWERED,\n max_attempts=\"1\",\n attempts=\"0\",\n due=self.tomorrow_str,\n correct=False\n )\n assert not answer_wrong.answer_available()\n\n # Expect to see \"Show Answer\" when answer is correct\n answer_correct = CapaFactory.create(\n showanswer=SHOWANSWER.ANSWERED,\n max_attempts=\"1\",\n attempts=\"0\",\n due=self.tomorrow_str,\n correct=True\n )\n assert answer_correct.answer_available()",
"def _answer_question(self):\n clear_screen()\n print('ANSWER QUESTION')\n body = input('\\nPlease enter the text corresponding to your answer:\\n> ')\n self.db_manager.add_answer(self.question_data['Id'], body, self.user_id)\n clear_screen()\n print('ANSWER QUESTION')\n input('\\nAnswer successfully posted - please enter any key to return to the main menu:\\n> ')",
"def _basicAnswerCreation(self, survey, slide, answer, label = ''):\n if not getSurveyAnswer(survey, slide, c.authuser):\n sa = SurveyAnswer(survey, slide, answer, label)\n result = 'create'\n else:\n sa = editSurveyAnswer(survey, slide, answer, label)\n result = 'modify'\n return sa, result",
"def process_question(self):\n for rb in self.rbs:\n rb.configure(state = DISABLED)\n if self.var.get()==self.questions[self.index].answer: \n self.correct += 1\n self.feedback.config(text = \"Correct! \" + str(self.correct) + \"/\" + str(self.index + 1))\n else:\n self.feedback.config(text = \"Incorrect! The answer is \"+ self.questions[self.index].answer + \" \" +\n str(self.correct) + \"/\" + str(self.index + 1))",
"def print_answer(self, answer):\n\t\tprint(\"Case #%d: %s\" % (self.i, answer))\n\t\tself.i += 1",
"def check(self,answer):\n raise NotImplementedError(\"Each question must implement the check method\")",
"def show_question(self):\n print(question)",
"def test_generate_form_with_titles_and_no_answer_label(self):\n store = AnswerStore()\n\n conditional_answer = Answer(\n answer_id='behalf-of-answer',\n answer_instance=0,\n group_instance=0,\n value='chad',\n )\n\n store.add_or_update(conditional_answer)\n\n with self.app_request_context():\n schema = load_schema_from_params('test', 'titles')\n\n block_json = schema.get_block('multiple-question-versions-block')\n\n data = {\n 'gender-answer': 'male',\n 'age-answer': '25',\n 'sure-answer': 'yes'\n }\n\n expected_form_data = {\n 'csrf_token': '',\n 'gender-answer': 'male',\n 'age-answer': int('25'),\n 'sure-answer': 'yes'\n }\n with patch('app.questionnaire.path_finder.evaluate_goto', return_value=False):\n form = generate_form(schema, block_json, store, metadata={}, group_instance=0, group_instance_id=None, formdata=data)\n\n form.validate()\n self.assertEqual(form.data, expected_form_data)",
"def answer(self, answer):\n if answer is None:\n raise ValueError(\"Invalid value for `answer`, must not be `None`\")\n\n self._answer = answer",
"def display_question(content):\n\n question = random.randint(0, len(content[0])-1)\n print \"\\nUnit Test:\", content[0][question], ''\n options = [random.randint(0, len(content[1])-1),\n random.randint(0, len(content[1])-1),\n random.randint(0, len(content[1])-1)]\n options[random.randint(0,2)] = question\n print '1: ', content[1][options[0]],\n print '\\n2: ', content[1][options[1]],\n print '\\n3: ', content[1][options[2]],\n\n answer = input('\\nYour choice: ')\n\n answers_list = []\n answers_list.extend([options,answer,question])\n return answers_list",
"def score_answer(self, answer, answer_spec):\n raise NotImplementedError",
"def question():\n quest = raw_input(\"Do you want to Insert another article? Y/N\\n\")\n quest = quest.lower()\n if quest == \"y\" or quest == \"yes\":\n os.system(\"cls\")\n os.system(\"clear\")\n articles()\n elif quest == \"n\" or quest == \"no\":\n menu()\n else:\n print \"Insert a valid option\"\n question()",
"def show_question(self):\n\t\tprint(question)",
"def vqa_prompt(self, question, answer=None) -> str:",
"def next_question(self):\n # There is still another question to ask \n if self.index < len(self.questions) - 1:\n for rb in self.rbs:\n rb.configure(state = NORMAL)\n self.index+=1\n self.question_label.configure(text = self.questions[self.index].question)\n\n self.var = StringVar()\n self.var.set(0)\n for i in range(len(self.questions[self.index].answers)):\n ans_txt = self.questions[self.index].answers[i]\n self.rbs[i].configure(text = ans_txt, variable = self.var, value = ans_txt )\n self.feedback.config(text = \"\", height = 3)\n else:\n self.finish_quiz()",
"async def add_answer(self, ctx: Context, content: str, is_correct: bool, *, question: str):\n question_id = await ctx.db.fetchval(\"SELECT question_id from question where LOWER(content) = $1\",\n question.lower())\n if not question_id:\n return await ctx.send(\":no_entry: This question doesn't exist.\")\n\n async with ctx.db.acquire():\n await ctx.db.execute(\"\"\"INSERT INTO answer (question_id, content, is_correct) \n VALUES ($1,$2,$3) ON CONFLICT DO NOTHING\"\"\", question_id, content, is_correct)\n\n await ctx.send(\"> successfully updated.\")",
"def set_answer(self, answer_str):\n self._answer = answer_str",
"def check_user_answer(quiz, answers, blanks, blank_count):\n user_answer = raw_input(\"The current paragraph reads as such:\\n\\n\" + quiz +\n \"\\n\\nWhat should be substituted for\" + blanks[blank_count-1] + \"?\\n\\n\")\n correct_answer = answers[blank_count-1]\n if user_answer == correct_answer:\n return True\n else:\n return False",
"def test_student_set_answer_base_case() -> None:\n student = Student(1, 'John')\n q1 = MultipleChoiceQuestion(1, \"a b c or d?\", ['a', 'b', 'c', 'd'])\n a1 = Answer('a')\n q2 = CheckboxQuestion(5, \"do you like dogs?\", ['yes', 'no', 'sometimes'])\n a2 = Answer([\"yes\", \"sometimes\"])\n q3 = NumericQuestion(2, \"Pick num\", 1, 5)\n a3 = Answer(3)\n q4 = YesNoQuestion(4, \"T or F\")\n a4 = Answer(True)\n student.set_answer(q1, a1)\n student.set_answer(q2, a2)\n student.set_answer(q3, a3)\n student.set_answer(q4, a4)\n assert len(student._answers) == 4\n assert student._answers[1] == a1\n assert student._answers[5] == a2\n assert student._answers[2] == a3\n assert student._answers[4] == a4\n assert student._answers[1].content == 'a'\n assert student._answers[5].content == [\"yes\", \"sometimes\"]\n assert student._answers[2].content == 3\n assert student._answers[4].content == True",
"def do_story():\n # q_num and questions are global so they can be modified within the function globally\n global q_num\n global questions\n global breaks\n\n if q_num == 3:\n #print story so far and return\n story.set(\"\"\"It is the morning of September 20th, 2019. Yet the sun can't be seen. The \"\"\"+answers[0]+\"\"\" fog covers the desert. You and your \"best friend\" \"\"\"+answers[1]+\"\"\" ;) are joining in on the raid of Area 51. People are chanting \"Free them \"\"\"+answers[2]+\"\"\" aliens!!!\" And then you know, you and \"\"\"+answers[1]+\"\"\" will be victorious today.\"\"\")\n elif q_num == 7:\n # 7 is right before choice, print story so far, and then go back to questions[7] (q_num=8) to make choice\n story.set(\"\"\"Around you are middle-aged women with \"\"\"+answers[3]+\"\"\" red lipstick, they look like they're itching to speak to the managers of Area 51. Obviously Karens. Further out, \"\"\"+answers[4]+\"\"\" in the \"\"\"+answers[5]+\"\"\" desert sun, Monster Energy drinks litter the ground, lightly dusted in \"\"\"+answers[6]+\"\"\" inches of drywall dust. The Kyles. Definitely. You and \"\"\"+answers[1]+\"\"\" are the chosen leaders of this raid, chosen because of how cute both of you are together ;). Do you chose to send out the Kyles or Karens into battle first? We really need them aliens.\"\"\")\n elif q_num == 8:\n #this is after use chooses between karens and kyles, need to add corresponding questions to end of questions list, which is a global\n if answers[7] == '1':\n #if the answer to the choice answers[7] is 1, they chose Kyles\n #append the new corresponding questions to the questions list (which is global)\n questions.extend(['Enter an adjective that reminds you of power...', 'Enter a verb ending in ing...', 'Enter another verb ending in ing...', 'Enter an adverb (ends in -ly)...', 'Enter a body part (plural)...', 'Enter your favorite color...', 'Enter an adverb (ending in -ly)...', 'Enter a verb, present tense...', 'Enter something that you want...', 'Enter your favorite movie franchise...', 'Enter a body part (singular)...', 'Enter an adjective (something spicy ;))..., ', 'Enter an adverb (ending in -ly)', 'Enter a verb, present tense...', 'Enter another verb, past tense...', 'Enter a kind of pet...', 'Enter the name of a place you want to go...', 'Enter an adjective <3...', 'Enter the name of a song you would dance to...', 'Enter your name...', 'Enter a verb ending in ing...', 'Enter an internal organ...', \"\"\"Do you chose to...\\nPress 1 to stay with your lover\\nPress 2 to get with that alien\"\"\"])\n #after appending, you don't need to continue in do_story() loop because there is no new story to print\n else: \n #if the answer to choice answers[7] is 2 (else), they chose Karens\n #append the new corresponding questions to the questions list (which is global)\n questions.extend(['Enter a verb ending in ing...', 'Enter an adjective...', 'Enter another verb ending in ing (something violent)...', 'Enter a superlative (ending in -est)...', 'Enter a 3-digit code...', 'Something you look for in a lover (adjective)...', 'Enter an adverb (ending in -ly)...', 'Enter a verb, present tense...', 'Enter something that you want...', 'Enter your favorite movie franchise...', 'Enter a body part (singular)...', 'Enter an adjective (something spicy ;))..., ', 'Enter an adverb (ending in -ly', 'Enter a verb, ending in -ing...', 'Enter another verb, past tense...', 'Enter a kind of pet...', 'Enter the name of a place you want to go...', 'Enter an adjective <3...', 
'Enter the name of a song you would dance to...', 'Enter your name...', 'Enter a verb ending in ing...', 'Enter an internal organ...', \"\"\"Do you chose to...\\nPress 1 to stay with your lover\\nPress 2 to get with that alien\"\"\"])\n #after appending, you don't need to continue in do_story() loop because there is no new story to print\n #q_num 14 should be the end of the story (so far), so then print the remaining story\n elif q_num == 14:\n #check the answer to the choice so the right part of the story is printed\n #Kyles\n if answers[7] == '1':\n story.set(\"\"\"You chose to send the Kyles first. Their blood pumping with the \"\"\"+answers[8]+\"\"\" power of good 'ol Monster Energy. As you are \"\"\"+answers[9]+\"\"\" through the desert terrain, you feel the water in the air \"\"\"+answers[10]+\"\"\" your face. The Kyles pile up against the gates and begin punching the walls \"\"\"+answers[11]+\"\"\". But this is concrete, not drywall. It won't break. Annoyed with their failure, they begin bashing their \"\"\"+answers[12]+\"\"\" on the walls, and they slowly begin dying. \"\"\"+answers[13].capitalize()+\"\"\" blood covers the ground, the Kyles are down. It's up to the Karens now.\"\"\")\n #Karens\n else:\n story.set(\"\"\"The Karens march across the desert, \"\"\"+answers[8]+\"\"\" at the \"\"\"+answers[9]+\"\"\" guards. \"\"\"+answers[1]+\"\"\" whispers to you, \"I'm glad they're on our side xD\". A high pitch droning starts, each of the Karens \"\"\"+answers[10]+\"\"\" the guards. Using this as a distraction, the Kyles try to get through the door, which asks for a 3 digit pin. First, the \"\"\"+answers[11]+\"\"\" Kyle tried the code '\"\"\"+answers[12]+\"\"\"'. It works! We're in! I can already smell them \"\"\"+answers[13]+\"\"\" aliens.\"\"\")\n elif q_num == 30:\n story.set(\"\"\"The remainder of your army charges through the small door \"\"\"+answers[14]+\"\"\", all anxious to \"\"\"+answers[15]+\"\"\" more guards and see if they can find \"\"\"+answers[16]+\"\"\" in the Area 51 complex. As you rush into the complex, you see all the artifacts around you, including \"\"\"+answers[17]+\"\"\" 7 and a copy of the Krabby Patty secret formula. But what really catches your \"\"\"+answers[18]+\"\"\" is the \"\"\"+answers[19]+\"\"\" body of the alien, lying \"\"\"+answers[20]+\"\"\" in its cell. You don't really know if it's a guy or girl, but you're instantly attracted. You start \"\"\"+answers[21]+\"\"\" towards it, but then you feel the hand of \"\"\"+answers[1]+\"\"\" yank you back. Tears streaming down their face, you know they know what you saw. 'Please don't leave me, all the things we've been through together, the time when you \"\"\"+answers[22]+\"\"\" my \"\"\"+answers[23]+\"\"\", or when you took me to \"\"\"+answers[24]+\"\"\" for the first time and we stood in the \"\"\"+answers[25]+\"\"\" lights and danced to \"\"\"+answers[26]+\"\"\" all night long. Don't forget all of that! Gone. Us, together! \"\"\"+answers[27]+\"\"\", don't leave for a stupid stupid alien, please!'. Your feel your heart \"\"\"+answers[28]+\"\"\" in your \"\"\"+answers[29]+\"\"\" Do you stay with your love, \"\"\"+answers[1]+\"\"\" or do you go over to the alien, who you seem to like so much? :(\"\"\")\n elif q_num == 31:\n if answers[30] == '1':\n story.set(\"\"\"love is still alive. there is hope. for now. but you'll always have this itch in the back of your head, a regret in your head. you didn't choose the alien. you don't know its name. you'll never know what you could have been. 
or what you could be. you are only who you are now, and you can't change that. they always told you \"the present tense of regret is indecision\". you faced that indecision. you're with your love now, but was that the right choice?\"\"\")\n else: \n story.set(\"\"\"love is dead. there is no hope. but maybe you made the right choice. maybe 'love' is just a chemical reaction that compells us to breed. it hit you hard, but it will slowly fade, leaving you stranded in a failing marriage. there's no way out. at least now you have an alien.\"\"\")\n\n breaks.remove(q_num)\n #unpack all elements to show story\n q_text.pack_forget()\n q_input.config(state='disabled')\n q_input.pack_forget()\n q_num_label.pack_forget()\n submit_button.pack_forget()\n bottom_instructions.pack_forget()\n #pack label to show story\n story_lbl.pack(pady='20')\n #continue button to go back to story\n continue_button.pack(pady='15')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Translate a named color into '#rrggbb' format. If 'name' is not a string it is returned unchanged. If 'name' is already in '#rrggbb' format it is returned unchanged. If 'name' is not in leo_color_database then getColor(default, None) is called and that result is returned.
|
def getColor(name: str, default: str = None) -> str:
if not isinstance(name, str):
return name
if name[0] == '#':
return name
name = name.replace(' ', '').lower().strip()
if name in leo_color_database:
name2 = leo_color_database[name]
return name2
if default:
return getColor(default, default=None)
return None
|
[
"def get_named_color(name: str) -> Optional[LinearColor]:\n blueprint = get_editor_blueprint()\n if blueprint:\n config = blueprint.get_config()\n color_config = config.get(\"colors\", {})\n hex_color = color_config.get(name)\n if hex_color:\n return LinearColor.from_hex(hex_color)",
"def name_to_rgb(self, name):\n color = {\n 'R' : (0,0,255),\n 'L' : (0,165,255),\n 'B' : (255,0,0),\n 'F' : (0,255,0),\n 'U' : (255,255,255),\n 'D' : (0,255,255)\n }\n return color[name]",
"def getColorRGB(name: str, default: str = None) -> tuple[int, int, int]:\n s = getColor(name, default)\n try:\n color = int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16)\n except Exception:\n color = None\n return color",
"def getColor(name):\n api_r_ = c_int()\n api_g_ = c_int()\n api_b_ = c_int()\n api_a_ = c_int()\n ierr = c_int()\n lib.gmshOptionGetColor(\n c_char_p(name.encode()),\n byref(api_r_),\n byref(api_g_),\n byref(api_b_),\n byref(api_a_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOptionGetColor returned non-zero error code: \",\n ierr.value)\n return (\n api_r_.value,\n api_g_.value,\n api_b_.value,\n api_a_.value)",
"def colorname(name):\n name = name.lower()\n if name in Faction.csh:\n name = Faction.csh.get(name) # color shorthand (w,b,r,g,u)\n return name",
"def name_to_rgb(name: str, spec: str = CSS3) -> IntegerRGB:\n return hex_to_rgb(name_to_hex(name, spec=spec))",
"def getColorCairo(name: str, default: str = None) -> tuple[float, float, float]:\n color = getColorRGB(name, default)\n if color is None:\n return None\n try:\n r, g, b = color\n return r / 255.0, g / 255.0, b / 255.0\n except Exception:\n return None",
"def get_color_name(self):\n return self._color_name",
"def get_node_color(name):\n colordict = {'default': 'black', 'w_': 'blue', 'v_': 'red', 'q_': 'green'}\n if name[:2] in ['w_', 'q_', 'v_']:\n color = colordict[name[:2]]\n else:\n color = colordict['default']\n return color",
"def get_colorname(self,word,group):\n if group in self.groups:\n cN=self.groups[group].find_word(word)\n if cN:\n return cN\n cND=self.groups['General'].find_word(word)\n if cND:\n return cND",
"def get_colormap(name=\"normal\"):\n name = __process_name(name)\n assert name in list_colorsets(), \"name should exist in \" + str(list_colorsets())\n\n return distinctipy.get_colormap(colors[name], name=\"distinctipy_\" + name)",
"def get_card_color(color_name: str) -> CardColor:\n upper_color: str = color_name.replace('(UnityEngine.Material)', '').replace('card_', '').strip().upper()\n\n for data in CardColor:\n if data.value == upper_color:\n return data\n raise ValueError('Card color not found: %r' % upper_color)",
"def getMaskPlaneColor(name):\n\n if _maskPlaneColors.has_key(name):\n return _maskPlaneColors[name]\n else:\n return None",
"def get_brewer(cname=None, names=False, rgb=False, rgb256=False, reverse=False, grey=False, gray=False):\n if names:\n if names.lower() == 'sequential':\n return sequential_maps\n elif names.lower() == 'diverging':\n return diverging_maps\n elif names.lower() == 'qualitative':\n return qualitative_maps\n elif names.lower() == 'osu':\n return osu_maps\n elif names.lower() == 'ncl_large':\n return ncl_large_maps\n elif names.lower() == 'ncl_small':\n return ncl_small_maps\n elif names.lower() == 'ncl_meteo_swiss':\n return ncl_meteo_swiss_maps\n elif names.lower() == 'mma':\n return mma_maps\n else:\n cmaps = all_maps\n return cmaps\n else:\n cname = capitalise(cname)\n if rgb256:\n d = {}\n if cname in ncl_large_maps + ncl_small_maps + ncl_meteo_swiss_maps + mma_maps:\n exec('cpool = [ tuple([k*255. for k in j]) for j in '+cname+' ]', globals(), d)\n else:\n exec('cpool = '+cname, globals(), d)\n cpool = d['cpool']\n if reverse:\n cpool = cpool[::-1]\n if grey | gray:\n for j in range(len(cpool)):\n isgray = 0.2125 * cpool[j][0] + 0.7154 * cpool[j][1] + 0.072* cpool[j][2]\n cpool[j] = (isgray,isgray,isgray)\n return cpool\n # get colour tuple in 0-1\n elif rgb:\n d = {}\n if cname in ncl_large_maps + ncl_small_maps + ncl_meteo_swiss_maps + mma_maps:\n exec('cpool = '+cname, globals(), d)\n else:\n exec('cpool = [ tuple([k/255. for k in j]) for j in '+cname+' ]', globals(), d)\n cpool = d['cpool']\n if reverse:\n cpool = cpool[::-1]\n if grey | gray:\n for j in range(len(cpool)):\n isgray = 0.2125 * cpool[j][0] + 0.7154 * cpool[j][1] + 0.072* cpool[j][2]\n cpool[j] = (isgray,isgray,isgray)\n return cpool\n # register colour map with matplotlib\n else:\n import matplotlib.cm as cm\n register_brewer(cname,reverse=reverse, grey=grey, gray=gray)\n return cm.get_cmap(cname)",
"def parse_name_color(n):\n #probably is already the name\n return n",
"def get_detected_color_name(self):\n # ---------------------------------------------------------------------\n # TODO: With your instructor, implement this method.\n # ---------------------------------------------------------------------\n return self.color_names[self.get_reading()]",
"def from_name(cls, name):\n self = cls.__new__(cls) # TODO\n self.original_literal = name\n\n r, g, b, a = COLOR_NAMES[name]\n\n self.value = r, g, b, a\n return self",
"def add_color(self, html_color, name):\n self.add_package(\"color\")\n #hex->dec\n if len(html_color) == 4: #triple color code\n color = (int(html_color[1], 16), int(html_color[2], 16), int(html_color[3], 16))\n else:\n color = (int(html_color[1:3], 16), int(html_color[3:5], 16), int(html_color[5:7], 16))\n #get name\n if name:\n if name in self._defined_colors and self._defined_colors[name] == color:\n return name #we have already defined this color\n if name in self._defined_colors and not self._defined_colors[name] == color:\n #we have same name but different color codes, so we create new name by adding number to it\n i = 1\n while name + str(i) in self._defined_colors:\n i += 1\n self._defined_colors[name + str(i)] = color\n self._other.append(\"\\\\definecolor{\" + name + str(i) + \"}{RGB}{\" + \",\".join((str(x) for x in color)) + \"}\")\n return name + str(i)\n #we have unique name so we just add it\n self._defined_colors[name] = color\n self._other.append(\"\\\\definecolor{\" + name + \"}{RGB}{\" + \",\".join((str(x) for x in color)) + \"}\")\n return name\n else:\n sys.stderr(\"Invalid name for color\")",
"def get_color(self):\n color = askcolor(color=(self.g, self.r, self.b))\n grb = color[0]\n if grb != None:\n self.g = grb[0]\n self.r = grb[1]\n self.b = grb[2]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a named color into an (r, g, b) tuple.
|
def getColorRGB(name: str, default: str = None) -> tuple[int, int, int]:
s = getColor(name, default)
try:
color = int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16)
except Exception:
color = None
return color
|
[
"def colorTuple(c):\n return c.getRgb()",
"def name_to_rgb(self, name):\n color = {\n 'R' : (0,0,255),\n 'L' : (0,165,255),\n 'B' : (255,0,0),\n 'F' : (0,255,0),\n 'U' : (255,255,255),\n 'D' : (0,255,255)\n }\n return color[name]",
"def getColorCairo(name: str, default: str = None) -> tuple[float, float, float]:\n color = getColorRGB(name, default)\n if color is None:\n return None\n try:\n r, g, b = color\n return r / 255.0, g / 255.0, b / 255.0\n except Exception:\n return None",
"def color_to_triple(color: Optional[str] = None) -> Tuple[int, int, int]:\n if color is None:\n r = np.random.randint(0, 0x100)\n g = np.random.randint(0, 0x100)\n b = np.random.randint(0, 0x100)\n return (r, g, b)\n else:\n return ImageColor.getrgb(color)",
"def color_string_to_tuple(s):\n if isinstance(s, tuple): # CIETmap requirement\n return s\n if s.startswith('('): # assume this is '(r,g,b,a)' (or '(r g b a)')\n s = s[1:-1]\n sep = None\n if ',' in s: # in case this is a space separated variant\n sep = ','\n rgba = s.split(sep)\n return tuple([float(c) for c in rgba])",
"def getColor(name):\n api_r_ = c_int()\n api_g_ = c_int()\n api_b_ = c_int()\n api_a_ = c_int()\n ierr = c_int()\n lib.gmshOptionGetColor(\n c_char_p(name.encode()),\n byref(api_r_),\n byref(api_g_),\n byref(api_b_),\n byref(api_a_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOptionGetColor returned non-zero error code: \",\n ierr.value)\n return (\n api_r_.value,\n api_g_.value,\n api_b_.value,\n api_a_.value)",
"def to_rgb(value: TypeColor) -> Tuple[int, int, int]:\n if isinstance(value, tuple):\n return value\n\n if isinstance(value, int):\n value = (value, value, value)\n\n elif isinstance(value, str):\n value = COLORS[value]\n\n return value",
"def name_to_rgb(name: str, spec: str = CSS3) -> IntegerRGB:\n return hex_to_rgb(name_to_hex(name, spec=spec))",
"def parse_color(val, dflt=None):\n if val in named_colors:\n return named_colors[val]\n\n vals = val.split(':')\n if len(vals) == 3:\n return tuple(float(v) / 255 for v in vals)\n\n return dflt",
"def getColor(rgb=None, hsv=None):\n # recursion, return a list if input is list of colors:\n if _isSequence(rgb) and (len(rgb) > 3 or _isSequence(rgb[0])):\n seqcol = []\n for sc in rgb:\n seqcol.append(getColor(sc))\n return seqcol\n\n # because they are most common:\n if rgb=='r':\n return (0.9960784313725, 0.11764705882352, 0.121568627450980)\n elif rgb=='g':\n return (0.0156862745098, 0.49803921568627, 0.062745098039215)\n elif rgb=='b':\n return (0.0588235294117, 0.0, 0.984313725490196)\n\n if str(rgb).isdigit():\n rgb = int(rgb)\n\n if hsv:\n c = hsv2rgb(hsv)\n else:\n c = rgb\n\n if _isSequence(c):\n if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:\n return c # already rgb\n else:\n if len(c) == 3:\n return list(np.array(c) / 255.0) # RGB\n else:\n return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA\n\n elif isinstance(c, str): # is string\n c = c.replace(\"grey\", \"gray\").replace(\" \", \"\")\n if 0 < len(c) < 3: # single/double letter color\n if c.lower() in color_nicks.keys():\n c = color_nicks[c.lower()]\n else:\n vedo.logger.warning(f\"Unknown color nickname {c}\\nAvailable abbreviations: {color_nicks}\")\n return (0.5, 0.5, 0.5)\n\n if c.lower() in colors.keys(): # matplotlib name color\n c = colors[c.lower()]\n # from now format is hex!\n\n if c.startswith(\"#\"): # hex to rgb\n h = c.lstrip(\"#\")\n rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))\n rgbh = np.array(rgb255) / 255.0\n if np.sum(rgbh) > 3:\n vedo.logger.error(f\"in getColor(): Wrong hex color {c}\")\n return (0.5, 0.5, 0.5)\n return tuple(rgbh)\n\n else: # vtk name color\n namedColors = vtk.vtkNamedColors()\n rgba = [0, 0, 0, 0]\n namedColors.GetColor(c, rgba)\n return (rgba[0]/255.0, rgba[1]/255.0, rgba[2]/255.0)\n\n elif isinstance(c, int): # color number\n if c >= 0:\n return colors1[c % 10]\n else:\n return colors2[-c % 10]\n\n elif isinstance(c, float):\n if c >= 0:\n return colors1[int(c) % 10]\n else:\n return colors2[int(-c) % 10]\n\n # print(\"Unknown color:\", c)\n return (0.5, 0.5, 0.5)",
"def color_to_tuple(value):\n if isinstance(value, tuple):\n return value\n if isinstance(value, int):\n if value >> 24:\n raise ValueError(\"Only bits 0->23 valid for integer input\")\n r = value >> 16\n g = (value >> 8) & 0xFF\n b = value & 0xFF\n return [r, g, b]\n\n raise ValueError(\"Color must be a tuple or 24-bit integer value.\")",
"def matplotlib_rgb_color(rgb_color):\n return tuple([i/255. for i in rgb_color])",
"def get_rgb_from_value(v: float) -> Tuple[int, int, int]:\n # colorsys returns rgb values between 0 and 1\n r, g, b = colorsys.hls_to_rgb(v, 0.5, 1)\n\n # multiply by 255 to get values between 0 and 255\n red = round(r * 255)\n green = round(g * 255)\n blue = round(b * 255)\n return red, green, blue",
"def get_named_color(name: str) -> Optional[LinearColor]:\n blueprint = get_editor_blueprint()\n if blueprint:\n config = blueprint.get_config()\n color_config = config.get(\"colors\", {})\n hex_color = color_config.get(name)\n if hex_color:\n return LinearColor.from_hex(hex_color)",
"def unpack255(color):\n r, g, b, a = unpack(color)\n r = r << 3 | r >> 2\n g = g << 3 | g >> 2\n b = b << 3 | b >> 2\n return (r, g, b, 255 if a else 0)",
"def get_color(self):\n color = askcolor(color=(self.g, self.r, self.b))\n grb = color[0]\n if grb != None:\n self.g = grb[0]\n self.r = grb[1]\n self.b = grb[2]",
"def get_light_color(self, light_name: str):\n self.__send_command(CommandsBytes.GET_LIGHT_COLOR)\n # Send the name\n self.__send_string(light_name)\n result = self.__receive_string()\n if result == \"ok\":\n # Receive the color\n r = self.__receive_int()\n g = self.__receive_int()\n b = self.__receive_int()\n return r, g, b\n print(\"Error getting light color\")\n return None",
"def parse_color_string(color_string):\n\n if len(color_string) == 3:\n r = int(color_string[0], 16) * 17\n g = int(color_string[1], 16) * 17\n b = int(color_string[2], 16) * 17\n elif len(color_string) == 6:\n r = int(color_string[0:2], 16)\n g = int(color_string[2:4], 16)\n b = int(color_string[4:6], 16)\n else:\n ValueError('Color string must be either 3 or 6 hexadecimal digits long')\n\n return r, g, b",
"def parse_name_color(n):\n #probably is already the name\n return n"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a named color into a cairo color tuple.
|
def getColorCairo(name: str, default: str = None) -> tuple[float, float, float]:
color = getColorRGB(name, default)
if color is None:
return None
try:
r, g, b = color
return r / 255.0, g / 255.0, b / 255.0
except Exception:
return None
|
[
"def colorTuple(c):\n return c.getRgb()",
"def color_to_triple(color: Optional[str] = None) -> Tuple[int, int, int]:\n if color is None:\n r = np.random.randint(0, 0x100)\n g = np.random.randint(0, 0x100)\n b = np.random.randint(0, 0x100)\n return (r, g, b)\n else:\n return ImageColor.getrgb(color)",
"def getColorRGB(name: str, default: str = None) -> tuple[int, int, int]:\n s = getColor(name, default)\n try:\n color = int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16)\n except Exception:\n color = None\n return color",
"def parse_color(val, dflt=None):\n if val in named_colors:\n return named_colors[val]\n\n vals = val.split(':')\n if len(vals) == 3:\n return tuple(float(v) / 255 for v in vals)\n\n return dflt",
"def color_rgb_to_cairo(color): \n return (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)",
"def color_tuple_to_gdk(color):\n r,g,b,a = color\n return gtk.gdk.Color(int(r*MAX_COLOR), int(g*MAX_COLOR), int(b*MAX_COLOR))",
"def color_string_to_tuple(s):\n if isinstance(s, tuple): # CIETmap requirement\n return s\n if s.startswith('('): # assume this is '(r,g,b,a)' (or '(r g b a)')\n s = s[1:-1]\n sep = None\n if ',' in s: # in case this is a space separated variant\n sep = ','\n rgba = s.split(sep)\n return tuple([float(c) for c in rgba])",
"def to_rgb(value: TypeColor) -> Tuple[int, int, int]:\n if isinstance(value, tuple):\n return value\n\n if isinstance(value, int):\n value = (value, value, value)\n\n elif isinstance(value, str):\n value = COLORS[value]\n\n return value",
"def mkColor(*args):\n err = 'Not sure how to make a color from \"%s\"' % str(args)\n if len(args) == 1:\n if isinstance(args[0], str):\n c = args[0]\n if len(c) == 1:\n try:\n return Colors[c]\n except KeyError:\n raise ValueError('No color named \"%s\"' % c)\n have_alpha = len(c) in [5, 9] and c[0] == '#' # \"#RGBA\" and \"#RRGGBBAA\"\n if not have_alpha:\n # try parsing SVG named colors, including \"#RGB\" and \"#RRGGBB\".\n # note that QColor.setNamedColor() treats a 9-char hex string as \"#AARRGGBB\".\n qcol = QtGui.QColor()\n qcol.setNamedColor(c)\n if qcol.isValid():\n return qcol\n # on failure, fallback to pyqtgraph parsing\n # this includes the deprecated case of non-#-prefixed hex strings\n if c[0] == '#':\n c = c[1:]\n else:\n raise ValueError(f\"Unable to convert {c} to QColor\")\n if len(c) == 3:\n r = int(c[0]*2, 16)\n g = int(c[1]*2, 16)\n b = int(c[2]*2, 16)\n a = 255\n elif len(c) == 4:\n r = int(c[0]*2, 16)\n g = int(c[1]*2, 16)\n b = int(c[2]*2, 16)\n a = int(c[3]*2, 16)\n elif len(c) == 6:\n r = int(c[0:2], 16)\n g = int(c[2:4], 16)\n b = int(c[4:6], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0:2], 16)\n g = int(c[2:4], 16)\n b = int(c[4:6], 16)\n a = int(c[6:8], 16)\n else:\n raise ValueError(f\"Unknown how to convert string {c} to color\")\n elif isinstance(args[0], QtGui.QColor):\n return QtGui.QColor(args[0])\n elif np.issubdtype(type(args[0]), np.floating):\n r = g = b = int(args[0] * 255)\n a = 255\n elif hasattr(args[0], '__len__'):\n if len(args[0]) == 3:\n r, g, b = args[0]\n a = 255\n elif len(args[0]) == 4:\n r, g, b, a = args[0]\n elif len(args[0]) == 2:\n return intColor(*args[0])\n else:\n raise TypeError(err)\n elif np.issubdtype(type(args[0]), np.integer):\n return intColor(args[0])\n else:\n raise TypeError(err)\n elif len(args) == 3:\n r, g, b = args\n a = 255\n elif len(args) == 4:\n r, g, b, a = args\n else:\n raise TypeError(err)\n args = [int(a) if np.isfinite(a) else 0 for a in (r, g, b, a)]\n return QtGui.QColor(*args)",
"def name_to_rgb(self, name):\n color = {\n 'R' : (0,0,255),\n 'L' : (0,165,255),\n 'B' : (255,0,0),\n 'F' : (0,255,0),\n 'U' : (255,255,255),\n 'D' : (0,255,255)\n }\n return color[name]",
"def getColor(name):\n api_r_ = c_int()\n api_g_ = c_int()\n api_b_ = c_int()\n api_a_ = c_int()\n ierr = c_int()\n lib.gmshOptionGetColor(\n c_char_p(name.encode()),\n byref(api_r_),\n byref(api_g_),\n byref(api_b_),\n byref(api_a_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOptionGetColor returned non-zero error code: \",\n ierr.value)\n return (\n api_r_.value,\n api_g_.value,\n api_b_.value,\n api_a_.value)",
"def color_hex_to_cairo(color): \n if color[0] == '#': \n color = color[1:] \n (r, g, b) = (int(color[:2], 16), \n int(color[2:4], 16), \n int(color[4:], 16)) \n return color_rgb_to_cairo((r, g, b))",
"def color_to_tuple(value):\n if isinstance(value, tuple):\n return value\n if isinstance(value, int):\n if value >> 24:\n raise ValueError(\"Only bits 0->23 valid for integer input\")\n r = value >> 16\n g = (value >> 8) & 0xFF\n b = value & 0xFF\n return [r, g, b]\n\n raise ValueError(\"Color must be a tuple or 24-bit integer value.\")",
"def getColor(rgb=None, hsv=None):\n # recursion, return a list if input is list of colors:\n if _isSequence(rgb) and (len(rgb) > 3 or _isSequence(rgb[0])):\n seqcol = []\n for sc in rgb:\n seqcol.append(getColor(sc))\n return seqcol\n\n # because they are most common:\n if rgb=='r':\n return (0.9960784313725, 0.11764705882352, 0.121568627450980)\n elif rgb=='g':\n return (0.0156862745098, 0.49803921568627, 0.062745098039215)\n elif rgb=='b':\n return (0.0588235294117, 0.0, 0.984313725490196)\n\n if str(rgb).isdigit():\n rgb = int(rgb)\n\n if hsv:\n c = hsv2rgb(hsv)\n else:\n c = rgb\n\n if _isSequence(c):\n if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:\n return c # already rgb\n else:\n if len(c) == 3:\n return list(np.array(c) / 255.0) # RGB\n else:\n return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA\n\n elif isinstance(c, str): # is string\n c = c.replace(\"grey\", \"gray\").replace(\" \", \"\")\n if 0 < len(c) < 3: # single/double letter color\n if c.lower() in color_nicks.keys():\n c = color_nicks[c.lower()]\n else:\n vedo.logger.warning(f\"Unknown color nickname {c}\\nAvailable abbreviations: {color_nicks}\")\n return (0.5, 0.5, 0.5)\n\n if c.lower() in colors.keys(): # matplotlib name color\n c = colors[c.lower()]\n # from now format is hex!\n\n if c.startswith(\"#\"): # hex to rgb\n h = c.lstrip(\"#\")\n rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))\n rgbh = np.array(rgb255) / 255.0\n if np.sum(rgbh) > 3:\n vedo.logger.error(f\"in getColor(): Wrong hex color {c}\")\n return (0.5, 0.5, 0.5)\n return tuple(rgbh)\n\n else: # vtk name color\n namedColors = vtk.vtkNamedColors()\n rgba = [0, 0, 0, 0]\n namedColors.GetColor(c, rgba)\n return (rgba[0]/255.0, rgba[1]/255.0, rgba[2]/255.0)\n\n elif isinstance(c, int): # color number\n if c >= 0:\n return colors1[c % 10]\n else:\n return colors2[-c % 10]\n\n elif isinstance(c, float):\n if c >= 0:\n return colors1[int(c) % 10]\n else:\n return colors2[int(-c) % 10]\n\n # print(\"Unknown color:\", c)\n return (0.5, 0.5, 0.5)",
"def get_named_color(name: str) -> Optional[LinearColor]:\n blueprint = get_editor_blueprint()\n if blueprint:\n config = blueprint.get_config()\n color_config = config.get(\"colors\", {})\n hex_color = color_config.get(name)\n if hex_color:\n return LinearColor.from_hex(hex_color)",
"def matplotlib_rgb_color(rgb_color):\n return tuple([i/255. for i in rgb_color])",
"def _color_to_tripple( color ):\n from paramio import pget, plist\n\n if color in plist(\"colors.par\"):\n rgb = pget('colors.par', color ) \n rgb = rgb.split()\n rgb = [float(x) for x in rgb]\n return rgb\n\n if len(color.split()) == 3:\n rgb = color.split()\n rgb = [float(x) for x in rgb]\n if max(rgb) > 1.0:\n rgb = [x/255.0 for x in rgb]\n if max(rgb) > 1.0 or min(rgb) < 0.0:\n raise ValueError(\"Color values must be between 0 and 255\")\n return rgb\n\n if len(color) == 8 and color.lower().startswith('0x') is True:\n def __hex_to_int(cc):\n return int( '0x'+cc, base=16 )\n \n rr = __hex_to_int( color[2:4])\n gg = __hex_to_int( color[4:6])\n bb = __hex_to_int( color[6:])\n rgb = [ rr/255.0, gg/255.0, bb/255.0] \n return rgb\n \n\n raise ValueError(\"Unable to parse color value and cannot locate color='{}' in colors.par\".format(color))",
"def getColor(name: str, default: str = None) -> str:\n if not isinstance(name, str):\n return name\n if name[0] == '#':\n return name\n name = name.replace(' ', '').lower().strip()\n if name in leo_color_database:\n name2 = leo_color_database[name]\n return name2\n if default:\n return getColor(default, default=None)\n return None",
"def black_to_color(color, **kwargs):\n\n return lut_colors( ['black', color], **kwargs )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
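The three helpers above (getColor, getColorRGB, getColorCairo) form a small conversion chain: name -> '#rrggbb' string -> 0-255 RGB tuple -> 0.0-1.0 Cairo tuple. Below is a minimal, self-contained sketch of the same chain; the tiny color_db and the to_* names are placeholders for illustration only and are not part of the original leo_color_database code.

from typing import Optional, Tuple

# Hypothetical stand-in for the real color database (contents assumed).
color_db = {"red": "#ff0000", "leoblue": "#3c78d8"}

def to_hex(name: str, default: Optional[str] = None) -> Optional[str]:
    # Normalize the name and resolve it to a '#rrggbb' string.
    key = name.replace(" ", "").lower().strip()
    if key.startswith("#"):
        return key
    if key in color_db:
        return color_db[key]
    return to_hex(default) if default else None

def to_rgb(name: str) -> Optional[Tuple[int, int, int]]:
    # '#rrggbb' -> (r, g, b) with 0-255 components.
    s = to_hex(name)
    return None if s is None else (int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16))

def to_cairo(name: str) -> Optional[Tuple[float, float, float]]:
    # (r, g, b) -> Cairo-style floats in [0.0, 1.0].
    rgb = to_rgb(name)
    return None if rgb is None else tuple(c / 255.0 for c in rgb)

print(to_hex("Red"), to_rgb("red"), to_cairo("red"))
# Expected: #ff0000 (255, 0, 0) (1.0, 0.0, 0.0)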
Return lat/lon coordinates for an address.
|
def get_lat_lng(address):
g = geocoder.google(address)
return g.latlng
|
[
"def address_to_coords(self, address):\n params = urlencode({\"sensor\": \"false\",\n \"address\": address})\n url = \"http://maps.googleapis.com/maps/api/geocode/json?\" + params\n results = json.loads(self.send(url))\n if results['status'] != 'OK':\n return None\n if len(results['results']) > 1:\n print \"Warning: search for %s returned more then one results, using the first one\" % address\n result = results['results'][0]\n location = result['geometry']['location']\n return \"%.7f\" % location['lat'], \"%.7f\" % location['lng']",
"def get_gps_from_address(adress):\n\n google_api_url = \"http://maps.google.com/maps/api/geocode/json?address=%s&sensor=false\" \\\n % adress.encode('utf8')\n\n data_google = json.loads(requests.get(google_api_url).content)\n if data_google.get('results'):\n lat = float(data_google['results'][0]['geometry']['location']['lat'])\n lng = float(data_google['results'][0]['geometry']['location']['lng'])\n else:\n lat = 48\n lng = 2\n return lat, lng",
"def address_to_latlng(address):\n location_geo = geocode(address)\n location = {}\n location['lat'] = location_geo['lon']\n location['lon'] = location_geo['lat']\n print location\n return tuple(location.values())",
"def get_lat_lng_from_area_name(address):\n \n # Complete the address by adding \"Chicago, United States\"\n complete_address = address + \" \" + cst.CHICAGO_ADDRESS\n \n # get url to request\n url = get_url_from_address(complete_address)\n \n # get response from url\n r = requests.get(url)\n \n # parse response text to find string containing latitude and longitude\n lat_and_lng = r.text.partition(cst.LAT_LNG_HTML_POSITION_START)[2].partition(cst.LAT_LNG_HTML_POSITION_STOP)[0]\n \n # retrieve lat and lng\n lng, lat = lat_and_lng.split(',')[1:]\n \n return lat, lng",
"def get_coordinates(address, timeout=5):\n geolocator = Nominatim(user_agent=\"my-application\")\n location = geolocator.geocode(address, timeout=timeout)\n if not location:\n return None, None\n str_lat = str(location.latitude)\n str_long = str(location.longitude) \n str_loc = str_lat + \" \" + str_long\n print(str_loc, type(str_loc))\n return str_loc",
"def get_latlng(address):\n print(\"querying mapquest for\", address)\n\n try:\n result = geocoder.mapquest(\n address, key=os.environ.get(\"MAPQUEST_API_KEY\")\n ).json\n except Exception as e:\n print(e)\n # Exit if a match can't be found\n sys.exit(1)\n \n result = (result[\"lat\"], result[\"lng\"])\n print(\"found\", result)\n return result",
"def get_position(address):\n return GoogleGeocoder().geocode(address)[1]",
"def get_address(lat, lng):\n def _norm_len(s):\n return len(unicodedata.normalize(\"NFC\", s).encode('utf-8'))\n def _cut(s, max_len=72):\n if _norm_len(s) < max_len: return s\n while _norm_len(s) >= max_len:\n ss = s.split(\",\")\n s = ', '.join([x.strip() for x in ss[1:]])\n return s\n\n # otherwise we get unicode mixed with latin which often exceeds\n # the 140character limit of twitter :(\n headers = {'Accept-Language': \"en-US,en;q=0.8\"}\n nominatim_url = (\"http://nominatim.openstreetmap.org/reverse?lat=%f&lon=%f&\"\n \"addressdetails=0&format=json&zoom=6&extratags=0\")\n info = json.loads(requests.get(nominatim_url % (lat, lng),\n headers=headers).text)\n if 'error' in info:\n return 'Unknown location, do you know it? Tell @openstreetmap'\n\n return _cut(info['display_name'])",
"def geocode_location(address):\n try:\n result = Geocoder.geocode(address)\n lat, lng = result[0].coordinates\n if result.city != \"San Francisco\": # Database only returns foodtrucks in San Francisco\n return None\n return lat, lng\n except:\n return None",
"def get_revaddress(address):\n headers = {'Accept-Language': \"en-US,en;q=0.8\"}\n nom_str = \"http://nominatim.openstreetmap.org/search?q={address}&format=json&polygon=1&addressdetails=1&limit=1\"\n return [(float(cobj['lat']), float(cobj['lon'])) for cobj in json.loads(requests.get(nom_str.format(address = \n address),\n headers = headers\n ).text)]",
"def get_address_location(self, address: str) -> Location:\n result: dict = self._client.geocode(address)\n x, y = result[0]['geometry']['location'].values()\n return Location(x, y)",
"def reverse_address(address, original_lat = None, original_long = None):\n locator = Nominatim(user_agent=\"openmapquest\")\n try:\n location = locator.geocode(address)\n print(location)\n if location:\n return location.latitude, location.longitude\n else:\n if original_lat and original_long:\n print(\"Returning original lat and long\")\n return original_lat,original_long\n else:\n return 0,0\n except geopy.exc.GeocoderUnavailable as e:\n if original_lat and original_long:\n print(\"Returning original lat and long\")\n\n return original_lat,original_long\n else:\n return 0,0",
"def find_coords_by_bad_address(address):\n data = pd.read_excel(\"sample.xlsx\")\n rows = data.shape[0]\n for row in range(rows):\n if address in data.at[row, \"Address\"]:\n return [data.at[row, \"Latitude\"], data.at[row, \"Longitude\"]]\n return None",
"def _queryOSM(address):\n url = _LONG_LAT_URL_Nominatim + quote(address) + '&format=json&polygon=0'\n response = _cached_json_get(url)\n if not response:\n raise ValueError(f\"Address not found: '{address}'\")\n return (float(response[0].get(key)) for key in ('lat', 'lon'))",
"def geocode(self, recode=False):\n if not self.lat or not self.long or recode:\n # get the geocoordinates for the adress\n # TODO log geocodings into the db\n g = geocoders.Google(settings.GOOGLE_API_KEY)\n adr = '%s, %s %s, %s' % (self.street, self.zipcode, self.city, self.country)\n (self.lat, self.long) = g.geocode(adr)[1]\n self.save()\n return (self.lat, self.long)",
"def get_coords_from_postcode(postcode):\n # Insert space if one doesn't exist\n if postcode[-4] != ' ':\n postcode = list(postcode)\n postcode.insert(-3, ' ')\n postcode = ''.join(postcode)\n location = geolocator.geocode(\"{}, UK\".format(postcode))\n\n return location.longitude, location.latitude",
"def get_geo_location(address, max_result):\n if Geocoder.isPresent():\n print(\"GeoCoder is present...\")\n geo = Geocoder(PythonActivity.mActivity, Locale.getDefault())\n print(\"Looked up addresses\")\n java_list = geo.getFromLocationName(address, max_result)\n if java_list:\n print(\"List found...\")\n addresses = []\n for addr in java_list.toArray():\n addresses.append(_GeoAddress(\n city=str(addr.getLocality()),\n county=str(addr.getSubAdminArea()),\n country=str(addr.getAdminArea()),\n postcode=str(addr.getPostalCode()),\n second_address=str(addr.getThoroughfare()),\n house_number=str(addr.getSubThoroughfare()),\n latitude=addr.getLatitude(),\n longitude=addr.getLongitude()\n ))\n return addresses\n else:\n print(\"No list found...\")\n else:\n print(\"No GeCoder present\")\n return []",
"def get_coords(query):\n response = geocoder.forward(query)\n if response.status_code and len(response.geojson()['features']) >= 1:\n first = response.geojson()['features'][0]\n return first['geometry']['coordinates']\n else:\n return handle_failure(query)",
"def get_address(self, address: str) -> Address:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add several nodes (e.g., fusions) to the graph.
|
def add_nodes(self, fusions: List[hmn_fusion.Fusion]) -> None:
for fusion in fusions:
self.add_node(fusion)
|
[
"def add_nodes(self, *nodes):\n if isinstance(nodes, tuple):\n for node in nodes:\n self.nodes.add(node)\n else:\n self.nodes.add(nodes)",
"def add_edges(self, *nodes):\n for node in nodes:\n self.adjacent.add(node)\n node.adjacent.add(self)",
"def add_friend(self,friends):\n\n\t\tfor friend in friends:\n\t\t\tself.nodes.add(friend)",
"def do_add_nodes(self, args):\n lb = self.findlb(args.loadbalancer)\n nodes = split_nodes(args.nodes)\n lb.add_nodes(nodes)",
"def add_nodes(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"\\n%%%%%%%%%% ADDING NODES %%%%%%%%%%%%%\\n\\n\")\n\t\t\ti = 0\n\t\t\tfor v in self.G.nodes:\n\t\t\t\tf.write('\\t\\\\Vertex[x={}, y={}]{{{}}}\\n'.format(round(self.factor*v.x, 3), round(self.factor*v.y, 3), i))\n\t\t\t\t\n\t\t\t\tself.vtoid[v] = i\t\t\t\t\n\t\t\t\t\n\t\t\t\ti += 1",
"def add(self, node):\n self.nodes.append(node)\n self.count += 1",
"def add_nodes(self, events, timexes):\n for timex in timexes:\n node = Node(timex=timex)\n self.nodes[node.id] = node\n for event in events:\n node = Node(event=event)\n self.nodes[node.id] = node\n for n1 in self.nodes.keys():\n self.edges[n1] = {}\n for n2 in self.nodes.keys():\n self.edges[n1][n2] = Edge(n1, n2, self)",
"def add_nodes(self, nb_sample=40, verbose=True):\n samples = self.sample(nb_sample)\n for state in samples:\n self.graph.add_node(state)",
"def createNodes(self, node_set: set):\n for n in node_set:\n self.addNode(n)",
"def add_node(self, node: BONNode):\n self.nodes.append(node)",
"def _connect_nodes(self, node_a, node_b):\n self._graph[node_a].append(node_b)\n self._graph[node_b].append(node_a)",
"def add_node(self, data):\n\n self.nodes.add(data)",
"def add(self, nodes):\n if len(self._subgroups) > 1:\n raise AddingNodesNotAllowedError('In the merge phase adding nodes is not longer possible.')\n\n if isinstance(nodes, type(self)):\n # only the nodes of the second merge group are collected. That is NOT a merging operation!\n self.add(list(nodes))\n else:\n if not isinstance(nodes, (set, list)):\n nodes = [nodes]\n if len(self._subgroups) == 0:\n self._subgroups.append(set())\n self._index_of_primary_subgroup = 0\n self._subgroups[0].update(nodes)",
"def register_nodes():\n values = request.get_json()\n\n nodes = values.get('nodes')\n new_node = values.get('new-node')\n if new_node != node.uri:\n node.network.add(new_node)\n node.blockchain.nodes.add(new_node)\n\n for n in node.network:\n if n not in nodes:\n node.register_node(n, values)\n\n response = {\n 'message': 'Node Added',\n 'nodes': list(node.network),\n }\n return jsonify(response), 201",
"def test_add_node(nodes_to_add):\n from graph import Graph\n graph = Graph()\n\n for node in nodes_to_add:\n graph.add_node(node)\n\n assert graph.nodes() == nodes_to_add",
"def addNodes(dim, tag, nodeTags, coord, parametricCoord=[]):\n api_nodeTags_, api_nodeTags_n_ = _ivectorsize(nodeTags)\n api_coord_, api_coord_n_ = _ivectordouble(coord)\n api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)\n ierr = c_int()\n lib.gmshModelMeshAddNodes(\n c_int(dim),\n c_int(tag),\n api_nodeTags_, api_nodeTags_n_,\n api_coord_, api_coord_n_,\n api_parametricCoord_, api_parametricCoord_n_,\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshAddNodes returned non-zero error code: \",\n ierr.value)",
"def add_edge(self, node1, node2):\n node1.add_edges(node2)",
"def addNode(graph={}, node=''):\r\n graph1 = graph\r\n graph2 = {node :[ ]}\r\n return unionGraphs(graph1, graph2)",
"def add_nodes(self, loadbalancer, nodes):\r\n return loadbalancer.add_nodes(nodes)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create consensus nodes from nodes already present in the graph. Returns None.
|
def consensus_single(self) -> None:
softwares = set(
[self.graph.nodes[x]["fusion"].software for x in self.graph.nodes]
)
for software in sorted(list(softwares)):
g, cons, alone = self._grapp_subgraph(software)
nodes = g.nodes
nodes_added = []
for fl, fr in itertools.combinations(nodes, 2):
if self.graph.nodes[fl]["fusion"].is_near(
self.graph.nodes[fr]["fusion"],
self.graph.graph["consensus_interval"],
):
if self.graph.degree[fl] > 0 or self.graph.degree[fr] > 0:
neighbors = list(nx.neighbors(self.graph, fl))
neighbors += list(nx.neighbors(self.graph, fr))
for nc in neighbors:
self.graph.add_edge(nc, fl)
self.graph.add_edge(nc, fr)
# Update.
if (
self.graph.nodes[fl]["fusion"]
> self.graph.nodes[nc]["fusion"]
):
self.graph.nodes[nc]["fusion"].update(
self.graph.nodes[fl]["fusion"]
)
if (
self.graph.nodes[fr]["fusion"]
> self.graph.nodes[nc]["fusion"]
):
self.graph.nodes[nc]["fusion"].update(
self.graph.nodes[fr]["fusion"]
)
else:
nodes_added.append(self._add_node_consensus(fl, fr))
# Refined for circular nodes.
while len(nodes_added) > 1:
combs = list(itertools.combinations(nodes_added, 2))
loop = -1
for ix, (fl, fr) in enumerate(combs):
if self.graph.nodes[fl]["fusion"].is_near(
self.graph.nodes[fr]["fusion"],
self.graph.graph["consensus_interval"],
):
node_kept = -1
node_rm = -1
neighbors = []
# Select node to keep.
if (
self.graph.nodes[fl]["fusion"]
> self.graph.nodes[fr]["fusion"]
):
node_kept = fl
node_rm = fr
else:
node_kept = fr
node_rm = fl
neighbors = list(
set(nx.neighbors(self.graph, node_rm)).difference(
set(nx.neighbors(self.graph, node_kept))
)
)
for nc in neighbors:
self.graph.add_edge(nc, node_kept)
nodes_added.remove(node_rm)
self.graph.remove_node(node_rm)
break
loop = ix
# If no update was done
if loop + 1 == len(combs):
break
|
[
"def create_cluster_branch(self, orphan, line_num): # create cluster\n\n try:\n cluster_id = orphan.flat_value\n except:\n cluster_id = tuple([x.flat_value for x in orphan])\n\n # create a node instance and add it to the graph\n orphan_node = Node(cluster_id, cluster_id, cluster_id, line_num, orphan)\n Graph.cluster_dict[cluster_id][cluster_id] = [orphan_node]",
"def _init_nodes(self, op_cls):\n\t\tself.node_ops = nn.ModuleList()\n\t\tif self.reduction_prev:\n\t\t\tself.node0 = FactorizedReduce(self.C_pp, self.C, affine=False)\n\t\telse:\n\t\t\tself.node0 = ReLUConvBN(self.C_pp, self.C, 1, 1, 0, affine=False)\n\t\tself.node1 = ReLUConvBN(self.C_p, self.C, 1, 1, 0, affine=False)\n\n\t\tfor i in range(self.num_nodes):\n\t\t\t# Creating edges connect node `i` to other nodes `j`. `j < i` \n\t\t\tfor j in range(2+i):\n\t\t\t\tstride = 2 if self.reduction and j < 2 else 1\n\t\t\t\top = op_cls(self.C, stride)\n\t\t\t\tself.node_ops.append(op)",
"def __init__(self, city):\n self.clusters = []\n # We get all city street network nodes:\n allNodes = city.getStreetNodes()\n # When a node is added to a cluster, we eliminate it from the allNodes list.\n # So we go adding nodes to a cluster while allNodes list is not empty.\n while len(allNodes)!=0:\n # Initialize a new cluster:\n openSet = []\n closeSet = []\n actCluster = []\n openSet.append(allNodes[0])\n # The actual cluster grows adding neighbors of the nodes belonging to it.\n # When the actual node neighbors are added to the openSet, the actual node is eliminated from openSet and added to closeSet\n # So we go adding nodes to the actCluster while openSet is not empty.\n while len(openSet)!=0:\n # First openSet node, get neighbors.\n neig = city.getNodeNeigNodes(openSet[0])\n # Add first openSet node neighbors to the openSet.\n if neig!=None:\n for n in neig:\n # Each n is a tuple (neigNode, wayToGetNeigNode).\n # Add neighbor node to openSet if is not already in it:\n if (n[0] not in openSet) and (n[0] not in closeSet):\n openSet.append(n[0])\n # Add way to get to the neighbor node to the actCluster if is not already in it:\n if n[1] not in actCluster:\n actCluster.append(n[1])\n # When actual node has been added to the cluster:\n # we add it to the closeSet\n closeSet.append(openSet[0])\n # and we eliminate it form allNodes and openSet lists\n allNodes.remove(openSet[0])\n del openSet[0]\n # A cluster is completed, no more neighbors\n self.clusters.append(actCluster)\n # No nodes on allNodes_l, so all clusters found",
"def condense_nodes(self, node, *other_nodes, **kwargs):\n life_recalc = kwargs.pop('life_recalc', True)\n enforce_connectivity = kwargs.pop('enforce_connectivity', True)\n if kwargs:\n raise TypeError('Unexpected keyword argument(s): {}'\n .format(', '.join(kwargs)))\n\n nd = self.node[node]\n L.debug('Node {} incorporating {}'.format(\n node, ', '.join(str(x) for x in other_nodes)))\n\n all_nodes = (node,) + other_nodes\n subg = self.subgraph(all_nodes)\n if (enforce_connectivity\n and nx.number_connected_components(subg.to_undirected()) != 1):\n raise ValueError('Attempting to merge unconnected nodes.')\n\n # not sure which function is trying to merge a node with itself...\n # but it needs to be stopped. until then catch it here.\n if node in other_nodes:\n other_nodes = list(other_nodes)\n other_nodes.pop(other_nodes.index(node))\n\n if not other_nodes:\n return\n\n edges_out = set(self.out_edges_iter(all_nodes))\n edges_in = set(self.in_edges_iter(all_nodes))\n #edges_internal = edges_out & edges_in # (unused, for now)\n edges_external = edges_out ^ edges_in\n\n nd.setdefault(kc.COMPONENTS, set([node]))\n\n # copy/update node data (NO TOPOLOGY CHANGES)\n for other_node in other_nodes:\n other_data = self.node[other_node]\n\n # abscond with born/died\n if life_recalc:\n nd[kc.FRAME_BORN] = min(nd[kc.FRAME_BORN], other_data.pop(kc.FRAME_BORN))\n nd[kc.FRAME_DIED] = max(nd[kc.FRAME_DIED], other_data.pop(kc.FRAME_DIED))\n\n nd[kc.TIME_BORN] = min(nd[kc.TIME_BORN], other_data.pop(kc.TIME_BORN))\n nd[kc.TIME_DIED] = max(nd[kc.TIME_DIED], other_data.pop(kc.TIME_DIED))\n else:\n for key in (kc.FRAME_BORN, kc.FRAME_DIED, kc.TIME_BORN, kc.TIME_DIED):\n del other_data[key]\n\n # combine set/mapping data\n nd[kc.COMPONENTS].update(other_data.pop(kc.COMPONENTS, set([other_node])))\n\n #TODO Nick: uncomment and test it\n merge_mappings(nd, other_data)\n\n #TODO Nick: uncomment and test it\n # propogate original edge data\n for a, b in edges_external:\n # should be in one and only one (xor)\n assert (a in all_nodes) ^ (b in all_nodes)\n\n if a in all_nodes:\n # \"leaving\" edge\n u = node\n v = b\n else:\n # \"incoming\" edge\n u = a\n v = node\n\n edge_data = self.get_edge_data(a, b)\n if self.has_edge(u, v):\n existing_edge_data = self.get_edge_data(u, v)\n merge_mappings(existing_edge_data, edge_data)\n else:\n self.add_edge(u, v, **edge_data)\n\n # cleanup\n for other_node in other_nodes:\n # remove nodes (edges come off with)\n self.remove_node(other_node)\n\n # update what's where\n for component in nd[kc.COMPONENTS]:\n self._whereis_data[component] = node",
"def assign_clusters(nodelist, graph):\n cc = list(nx.connected_components(graph))\n\n cnum = pd.Series(-1, index=nodelist)\n for node in nodelist:\n for i, cluster in enumerate(cc):\n if node in cluster:\n cnum.ix[node] = i\n return cnum",
"def consume_nopred_nodes(self):\n\n # Find a list of (node,changeset,) where the node has no\n # predecessors:\n nopred_nodes = _NoPredNodes(\n self._changeset_db,\n (\n node\n for node in self.nodes.itervalues()\n if not node.pred_ids\n ),\n )\n\n while nopred_nodes:\n (node, changeset,) = nopred_nodes.get()\n del self[node.id]\n # See if any successors are now ready for extraction:\n for succ_id in node.succ_ids:\n succ = self[succ_id]\n if not succ.pred_ids:\n nopred_nodes.add(succ)\n yield (changeset, node.time_range)",
"def start_election(self):\n # Maybe in here we need to start up the nodes just on our local machine\n # Actually from here we should just publish the genesis block to all the nodes which signals to start accepting transactions\n public_key_hex = self.public.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n ).hex()\n for node in self.nodes:\n try:\n node.set_genesis(public_key_hex, self.num_zeros, self.transactions_per_block)\n except:\n pass\n return True",
"def simple_pin_nodes_to_cluster(all_nodes, roller):\n nodes_data = []\n role_counter = {}\n # ctrl_counter = 0\n # compute_counter = 0\n LOG.info('Simple(random) node assign to cluster chosen')\n for node in all_nodes:\n if node['cluster'] is not None:\n LOG.debug('Skip reserved node: {0}{1}'.format(node['name'], node['id']))\n continue\n LOG.debug(\"Get free node: {0}\".format(node['name']))\n for node_label in roller.keys():\n if not roller[node_label].get('assigned_names'):\n # here we save assigned names for nodes\n # and use this for network interface configuration later\n roller[node_label]['assigned_names'] = []\n\n if role_counter.get(node_label) is None:\n # initialize counter for this role\n role_counter[node_label] = 0\n\n if role_counter[node_label] < roller[node_label]['count']:\n LOG.debug(\"Assign node with label {0}. \"\n \"Assigned with this label: {1} from {2}.\".format(\n node_label,\n role_counter[node_label],\n roller[node_label]['count']))\n\n node_name = check_for_name(node['mac'])\n node_data = {\n api_cluster_id: cluster_id,\n 'id': node['id'],\n 'pending_addition': True,\n 'pending_roles': roller[node_label]['roles'],\n 'name': node_name,\n }\n roller[node_label]['assigned_names'].append(node_name)\n role_counter[node_label] += 1\n LOG.info('Add node {0} new name: {1}, roles: {2}'.format(\n node['name'],\n node_name,\n roller[node_label]['roles'],\n ))\n nodes_data.append(node_data)\n # break to the next nailgun node\n break\n return nodes_data",
"def process(self, starting_node):\n\n self.start_nodes = starting_node or []\n self.partial_order = {}\n self.links = []\n self.tree_list = {}\n\n if self.nodes:\n if self.start_nodes:\n # add dummy edges to the nodes which does not have any incoming edges\n tree = self.make_acyclic(None, self.start_nodes[0], 0, [])\n\n for node in self.no_ancester:\n for sec_node in self.transitions.get(node, []):\n if sec_node in self.partial_order.keys():\n self.transitions[self.start_nodes[0]].append(node)\n break\n\n self.partial_order = {}\n tree = self.make_acyclic(None, self.start_nodes[0], 0, [])\n\n\n # if graph is disconnected or no start-node is given\n # than to find starting_node for each component of the node\n if len(self.nodes) > len(self.partial_order):\n self.find_starts()\n\n self.max_order = 0\n # for each component of the graph find ranks and order of the nodes\n for s in self.start_nodes:\n self.start = s\n self.rank() # First step:Netwoek simplex algorithm\n self.order_in_rank() #Second step: ordering nodes within ranks",
"def __init__(self, list_nodes):\n\n self.starter_node = Node(list_nodes[0])\n current_node = self.starter_node\n for val in list_nodes[1:]:\n current_node.link = Node(val)\n current_node = current_node.link",
"def create_network(edges, nodes, log):\n log.info('Creating the graph with attributes...')\n edges = edges.drop_duplicates(subset = ['xs', 'ys'])\n edges_tuples = [(edges.iloc[i]['xs'], edges.iloc[i]['ys']) for i in range(len(edges))]\n edges['edges_couple'] = edges_tuples #this will be useful for successive sorting after the graph is created on bokeh\n\n # build the nx graph\n log.info('Creating nx graph...')\n G=nx.Graph()\n G.add_edges_from(edges_tuples)\n nodes_list = list(G.nodes)\n\n idxs = []\n for i in nodes_list:\n idxs.append(nodes[nodes['Company_Name']==i].index[0])\n\n #sorting with same graph order\n nodes = nodes.iloc[idxs]\n\n #nodes analysis to define their centrality\n log.info('Calculating centralities...')\n centrality = nx.degree_centrality(G) #centrality dictionary\n nodes['centrality'] = [centrality[n] for n in list(nodes['Company_Name'])]\n log.info(\"Nodes df updated with the new column 'centrality'...\")\n\n #coordinates\n log.info('Adding coordinates for circular layout...')\n pos = init_layout(G, nodes)\n coordinates = [np.array(pos[j]) for j in nodes['Company_Name']]\n nodes['coords'] = coordinates\n log.info(\"Nodes df updated with the new column 'coords'...\")\n\n return G, edges, nodes",
"def BuildInitialNodeList(self):\n self.logger.info(\"Populating list of nodes\") \n self.InitNHDNodes()\n\n for n, v in self.nodes.items():\n try:\n v.SetNodeAddr(self.k8s.GetNodeAddr(n))\n\n if not v.ParseLabels(self.k8s.GetNodeLabels(n)):\n self.logger.error(f'Error while parsing labels for node {n}, deactivating node')\n v.active = False\n continue\n\n (alloc, free) = self.k8s.GetNodeHugepageResources(n) \n if alloc == 0 or not v.SetHugepages(alloc, free):\n self.logger.error(f'Error while parsing allocatable resources for node {n}, deactivating node')\n v.active = False \n\n except Exception as e:\n self.logger.error(f'Caught exception while setting up node {n}:\\n {e}')\n v.active = False\n\n # JVM: this is only printing a message - maybe should be part of InitNHDNodes() ?\n for k,v in self.nodes.items():\n if v.active:\n self.logger.info(f'Adding node {k} to scheduling list')\n\n self.logger.info(\"Done building initial node list\")",
"def create_cameFrom(self):\n # TODO: return a data structure that shows which node can most efficiently be reached from another,\n # for each node.\n cameFrom = {}\n return cameFrom",
"def create_matching_nodegraph(countgraph):\n tablesizes = countgraph.hashsizes()\n return khmer._Nodegraph(countgraph.ksize(), tablesizes)",
"def to_coco(self):\n for cid, node in self.id_to_node.items():\n # Skip if background already added\n cat = {\n 'id': cid,\n 'name': node,\n }\n parents = list(self.graph.predecessors(node))\n if len(parents) == 1:\n cat['supercategory'] = parents[0]\n else:\n if len(parents) > 1:\n raise Exception('not a tree')\n yield cat",
"def _create_node_set(unavailable_nodes, unavailable_edges, smallest_bin_idx,\n bin_samples, bin_idcs, edge_idcs, num_bins, max_per_bin,\n MAX_PER_BIN_AND_NODE=110):\n def add_incident_edges(inc_edge_idcs, current_set_edges,\n current_set_bin_counts):\n \"\"\"\n Adds the edges given in `inc_edge_idcs` to `current_set_edges` subject\n to some conditions.\n :param inc_edge_idcs:\n :param current_set_edges:\n :param current_set_bin_counts:\n :return:\n \"\"\"\n added_count = 0 # Number of edges actually added\n node_bin_counts = np.zeros(num_bins) # Bin counts for edges actually added\n for inc_edge_idx in inc_edge_idcs:\n edge_bin = bin_idcs[inc_edge_idx]\n if (inc_edge_idx not in current_set_edges\n and inc_edge_idx not in unavailable_edges\n and node_bin_counts[edge_bin] < MAX_PER_BIN_AND_NODE\n and current_set_bin_counts[edge_bin]+node_bin_counts[edge_bin] < max_per_bin[edge_bin]):\n added_count += 1\n node_bin_counts[edge_bin] += 1\n current_set_edges.add(inc_edge_idx)\n return added_count, node_bin_counts\n\n # Samples in smallest bin\n set_nodes, set_edges = set(), set() # Nodes and edges selected for validation/test\n set_bin_counts = np.zeros(num_bins) # Counts of the edges in validation/test set for each bin\n\n for idx, edge_idx in enumerate(bin_samples):\n # If we have enough edges of the rarest (smallest) type, we can stop\n # adding edges.\n if set_bin_counts[smallest_bin_idx] >= max_per_bin[smallest_bin_idx]:\n break\n # If the edge is already in a different set, do not include it\n if edge_idx in unavailable_edges:\n continue\n out_node, in_node = tuple(edge_idcs[edge_idx])\n\n # If both nodes are no longer available, go to next edge\n if in_node in unavailable_nodes and out_node in unavailable_nodes:\n continue\n # Decide which of the two nodes to add based on whether one is already\n # in the node set or no longer available\n if in_node in set_nodes or out_node in unavailable_nodes: # We have already added in_node to set_nodes in a previous iteration OR in_node belongs to an excluded set (e.g. the validation set created in a previous call to this method).\n node_to_add = in_node\n elif out_node in set_nodes or in_node in unavailable_nodes:\n node_to_add = out_node\n else:\n node_to_add = in_node\n\n # Add node to the set\n set_nodes.add(node_to_add)\n # Now add all the edges incident to the new node to the edge set\n # (subject to some conditions).\n # For outgoing edges\n out_edge_idcs, = np.where(edge_idcs[:, 0] == node_to_add)\n add_set_out_count, add_node_bin_counts = add_incident_edges(out_edge_idcs, set_edges, set_bin_counts)\n set_bin_counts += add_node_bin_counts\n # For incoming edges\n in_edge_idcs, = np.where(edge_idcs[:, 1] == node_to_add)\n add_set_in_count, add_node_bin_counts = add_incident_edges(in_edge_idcs, set_edges, set_bin_counts)\n set_bin_counts += add_node_bin_counts\n\n if np.any(set_bin_counts >= max_per_bin):\n print(f\"One bin full after adding {idx+1} edges.\")\n return set_edges, set_nodes",
"def create_cameFrom(self):\n # TODO: return a data structure that shows which node can most efficiently be reached from another,\n # for each node.\n return []",
"def create_cameFrom(self):\n # TODO: return a data structure that shows which node can most efficiently be reached from another,\n # for each node.\n dic={}\n return dic",
"def make_cograph(tree, alist):\n #first find number of verts in cograph\n ord = 1\n for a in alist:\n ord = ord*a\n #initialize a matrix of the right size to be all 0s\n adj = np.zeros((ord, ord))\n #bubble up the tree\n #for each leaf\n leaves = get_vertices_of_depth(tree, len(alist))\n print(leaves)\n for i in range(len(leaves)):\n for j in range(len(leaves)):\n if i != j:\n #we have 2 distinct leaves find MRCA\n n1 = leaves[i]\n n2= leaves[j]\n while True:\n pari = n1.get_parent().get_id()\n parj = n2.get_parent().get_id()\n if pari == parj:\n if n1.get_parent().get_level() % 2==0: # parent is X join\n adj[i][j] = 1\n # adj[j][i] = 1\n break\n n1 = n1.get_parent()\n n2 = n2.get_parent()\n return adj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
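The consensus_single entry above merges fusion nodes whose breakpoints lie within consensus_interval of each other, then repeats the merge over the consensus nodes it just added. The stand-alone sketch below illustrates only that proximity-grouping idea on plain integer positions; cluster_breakpoints and its inputs are hypothetical and do not reproduce the hmn_fusion.Fusion.is_near API.

def cluster_breakpoints(positions, interval):
    # Group sorted positions so consecutive members of a cluster are no more
    # than `interval` apart (a simplified stand-in for the 'is_near' test).
    clusters = []
    for pos in sorted(positions):
        if clusters and pos - clusters[-1][-1] <= interval:
            clusters[-1].append(pos)
        else:
            clusters.append([pos])
    return clusters

print(cluster_breakpoints([100, 105, 500, 510, 2000], interval=20))
# Expected: [[100, 105], [500, 510], [2000]]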
Plot graph to a file.
|
def to_plot(self, path: str) -> None:
plt.subplot()
nx.draw(self.graph, with_labels=True, font_weight="bold")
plt.savefig(path)
|
[
"def uti_data_file_plot(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True):\n #if '_backend' not in locals(): uti_plot_init() #?\n _backend.uti_data_file_plot(_fname, _read_labels, _e, _x, _y, _graphs_joined)",
"def draw_graph(self, filename: str, scale_x=10, scale_y=10):\n node_dict = self.get_node_names_with_pos()\n nx_graph = nx.DiGraph()\n nx_graph.add_nodes_from(node_dict.keys())\n\n nx_graph.add_edges_from(self.edges)\n\n plt.figure(1, figsize=(scale_x, scale_y))\n nx.draw(nx_graph, node_dict, with_labels=True)\n plt.gca().invert_yaxis()\n Graph.save_fig(filename)\n plt.show()",
"def simple_plot(file_name, title, x, y, xlabel, ylabel):\n\n plt.plot(x, y)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n\n plt.savefig(file_name)\n plt.clf()",
"def plot(self, filename=''):\n\t\timport matplotlib.pyplot as plt\n\t\tfrom matplotlib.pylab import cm\n\n\t\tfig = plt.figure(1)\n\t\tax = fig.add_subplot(111, xlim=[np.min(self.x), np.max(self.x)], ylim=[np.min(self.y), np.max(self.y)])\n\t\t\n\t\ttc = ax.tripcolor(self.x, self.y, self.simplicies, facecolors=self.shapeMap(), edgecolors='k', cmap=cm.terrain, shading='flat', alpha=0.5)\n\t\ttc.set_clim(0, len(self.shapelist)+0.5)\n\t\tax.scatter(self.x, self.y, c='k')\n\t\tfig.colorbar(tc)\n\n\t\tif filename == '':\n\t\t\tplt.show()\n\t\telse:\n\t\t\t# assert type(filename) is str, \"filename is not a string\"\n\t\t\tacceptable_formats = ['.png', '.jpg', 'jpeg', '.pdf', '.gif', '.eps', '.fig']\n\t\t\tassert filename[-4:] in acceptable_formats, \"filename is not supported\\nChoose between .png, .jpg, .pdf, .eps, etc.\"\n\t\t\tplt.savefig(filename, bbox_inches='tight')",
"def write_graph(self, filename):\n pass",
"def plot_network(self, data, file_path):\n plt.clf()\n plt.title('Network nodes and edges')\n plt.scatter(data[:, 0], data[:, 1], c='b')\n node_pos = {}\n for u in self.network.nodes():\n vector = self.network.node[u]['vector']\n node_pos[u] = (vector[0], vector[1])\n nx.draw(self.network, pos=node_pos, node_color='r')\n plt.draw()\n plt.savefig(file_path)",
"def plot(self, filename:str=None):\n if not filename:\n filename = max(Saver.data_files())\n df = pd.read_csv(filename)\n print('DATAFRAME:')\n print(df)\n plot = self.plotter(df, self.config_change_steps)\n plt.show()",
"def plot(self, args):\n self.databaser.plot(args.song_id, args.f)",
"def plot(path):\n with open(path, 'r') as infile:\n data = json.loads(infile.read())\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data['svg'].encode())\n f.close()\n\n ad.plot_setup(f.name)\n ad.options.speed_pendown = 80\n ad.options.reordering = 3\n\n ad.plot_run()\n os.unlink(f.name)",
"async def graph(self, ctx, expr, lower_limit, upper_limit):\n expr_list = expr.split(';')\n expr_list = [parse_expr(e) for e in expr_list]\n\n lower_limit = float(lower_limit)\n upper_limit = float(upper_limit)\n buf = None\n\n # set the axis limits to avoid weird plots\n if lower_limit <= 0:\n axis_limit_lower = 0\n else:\n axis_limit_lower = lower_limit\n try:\n p = plot(*expr_list, (sympy.var('x'), lower_limit, upper_limit), axis_center = (axis_limit_lower, 0), show=False)\n buf = io.BytesIO()\n p.save(buf)\n buf.seek(0)\n await self.bot.send_file(ctx.message.channel, buf, filename='graph.png')\n except Exception as e:\n # log this instead...\n print(f'!! Exception occured during plotting: {e}')\n await self.bot.say(f'Invalid expression: {expr}')\n finally:\n if buf:\n buf.close()",
"def save_plot_from_file(filename, stat_name):\n\n # Read in the data\n data = pd.read_csv(filename, sep=\"\\t\")\n try:\n stat = list(data[stat_name])\n except KeyError:\n s = \"utilities.stats.save_plots.save_plot_from_file\\n\" \\\n \"Error: stat %s does not exist\" % stat_name\n raise Exception(s)\n\n # Set up the figure.\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n # Plot the data.\n ax1.plot(stat)\n\n # Plot title.\n plt.title(stat_name)\n\n # Get save path\n save_path = pathsep.join(filename.split(pathsep)[:-1])\n\n # Save plot and close.\n plt.savefig(path.join(save_path, (stat_name + '.pdf')))\n plt.close()",
"def PlotToFilePath(self) -> str:",
"def plot(source, dest):\n path_name = source # path_name variable which takes source path\n df = pd.read_csv(path_name) # From source path file which is csv it creates dataframe\n\n x_axis = [x+1 for x in range(df.shape[1])] # For creating graph it needs x range which will be created by x_axis list [0 1 ..... 315]\n for i in range(df.shape[0]):\n y_axis = df.loc[i,:].values # y_axis variable takes values from csv file\n plt.plot(x_axis,y_axis)\n path_name = os.path.join(dest,\"{}\".format(i)) # Path name takes destination folder path\n plt.savefig(path_name)\n plt.clf()",
"def save_plot(dataframe, filename):\n plt.clf()\n dataframe.plot()\n plt.savefig(filename)",
"def save_plot(path, filename, extension='png'):\n\n plt.savefig(path + filename + '.' + extension)\n # fig.clear()\n plt.close()",
"def plot_model(root, output_file=\"model.svg\"):\n graph.plot(root, output_file)\n logger.get().info(\"Model graph plotted to: {}\".format(output_file))",
"def cmd_plot(nodes, filename, save, show_dict, backend, show):\n if backend != 'bokeh':\n # Try to set a working GUI backend for matplotlib\n # normally we assume to be on ipython which is not good.\n # The order is arbitrary.\n import matplotlib\n gui_env = [\n 'WebAgg', 'GTK3Agg', 'GTK3Cairo', 'MacOSX', 'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo', 'TkAgg', 'TkCairo',\n 'WX', 'WXAgg', 'WXCairo', 'nbAgg', 'agg', 'cairo', 'pdf', 'pgf', 'ps', 'svg'\n ]\n for gui in gui_env:\n try:\n print('testing', gui)\n matplotlib.use(gui, force=True)\n from matplotlib import pyplot as plt\n break\n except ImportError as ex:\n print(ex)\n continue\n print('Using:', matplotlib.get_backend())\n\n from aiida_fleur.tools.plot.fleur import plot_fleur\n\n nodes = list(nodes)\n nodesf = []\n if filename is not None:\n nodesf = filename.read().split()\n filename.close()\n nodes = nodes + nodesf\n p = plot_fleur(nodes, save=save, show_dict=show_dict, backend=backend, show=show)",
"def plot(self):\n data = Gnuplot.Data(self.x, self.y, using = (1, 2)) #this ensures that t is used as x axis\n g = Gnuplot.Gnuplot()\n g('set ylabel \"y-axis [arb. units]\"')\n g('set xlabel \"x-axis [arb. units]\"')\n g('set style data lines')\n g.plot(data)",
"def plot_model(root, output_file=\"model.svg\"):\n\n text = graph.plot(root, output_file)\n logger.get().info(text)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recursively discover all disciplines on studyportal.
|
def discover_disciplines(session, url, section_id='DisciplineSpotlight', parent=None):
# Parse the HTML for this URL
r = session.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, features="lxml")
# Extract discipline metadata from the indicated section
disciplines = []
section = soup.find('section', id=section_id)
for item in section.find_all('li'):
anchor = item.find('a', href=True)
href = anchor['href']
this_discipline = {'discipline_id': int(href.split('/')[-2]), # The discipline ID is hidden in the href
'discipline_title': anchor.text.strip(),
'parent': parent}
disciplines.append(this_discipline)
# For disciplines (rather than subdisciplines), extract subdisciplines
if parent is None:
disciplines += discover_disciplines(session, href,
section_id='SubdisciplinesList', parent=this_discipline)
return disciplines
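# Illustrative usage sketch (an editorial addition, not part of the scraped source):
# it assumes `requests` is installed and that `start_url` points at a studyportal
# landing page containing a section with id='DisciplineSpotlight'. The URL below is
# only a placeholder.
if __name__ == '__main__':
    import requests

    session = requests.Session()
    start_url = 'https://www.bachelorsportal.com/'  # placeholder landing page (assumption)
    all_disciplines = discover_disciplines(session, start_url)
    print(f'Discovered {len(all_disciplines)} disciplines and subdisciplines')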
|
[
"def getDisciplines(self):\r\n return self.__dis",
"def allDisciplines(self):\n auxList = []\n auxD = []\n for item in self.gradesList:\n if item.getGrValue() != \"none\" and item.getDiscId() not in auxD:\n auxList.append(AllDisciplines(item.getDiscId(), self.getAvgForDisc(item.getDiscId())))\n auxD.append(item.getDiscId())\n \n for i in range(0, len(auxList) - 1):\n for j in range(i + 1, len(auxList)):\n if auxList[i].getAvg() < auxList[j].getAvg():\n auxList[i], auxList[j] = auxList[j], auxList[i]\n \n return auxList",
"def _discover_courses(session, di, lvl, total):\n query_string = '|'.join((f'di-{di}', # Discipline\n 'en-3002', # Don't know what this is, could be a mechanism for rate limiting\n f'lv-{lvl}', # Degree level\n 'tc-EUR', # Currency\n 'uc-30', # Don't know what this is\n 'ur-38')) # Don't know what this is\n n_pages = (total // PAGE_SIZE) + (total % PAGE_SIZE > 0)\n for page in range(0, n_pages):\n r = session.get(SEARCH_URL, params={'start': page*PAGE_SIZE, 'q': query_string})\n r.raise_for_status()\n for course in r.json():\n # Don't double count sublevels (e.g. preparation is a level & also incl under bachelor)\n if course['level'] != lvl:\n continue\n yield course",
"def explore(self):\r\n # obtain the self.univariate_list and the others\r\n # TODO: The others.\r\n # Univariate study\r\n # TODO: We need the data-dict center in order to know the variabletype.\r\n for variablename in self.variablelist:\r\n self.univariate_dict[variablename] =\\\r\n DescriptionVars(variablename, '')",
"def disciplines(self, request, pk=None, **kwargs):\n\n instance = self.get_object()\n\n res = []\n if instance.disciplines.exists():\n context = self.get_serializer_context()\n context[\"mptt_tree\"] = MpttFilterItem.objects.get_cached_trees()\n\n res = ThemeDisciplineSerializer(\n many=True, context=context\n ).to_representation(\n instance.disciplines.all()\n )\n\n return Response(res)",
"def load_disciplines(self):\n\n # Connect to the database\n conn = sqlite3.connect(config.cfg['db_location'])\n crsr = conn.cursor()\n\n # Retrieve list of all tags from SQL database\n crsr.execute(\"SELECT id, discipline \"\n \"FROM Disciplines;\")\n\n # Write tags to self.tags and define enumeration for cross-reference\n _discipline_tuples = crsr.fetchall()\n self.discipline_to_id = dict((discipline, ident) for (ident, discipline) in _discipline_tuples)\n self.id_to_discipline = dict((ident, discipline) for (ident, discipline) in _discipline_tuples)\n\n # Close connection\n crsr.close()\n conn.close()",
"def test_portals_id_designs_nk_portal_get(self):\n pass",
"def dfs_all():\n for vertex in g:\n if not visited[vertex]:\n dfs_util(vertex)",
"def get_dssp_info(PDB_file,model,dir):\r\n\r\n\t#TODO : you can run DSSP through biopython. The output contains a lot of useful information.\r\n\t#Tip : make sure your secondary structure indexing matches the sequence order in the PDB file!\r\n\r\n\treturn PDB.DSSP(model, dir + '/' + PDB_file, dssp = 'mkdssp')",
"def _get_documents(self):\n documents = []\n docs = get_study_documents(self.nct_id)\n for doc_type, link in docs.items():\n doc_id = \"_\".join(link.split(\"/\")[2:])\n document = StudyDocument.from_dict(dict(doc_id=doc_id,\n doc_type=doc_type,\n doc_url=link,\n doc_comment=\"Retrieved from clinicaltrials.gov manually\"))\n documents.append(document)\n return documents",
"def _all_pseudo_sites(self):\n def _all_restraint_sites():\n for r in self._all_restraints():\n if hasattr(r, 'cross_links'):\n for xl in r.cross_links:\n if xl.pseudo1:\n for x in xl.pseudo1:\n yield x.site\n if xl.pseudo2:\n for x in xl.pseudo2:\n yield x.site\n return itertools.chain(self.orphan_pseudo_sites,\n _all_restraint_sites(),\n (f.site for f in self._all_features()\n if hasattr(f, 'site') and f.site))",
"def scrape_courts_portal(nprocs, pid, sleep, debug, sample, dry_run):\n\n # Load the shootings data\n shootings = ShootingVictimsData(debug=debug).get(fresh=False)\n shootings[\"dc_key\"] = shootings[\"dc_key\"].astype(str)\n\n # Drop duplicates\n shootings = shootings.drop_duplicates(subset=[\"dc_key\"])\n\n # Sample?\n if sample is not None:\n shootings = shootings.sample(sample)\n\n # Split\n assert pid < nprocs\n if nprocs > 1:\n shootings_chunk = np.array_split(shootings, nprocs)[pid]\n chunk = pid\n else:\n shootings_chunk = shootings\n chunk = None\n\n # Scrape courts info\n courts_data = CourtInfoByIncident(debug=debug)\n courts_data.update(shootings_chunk, chunk=chunk, sleep=sleep, dry_run=dry_run)",
"def get_relevant_cycle_prototypes(self):\n indices_rings = self._data_internal.get_rcps()\n return [self._convert_cycles(ring) for ring in indices_rings]",
"def get_cites(pdf):\n \n # This pattern picks out various name--date combos.\n # TODO:\n # - Get et als.\n # - Get possessives -- Chomsky's (1965) view...\n pattern = r\"(((\\w+\\,\\s)?\\w+\\,?\\s(and|&)\\s)?\\w+\\s)\\(?(\\d{4})\\)?\"\n cites_found = set()\n \n # Search each page of the PDF for the citations.\n for page in pdf_dict[pdf][\"pages\"]:\n results = re.findall(pattern, page[1])\n \n # Attempt to regularize the formatting.\n for result in results:\n strings = [result[0], result[-1]]\n joined = \" \".join(strings)\n joined = re.sub(\"\\s+\", \" \", joined)\n joined = re.sub(\"\\,?\\s&\", \" and\", joined)\n cites_found.add(joined)\n\n return list(sorted(cites_found))",
"def get_subjects(path, years, semesters, graduation_levels):\n institutes_data = json_to_data(path)\n institutes = institutes_data[\"institutes\"]\n for institute in institutes:\n for year in years:\n for semester in semesters:\n for level in graduation_levels:\n # desconsider CEL institute as it doesn't have graduate courses/subjects\n if level == \"P\" and institute[\"initials\"] == \"CEL\":\n continue\n url = base_url + year + '/' + semester + '/' + 'S' + '/' + level + '/' + institute[\n \"initials\"]\n driver.get(url)\n subjects = driver.find_elements_by_class_name('disciplina')\n for subject in subjects:\n sems_obj = {}\n sems_obj[\"year\"] = year\n sems_obj[\"semester\"] = semester\n\n subj_obj = {}\n subj_obj[\"institute\"] = institute[\"initials\"]\n subj_obj[\n \"initials\"] = subject.find_element_by_tag_name(\n 'a').text.split('\\n')[0]\n subj_obj[\"name\"] = subject.find_element_by_tag_name(\n 'a').text.split('\\n')[-1]\n subj_obj[\"degree\"] = level\n subj_obj[\"semester\"] = sems_obj\n data[\"subjects\"].append(subj_obj)",
"def subbasin_delineation(station_id):\n\n import hydrodata.datasets as hds\n from shapely.geometry import mapping, Point\n import geopandas as gpd\n\n session = retry_requests()\n base_url = \"https://labs.waterdata.usgs.gov/api/nldi/linked-data/nwissite\"\n\n comids = []\n for nav in [\"UM\", \"UT\"]:\n url = base_url + f\"/USGS-{station_id}/navigate/{nav}\"\n try:\n r = session.get(url)\n except HTTPError or ConnectionError or Timeout or RequestException:\n raise\n comids.append(\n gpd.GeoDataFrame.from_features(r.json()).nhdplus_comid.astype(str).tolist()\n )\n\n main_ids, trib_ids = comids\n main_fl = hds.nhdplus_byid(comids=main_ids, layer=\"nhdflowline_network\")\n trib_fl = hds.nhdplus_byid(comids=trib_ids, layer=\"nhdflowline_network\")\n\n main_ids = main_fl.sort_values(by=\"hydroseq\").index.tolist()\n len_cs = main_fl.sort_values(by=\"hydroseq\").lengthkm.cumsum()\n\n pp = hds.huc12pp_byid(\"nwissite\", station_id, \"upstreamMain\")\n station = Point(\n mapping(main_fl.loc[main_ids[0]].geometry)[\"coordinates\"][0][-1][:-1]\n )\n pp = pp.append([{\"geometry\": station, \"comid\": main_ids[0]}], ignore_index=True)\n headwater = Point(\n mapping(main_fl.loc[main_ids[-1]].geometry)[\"coordinates\"][0][-1][:-1]\n )\n pp = pp.append([{\"geometry\": headwater, \"comid\": main_ids[-1]}], ignore_index=True)\n pp = (\n pp.set_index(\"comid\")\n .loc[[s for s in main_ids if s in pp.comid.tolist()]]\n .reset_index()\n )\n\n pp_dist = len_cs.loc[pp.comid.astype(int).tolist()]\n pp_dist = pp_dist.diff()\n\n pp_idx = [main_ids[0]] + pp_dist[pp_dist > 1].index.tolist()\n\n if len(pp_idx) < 2:\n msg = \"There are not enough pour points in the watershed for automatic delineation.\"\n msg = \" Try passing the desired number of subbasins.\"\n raise ValueError(msg)\n\n pour_points = pp[pp.comid.isin(pp_idx[1:-1])]\n\n pp_idx = [main_ids.index(i) for i in pp_idx]\n idx_subs = [main_ids[pp_idx[i - 1] : pp_idx[i]] for i in range(1, len(pp_idx))]\n\n catchemnts = []\n sub = [trib_fl]\n fm = main_fl.copy()\n for idx_max in idx_subs:\n sub.append(hds.navigate_byid(idx_max[-1], \"upstreamTributaries\"))\n idx_catch = (\n sub[-2][~sub[-2].index.isin(sub[-1].index)].index.astype(str).tolist()\n )\n catchemnts.append(hds.nhdplus_byid(comids=idx_catch, layer=\"catchmentsp\"))\n fm = fm[~fm.index.isin(idx_max)].copy()\n\n catchemnts[-1] = catchemnts[-1].append(\n hds.nhdplus_byid(comids=sub[-1].index.astype(str).tolist(), layer=\"catchmentsp\")\n )\n\n return catchemnts, pour_points",
"def crawl(url, skip=None, depth=0, **kwargs):\n cat = read_url(url, skip, **kwargs)\n for ds in cat.flat_datasets():\n yield ds\n if depth > 0:\n for ref in cat.flat_references():\n for ds in crawl(ref.url, skip, depth - 1, **kwargs):\n yield ds",
"def generate_dcip_survey_line(\n survey_type, data_type, endl, topo, ds, dh, n, dim_flag=\"2.5D\", sources_only=False\n):\n\n accepted_surveys = [\"pole-pole\", \"pole-dipole\", \"dipole-pole\", \"dipole-dipole\"]\n\n if survey_type.lower() not in accepted_surveys:\n raise Exception(\n \"survey_type must be 'dipole-dipole' | 'pole-dipole' | \"\n \"'dipole-pole' | 'pole-pole' not {}\".format(survey_type)\n )\n\n def xy_2_r(x1, x2, y1, y2):\n r = np.sqrt(np.sum((x2 - x1) ** 2.0 + (y2 - y1) ** 2.0))\n return r\n\n # Compute horizontal locations of sources and receivers\n x1 = endl[0]\n x2 = endl[1]\n\n if dim_flag == \"3D\":\n\n # Station locations\n y1 = endl[2]\n y2 = endl[3]\n L = xy_2_r(x1, x2, y1, y2)\n nstn = int(np.floor(L / ds) + 1)\n dl_x = (x2 - x1) / L\n dl_y = (y2 - y1) / L\n stn_x = x1 + np.array(range(int(nstn))) * dl_x * ds\n stn_y = y1 + np.array(range(int(nstn))) * dl_y * ds\n\n # Locations of poles and dipoles\n if survey_type.lower() in [\"pole-pole\", \"pole-dipole\", \"dipole-pole\"]:\n P = np.c_[stn_x, stn_y]\n if np.size(topo) == 1:\n P = np.c_[P, topo * np.ones((nstn))]\n else:\n fun_interp = LinearNDInterpolator(topo[:, 0:2], topo[:, -1])\n P = np.c_[P, fun_interp(P)]\n\n if survey_type.lower() in [\"pole-dipole\", \"dipole-pole\", \"dipole-dipole\"]:\n DP1 = np.c_[stn_x - 0.5 * dl_x * dh, stn_y - 0.5 * dl_y * dh]\n DP2 = np.c_[stn_x + 0.5 * dl_x * dh, stn_y + 0.5 * dl_y * dh]\n if np.size(topo) == 1:\n DP1 = np.c_[DP1, topo * np.ones((nstn))]\n DP2 = np.c_[DP2, topo * np.ones((nstn))]\n else:\n fun_interp = LinearNDInterpolator(topo[:, 0:2], topo[:, -1])\n DP1 = np.c_[DP1, fun_interp(DP1)]\n DP2 = np.c_[DP2, fun_interp(DP2)]\n\n else:\n\n # Station locations\n y1 = 0.0\n y2 = 0.0\n L = xy_2_r(x1, x2, y1, y2)\n nstn = int(np.floor(L / ds) + 1)\n stn_x = x1 + np.array(range(int(nstn))) * ds\n\n # Locations of poles and dipoles\n if survey_type.lower() in [\"pole-pole\", \"pole-dipole\", \"dipole-pole\"]:\n P = np.c_[stn_x, stn_y]\n if np.size(topo) == 1:\n P = np.c_[stn_x, topo * np.ones((nstn))]\n else:\n fun_interp = LinearNDInterpolator(topo[:, 0:2], topo[:, -1])\n P = np.c_[stn_x, fun_interp(stn_x)]\n\n if survey_type.lower() in [\"pole-dipole\", \"dipole-pole\", \"dipole-dipole\"]:\n DP1 = stn_x - 0.5 * dh\n DP2 = stn_x + 0.5 * dh\n if np.size(topo) == 1:\n DP1 = np.c_[DP1, topo * np.ones((nstn))]\n DP2 = np.c_[DP2, topo * np.ones((nstn))]\n else:\n fun_interp = interp1d(topo[:, 0], topo[:, -1])\n DP1 = np.c_[DP1, fun_interp(DP1)]\n DP2 = np.c_[DP2, fun_interp(DP2)]\n\n # Build list of Tx-Rx locations depending on survey type\n # Dipole-dipole: Moving tx with [a] spacing -> [AB a MN1 a MN2 ... a MNn]\n # Pole-dipole: Moving pole on one end -> [A a MN1 a MN2 ... 
MNn a B]\n SrcList = []\n\n for ii in range(0, int(nstn)):\n\n if dim_flag == \"3D\":\n D = xy_2_r(stn_x[ii], x2, stn_y[ii], y2)\n else:\n D = xy_2_r(stn_x[ii], x2, y1, y2)\n\n # Number of receivers to fit\n nrec = int(np.min([np.floor(D / ds), n]))\n\n # Check if there is enough space, else break the loop\n if nrec <= 0:\n continue\n\n # Create receivers\n if survey_type.lower() in [\"dipole-pole\", \"pole-pole\"]:\n rxClass = dc.receivers.Pole(\n P[ii + 1 : ii + nrec + 1, :], data_type=data_type\n )\n elif survey_type.lower() in [\"dipole-dipole\", \"pole-dipole\"]:\n rxClass = dc.receivers.Dipole(\n DP1[ii + 1 : ii + nrec + 1, :],\n DP2[ii + 1 : ii + nrec + 1, :],\n data_type=data_type,\n )\n\n # Create sources\n if survey_type.lower() in [\"pole-dipole\", \"pole-pole\"]:\n srcClass = dc.sources.Pole([rxClass], P[ii, :])\n elif survey_type.lower() in [\"dipole-dipole\", \"dipole-pole\"]:\n srcClass = dc.sources.Dipole([rxClass], DP1[ii, :], DP2[ii, :])\n\n SrcList.append(srcClass)\n\n if sources_only:\n\n return SrcList\n\n else:\n survey = dc.Survey(SrcList, survey_type=survey_type.lower())\n\n return survey",
"def test_portals_id_designs_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Discover degree level counts by discipline.
|
def discover_level_count(session, discipline_id):
r = session.get(SEARCH_FACETS_URL, params={"q": f"di-{discipline_id}", "facets": '["lv"]'})
r.raise_for_status()
for degree_level, count in r.json()['lv'].items():
yield degree_level, count
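# Usage sketch (editorial addition): the function above relies on a module-level
# SEARCH_FACETS_URL constant that is not shown here; `session` and the discipline id
# would normally come from discover_disciplines(). The id below is a placeholder.
if __name__ == '__main__':
    import requests

    session = requests.Session()
    for degree_level, count in discover_level_count(session, discipline_id=1):
        print(degree_level, count)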
|
[
"def test_portals_id_designs_count_get(self):\n pass",
"def test_portals_id_designs_nk_members_count_get(self):\n pass",
"def test_portals_id_designs_nk_design_members_count_get(self):\n pass",
"def test_portals_id_designs_nk_comments_count_get(self):\n pass",
"def test_portals_id_designs_nk_commenters_count_get(self):\n pass",
"def compute_indegrees(digraph):\n keys = list(digraph.keys())\n degree_dic = {}\n for key in keys:\n count = 0\n for val_set in list(digraph.values()):\n for val in val_set:\n if val == key:\n count += 1\n degree_dic[key] = count\n return degree_dic",
"def chordStrengthProfile(delaunayMap, startDart = None, cs = None):\n\n if not startDart:\n startDart = delaunayMap.node(1).anchor()\n while startDart.endNodeLabel() != 2:\n startDart.nextSigma()\n\n assert startDart.edge().flag(CONTOUR_SEGMENT) and \\\n not startDart.leftFace().flag(OUTER_FACE), \\\n \"expecting startDart to be on the shape's contour,\" \\\n \"with the inner triangles on the left\"\n\n if not cs:\n cs = calculateChordStrengths(delaunayMap)\n\n dart = startDart.clone()\n result = []\n while True:\n #print dart\n while not dart.nextSigma().edge().flag(CONTOUR_SEGMENT):\n #print \"-\", dart\n result.append((cs[dart.edgeLabel()], dart.label()))\n if dart.nextAlpha() == startDart:\n #print \"AT BEGIN (%s)\" % dart.label()\n break\n return result",
"def test_portals_id_designs_nk_exports_count_get(self):\n pass",
"def degre_noeud(liste_arcs, noeud):\n cpt = 0\n for arc in liste_arcs:\n if noeud == arc[0] or noeud == arc[1]:\n cpt += 1 \n return cpt",
"def evalu(cld,details=False):\n all_clusts = set(cld.values())\n success,potential,failure,sk,pk,fk = 0,0,0,[],[],[]\n for clust in all_clusts:\n cc = cl.member_counts(clust)\n if len(cc)>1 and any(i>3 for i in cc.values()):\n potential+=1\n pk.append(clust)\n elif len(cc)>1:\n failure+=1\n fk.append(clust)\n else:\n sk.append(clust)\n success+=1\n if details:\n return success,potential,failure,sk,pk,fk\n return success,potential,failure",
"def degree(self, v):\n #self._validateVertex(v)\n return self._adj[v].size()",
"def degree(self, v):\n return len(self.neighbors(v))",
"def degree(self,u):\n return len(self.get_node(u))",
"def test_portals_count_get(self):\n pass",
"def test_get_scenario_count( self ):\n # Given\n quest = Quest()\n\n # When\n quest.add_level( ScenarioCoinCollect( False, 20, 1, [] ), 1 )\n quest.add_level( ScenarioCoinCollect( False, 20, 1, [] ), 1 )\n quest.add_level( ScenarioCoinCollect( False, 20, 1, [] ), 1 )\n\n # Then\n assert quest.get_level_count() == 3",
"def compute_out_degrees(digraph): # part of answer to question 3\n xgraph = {}\n print \"Processing the number of Out-Degrees\" # Status indicator for long processing times\n for node in iter(digraph.viewkeys()): # creates an iter of just the keys in the dict. increase performance for larger data sets maybe? IE only shows the keys\n xgraph[node] = len(digraph[node]) # creates node in graph and assigns it to the number of out degrees to begin count\n print \"Finished Counting Out-Degrees\" # Status indicator for long processing times\n\n return xgraph # return created graph with node + count of out degrees",
"def test_portals_id_design_folders_count_get(self):\n pass",
"def get_dep_count_df(data):\n characteristics = data.get_dataframe('characteristics')\n\n # Format dep\n # Remove 0 at the end of dep number if the dep number is not DOM-TOM\n characteristics['dep'] = characteristics.apply(lambda x: x.dep//10 if x.dep<971 else x.dep, axis=1)\n # Add 0 in front of dep number if dep number is lower than 10\n characteristics['dep'] = characteristics.apply(lambda x: '0'+str(x.dep) if x.dep<10 else str(x.dep), axis=1)\n\n # Count values per dep\n dep_count = pd.DataFrame(characteristics['dep'].value_counts())\n dep_count.reset_index(level=0, inplace=True)\n dep_count.rename(columns={'index':'dep',\n 'dep':'count'},\n inplace=True)\n return dep_count",
"def degree(self, nodes):\n\n adj = self._build_adjacency(nodes)\n\n degree = {}\n for node in adj:\n degree[node] = len(adj[node])\n if node in adj[node]:\n degree[node] += 1\n\n return degree"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Private helper method for discovering courses on studyportal.
|
def _discover_courses(session, di, lvl, total):
query_string = '|'.join((f'di-{di}', # Discipline
'en-3002', # Don't know what this is, could be a mechanism for rate limiting
f'lv-{lvl}', # Degree level
'tc-EUR', # Currency
'uc-30', # Don't know what this is
'ur-38')) # Don't know what this is
n_pages = (total // PAGE_SIZE) + (total % PAGE_SIZE > 0)
for page in range(0, n_pages):
r = session.get(SEARCH_URL, params={'start': page*PAGE_SIZE, 'q': query_string})
r.raise_for_status()
for course in r.json():
# Don't double count sublevels (e.g. preparation is a level & also incl under bachelor)
if course['level'] != lvl:
continue
yield course
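# Usage sketch (editorial addition): assumes SEARCH_URL and PAGE_SIZE are module-level
# constants used by the helper above, and that `total` came from discover_level_count().
# The discipline id, level and total below are placeholders.
if __name__ == '__main__':
    import requests

    session = requests.Session()
    di, lvl, total = 1, 'bachelor', 250
    courses = list(_discover_courses(session, di, lvl, total))
    print(f'Collected {len(courses)} courses for discipline {di} at level {lvl!r}')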
|
[
"def api_courses_get():\n\tpass",
"def get_courses(self):\n return self.q(css='ul.listing-courses .course-item')",
"def search_courses(session):\n page = session.get(URL)\n bs = BeautifulSoup(page.text, 'lxml')\n colleges = get_college(bs)\n for college in colleges:\n terms = get_term(session, bs, college)\n for term in terms[1:]:\n majors = get_majors(session, bs, college, term)\n for major in majors:\n for career in CAREER:\n doc_ref = db.collection('colleges').document(college) \\\n .collection('majors').document(major) \\\n .collection('terms').document(term) \\\n .collection('career').document(career)\n\n values = get_param_for_courses(bs, college, term, career, major)\n page = session.post(URL, data=values, headers=headers)\n bs1 = BeautifulSoup(page.text, 'lxml')\n try:\n get_courses(bs1, doc_ref)\n except AttributeError as ex:\n print('No course found')\n time.sleep(randint(0, 1))",
"def get_courses_for_wiki(self, wiki_slug, **kwargs):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def get_courses(url, campus, dept_link):\n client = http.client.HTTPSConnection(url.netloc)\n client.request('GET', '%s%s' % (url.path, dept_link))\n response = client.getresponse()\n if response.status != 200:\n logging.warning('Error reading category (%s): %d %s', dept_link,\n response.status, response.read())\n return\n\n tree = lxml.html.fromstring(response.read())\n client.close()\n\n items = tree.xpath('/html/body/a/p')\n courses = []\n for i in items:\n course = parse_course(i, campus)\n if not course:\n logging.warning('Unable to parse course: %s', lxml.html.tostring(i))\n continue\n\n courses.append(course)\n\n return courses",
"def course_detail(request, internal_title):\n\n course = get_object_or_404(Course, internal_title=internal_title)\n term = Semester.objects.get(current=True)\n sections = Offering.objects.filter(course=course, sec_term=term).order_by('section')\n\n # For use in Programs sidebar\n # programs = Program.objects.all()\n\n return render_to_response(\n 'courses/course_detail.html',\n locals(),\n context_instance=RequestContext(request)\n )",
"def get_courses(bs, doc_ref):\n courses = bs.find(id=\"ACE_$ICField$4$$0\").tr.find_next_siblings('tr')\n for course in courses:\n title = course.find('a', {'class': 'PSHYPERLINK PTCOLLAPSE_ARROW'}).parent\n sections = course.find_all('table', {'class': 'PSLEVEL1GRIDNBONBO'})\n for section in sections:\n section = section.find('tr').find_next_sibling('tr')\n tds = section.find_all('td')\n\n doc_ref.collection('courses').document(title.get_text().strip().split('-')[0]) \\\n .collection('sections').document(tds[0].get_text().strip()).set({\n 'section': tds[1].get_text().split()[0].split('-')[1].strip(),\n 'time': tds[2].get_text().strip(),\n 'Instructor': tds[4].get_text().strip(),\n 'Status': tds[6].img['alt']\n }\n )",
"def courses(self):\n \n if \"_courses\" not in self.__dict__:\n self._courses = []\n courses_to_exclude = []\n\n course_df = pd.read_pickle(\"courses.pkl\")\n\n for parameter in [\"faculty\", \"institute\"]:\n if parameter in self._course_parameters:\n for element in self._course_parameters[parameter]:\n if element[0] == \"-\":\n course_list = courses_to_exclude\n parameter_value = element[1:]\n else:\n course_list = self._courses\n parameter_value = element\n \n indexes = course_df[parameter] == parameter_value\n course_list.extend(\n list(course_df.loc[indexes, \"coursecode\"].values)\n )\n\n if \"coursecode\" in self._course_parameters:\n for course in self._course_parameters[\"coursecode\"]:\n if course[0] == \"-\":\n courses_to_exclude.append(course[1:])\n else:\n self._courses.append(course)\n\n # Remove duplicates\n self._courses = list(dict.fromkeys(self._courses))\n\n for course_to_exclude in courses_to_exclude:\n if course_to_exclude in self._courses:\n self._courses.remove(course_to_exclude)\n\n if \"search\" in self._course_parameters:\n for search_query in self._course_parameters[\"search\"]:\n if search_query[0] == \"-\":\n course_list = courses_to_exclude\n regex = self.regexpify(search_query[1:])\n else:\n course_list = self._courses\n regex = self.regexpify(search_query)\n\n indexes = course_df[\"coursecode\"].str.contains(regex)\n course_list.extend(list(course_df[\"coursecode\"].loc[indexes]))\n\n return self._courses",
"def public_courses_list():\n\n # Get the (possibly cached) courses\n # that the current user is in.\n courses = get_courses(current_user.netid)\n\n # Give back the courses that the\n # student is in. This information\n # is possibly cached.\n return success_response({\n \"courses\": courses\n })",
"def get_courses():\n basedir = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(basedir, 'data.json')) as file:\n data = json.load(file)\n return data['courses']",
"def _get_courses(cls, spec, fields=None):\n try:\n cursor = cls.coll.find(\n spec, fields, sort=[('_id', ASCENDING)])\n\n courses = yield cursor.to_list(None)\n return courses\n\n except TypeError as te:\n if not isinstance(spec, dict):\n raise NotDictError('spec') from te\n\n if not isinstance(fields, (dict, list)) and \\\n fields is not None:\n e = TypeError(\n 'The fields parameter should be a '\n 'dictionary or a list.'\n )\n raise e from te\n\n else:\n raise",
"def get_viewed_courses(time, course_id):\n parameters = {\n 'verb': 'http://id.tincanapi.com/verb/viewed',\n 'since': time,\n 'activity': f'{settings.MOODLE_BASE_URL}/course/view.php?id={course_id}'\n }\n\n return get_statements(parameters)",
"def get_visible_courses(user):\n if user.is_superuser:\n return Course.objects.all()\n else:\n return Coordinator.objects.filter(user=user).values_list(\"course\")",
"def courses():\n current_date = date.today()\n if current_date.month > 6: # Jul, Aug, Sep, Oct, Nov, Dec\n start_year = current_date.year\n else: # Jan, Feb, Mar, Apr, May, Jun\n start_year = current_date.year - 1\n ending = (start_year + 1) % 100 # last two digits of end_date\n school_year = f\"{start_year}-{ending}\"\n return (\n db_session.query(CurriculumCourse)\n .filter(CurriculumCourse.school_year == school_year)\n .filter(CurriculumCourse.course_code != '000')\n .filter(CurriculumCourse.course_active == True)\n .filter(CurriculumCourse.school_code.in_(school_codes))\n .filter(CurriculumCourse.sections.any(CourseSection.assigned_seats > 0))\n )",
"def test_prolearn_extract_courses(mock_mitpe_courses_data):\n assert (\n prolearn.extract_courses(PlatformType.mitpe.value)\n == mock_mitpe_courses_data[\"data\"][\"searchAPISearch\"][\"documents\"]\n )",
"def test_course_discovery_on(self):\n response = self.client.get('/')\n assert response.status_code == 200\n # assert that the course discovery UI is not present\n self.assertContains(response, 'Search for a course')\n\n # check the /courses view\n response = self.client.get(reverse('courses'))\n assert response.status_code == 200\n\n # assert that the course discovery UI is present\n self.assertContains(response, 'Search for a course')\n self.assertContains(response, '<aside aria-label=\"Refine Your Search\" class=\"search-facets phone-menu\">')\n self.assertContains(response, '<div class=\"courses\"')",
"def get_courses(depId = None):\n\n if depId == None:\n response = urllib.urlopen(yacs_courses_url)\n else:\n response = urllib.urlopen(yacs_courses_url + \"?department_id=\" + str(depId))\n data = json.loads(response.read())\n return data[\"result\"]",
"def test_list_enrollments_courses(self):\r\n course_id = None # Change me!!\r\n\r\n r = self.client.list_enrollments_courses(course_id, grading_period_id=None, include=None, role=None, sis_account_id=None, sis_course_id=None, sis_section_id=None, sis_user_id=None, state=None, type=None, user_id=None)",
"def extract_courses(campuses, department_links):\n courses = []\n for campus in campuses:\n url = urllib.parse.urlparse(COURSE_INDICES[campus])\n depts = department_links if department_links else get_department_links(url)\n courses += list(\n itertools.chain.from_iterable(\n [get_courses(url, campus, i) for i in depts]))\n\n return courses"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write to disk under the path di/lvl/di-lvl-count.json
|
def flush(courses, di, lvl, count):
    path = f'{DATA_PATH}/{di}/{lvl}'
if not os.path.exists(path):
os.makedirs(path)
write_json(courses[(di, lvl)], f'{path}/{di}-{lvl}-{count}.json')
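# Usage sketch (editorial addition): shows the accumulate-then-flush pattern implied by
# the (di, lvl)-keyed `courses` dict. DATA_PATH and write_json are assumed to be defined
# by the surrounding module; the discipline id, level and course record are placeholders.
if __name__ == '__main__':
    from collections import defaultdict

    courses = defaultdict(list)
    di, lvl = 1, 'bachelor'
    courses[(di, lvl)].append({'title': 'Example course'})
    flush(courses, di, lvl, count=len(courses[(di, lvl)]))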
|
[
"def write_counters_to_file(self):\n with open(os.path.join(self.cwd,'data/others/counters.txt'),'w') as outputfile:\n json.dump(CounterValues().last_counter,outputfile)\n return True \n return False",
"def write(self, parsed_data):\n file_path = self.data_dir / 'vic_status.json'\n with file_path.open('w') as file:\n file.write(json.dumps(parsed_data))\n\n self.logger.info('Data written to %s', file_path.resolve())",
"def writeToFileWithDepth(*args, **kwargs):\n \n pass",
"def write_json(d_, agent_id):\r\n tmp_prod_filepath = os.path.join(conf.PROD_DIRNAME, conf.TMP_PROD_FILENAME)\r\n if os.path.exists(tmp_prod_filepath):\r\n curr_prod_filename = rotate_prod_filename(conf.PROD_DIRNAME, conf.TMP_PROD_FILENAME, conf.ROTATE_PROD_TIME)\r\n else:\r\n curr_time = time.time()\r\n curr_prod_filename = time.strftime(conf.TIME_FORMAT, time.localtime(curr_time))\r\n with open(os.path.join(conf.PROD_DIRNAME, conf.TMP_PROD_FILENAME), 'w') as file:\r\n file.write(str(curr_time))\r\n\r\n with open(os.path.join(conf.PROD_DIRNAME, curr_prod_filename + conf.PROD_FILENAME), 'a', newline='') as fp:\r\n for i, (key, value) in enumerate(d_.items()):\r\n data = parse_dictionary_to_json_format(value[0], agent_id)\r\n json.dump(data, fp, default=decimal_default)\r\n fp.write(\",\\n\")",
"def write_level_to_dat(level, writer):\n #lower_layer is not reequired, so handle the case where it is None or 0 length\n # by making a default layer of all 0s\n if (level.lower_layer == None or len(level.lower_layer) == 0):\n level.lower_layer = [0]*1024\n level_bytes = calculate_level_byte_size(level)\n writer.write(level_bytes.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(level.level_number.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(level.time.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(level.num_chips.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(b'\\x01\\x00') # Write the \"map detail\" which is always a 2 byte number set to 1\n write_layer_to_dat(level.upper_layer, writer)\n write_layer_to_dat(level.lower_layer, writer)\n total_field_byte_size = calculate_total_optional_field_byte_size(level.optional_fields)\n writer.write(total_field_byte_size.to_bytes(2, cc_classes.BYTE_ORDER))\n for field in level.optional_fields:\n write_field_to_dat(field, writer)",
"def writejson(self):\t\t\n\t\twith open(self.filename, 'w+') as outfile:\n\t\t\tjson.dump(self.cache, outfile, sort_keys=True, indent=4)",
"def _write_location(self):\n file_loc = 'vbr.json'\n if (os.path.isfile(file_loc)):\n with open(file_loc, \"r+\") as location_data:\n self.loc_data = json.load(location_data)\n self.loc_data[self.thread_name][\"last_read\"] = self.post_messages[-1][\"postcounter\"]\n json.dump(self.loc_data, location_data)\n else:\n thread_info = {'url': self.thread, 'last_read': self.post_messages[-1]['postcounter']}\n data = {self.thread_name: thread_info}\n with open(file_loc, \"w\") as location_data:\n json.dump(data, location_data)",
"def write_size(self):\n data = {\"name\": str(self.source),\n \"size\": self.source.stat().st_size}\n with self.target_size_path.open(\"w\") as f:\n json.dump(data, f)\n return True",
"def save(self, level_name: str):\n # Compile level parts\n obj_list = []\n\n # Write layers\n for layer in self.engine.objects.tile.layers:\n info = [\"tile-layer\", layer.name, layer.array._array, layer.data]\n obj_list.append(info)\n\n # Write stcol\n col = self.engine.objects.col.st\n info = [\"static-collider\", col.array.array]\n obj_list.append(info)\n\n # Write each object to file\n for key, obj in self.engine.objects.ent.obj.items():\n info = [obj.name, obj.pos.ftup(), key, obj.data]\n obj_list.append(info)\n\n # Create json dump\n new_list = []\n for item in obj_list:\n new_list.append(NoIndent(item))\n data = json.dumps(new_list, indent=4, cls=NoIndentEncoder)\n data.replace(\",\\n\", \", \\n\")\n if data.endswith(\"]]\") or data.endswith(\"}]\"):\n data = data[1:-1] + \"\\n\" + data[-1]\n\n # Write to file\n level = open(\n path.join(self.paths[\"levels\"], level_name + \".json\"), \"w\"\n )\n level.write(data)\n level.close()\n cprint(\"successful level save!\", \"green\")",
"def write(self):\n \n hdulist = fits.HDUList()\n\n level0 = self.get_level0()\n hdulist.append(level0)\n \n level1 = self.get_level1()\n hdulist.append(level1)\n \n level2 = self.get_level2()\n hdulist.append(level2)\n \n level3 = self.get_level3()\n hdulist.append(level3)\n \n level4 = self.get_level4()\n hdulist.append(level4)\n \n hdulist.writeto(self.metadata_file,clobber=True)\n print('Output metadata to '+self.metadata_file)",
"def write():\n f = open(\"accounts.dat\", \"wb\")\n f.write((\"bank9000™ accounts data\\nwritten UTC \" + str(datetime.utcnow()) + '\\n').encode('utf-8'))\n s = base64.b64encode(json.dumps((users, accounts)).encode())\n f.write(zlib.compress(s) + b'\\n')\n f.close()",
"def to_json_file(self, file_path_name):\n save_dvh_json(self.dvh_data, file_path_name)",
"def write_lens(self, base_path):\n assert self.lens_data is not None\n fmt = [\"%d\", \"%.18f\", \"%.18f\", \"%.18f\", \"%d\"]\n np.savetxt(base_path + self.lens_name, self.lens_data, fmt=fmt)",
"def save_to_file(self):\n for key, value in self.object_metadata.items():\n print('Bucket: {} ====> {}'.format(key, value))\n file_name = os.path.join(\n self.output_dir, 'object_count_difference.json')\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, 'w') as fp:\n json.dump(self.object_metadata, fp)\n print('File saved at: {}'.format(file_name))\n print('Prefix Path: {}, File Name: {}'.format(prefix_path, file_name))",
"def save_level(self):\n self.filename_copy = '%s.copy' % self.filename\n print(self.filename_copy)\n fd_write = open(self.filename_copy, 'w')\n for line in self.data:\n fd_write.write(line)\n fd_write.write('\\n')\n\n fd_write.close()\n print(\"saved correctly\")\n shutil.copy(self.filename_copy, self.filename)\n os.remove(self.filename_copy)",
"def write_status(path, code):\n with path.open(mode='wt') as status_fh:\n status_fh.write(\"%d\\n\" % code)",
"def save_traces(self, path: str):\n import json\n\n with open(path, \"w\") as f:\n json_traces = [t.to_dict() for t in self.memory_traces]\n json.dump({\"traces\": json_traces}, f)",
"def save(self):\n if not os.path.exists(f\"{options.get_base_dir()}/data\"):\n os.mkdir(f\"{options.get_base_dir()}/data\")\n if not os.path.exists(f\"{options.get_base_dir()}/data/{LimitedSeries.FOLDER}\"):\n os.mkdir(f\"{options.get_base_dir()}/data/{LimitedSeries.FOLDER}\")\n with open(\"{}/data/{}/{}.json\".format(options.get_base_dir(), LimitedSeries.FOLDER, self.get_id()), \"w\") as jsonfile:\n dump(self.to_json(), jsonfile, indent=4)",
"def writeCompcountFile(compcountpath,compcount): \n with open(compcountpath,\"w\") as outfile:\n outfile.write(\"{0}\\n\".format(compcount))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
An implementation of the local synthetic instances (LSI) method for oversampling a dataset.
|
def lsi(x, y, params=None):
assert isinstance(x, np.ndarray), 'Input x must be a Numpy array'
    assert isinstance(y, np.ndarray), 'Input y must be a Numpy array'
    if params is None:
        params = {}
    n = x.shape[0]
    max_num_weighted_samples = params.get('max_num_weighted_samples', np.inf)
    num_synthetic_instances = params.get('num_synthetic_instances', n)
    num_weighted_samples = min(n, max_num_weighted_samples)
if 'p_range' in params:
p_range = params['p_range']
if not hasattr(p_range, '__getitem__'):
p_range = [p_range, p_range]
elif 'p' in params: # Backwards compatibility with prev version:
p = params.get('p', 2.0)
p_range = [p, p]
else:
p_range = (1.2, 3.0)
assert len(p_range) == 2, 'p_range must have exactly two elements: (p_min, p_max)'
p_min = p_range[0]
p_max = p_range[1]
b = np.array(range(num_weighted_samples)) + 1.0
synth_x = np.zeros([num_synthetic_instances] + list(x.shape[1:]))
synth_y = np.zeros([num_synthetic_instances] + list(y.shape[1:]))
for i in range(num_synthetic_instances):
t = float(i) / num_synthetic_instances
p_val = p_min + t * (p_max - p_min)
w = 1.0 / np.power(b, p_val)
w /= sum(w)
inds = random.sample(range(n), num_weighted_samples)
synth_x_components = np.array([w[j] * x[inds[j], ...]
for j in range(num_weighted_samples)])
synth_x[i, ...] = np.sum(synth_x_components, axis=0)
synth_y_components = np.array([w[j] * y[inds[j], ...]
for j in range(num_weighted_samples)])
synth_y[i, ...] = np.sum(synth_y_components, axis=0)
return synth_x, synth_y
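# Minimal usage sketch (editorial addition): the surrounding module is assumed to import
# numpy as np and the standard-library random module, as the function above requires.
if __name__ == '__main__':
    x_minority = np.random.rand(40, 3)   # 40 feature vectors to oversample
    y_minority = np.ones((40, 1))        # matching targets
    synth_x, synth_y = lsi(x_minority, y_minority,
                           params={'num_synthetic_instances': 10})
    print(synth_x.shape, synth_y.shape)  # -> (10, 3) (10, 1)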
|
[
"def lsi(\n adata: anndata.AnnData, n_components: int = 20,\n use_highly_variable: Optional[bool] = None, **kwargs\n) -> None:\n if use_highly_variable is None:\n use_highly_variable = \"highly_variable\" in adata.var\n adata_use = adata[:, adata.var[\"highly_variable\"]] if use_highly_variable else adata\n X = num.tfidf(adata_use.X)\n X_norm = sklearn.preprocessing.Normalizer(norm=\"l1\").fit_transform(X)\n X_norm = np.log1p(X_norm * 1e4)\n X_lsi = sklearn.utils.extmath.randomized_svd(X_norm, n_components, **kwargs)[0]\n X_lsi -= X_lsi.mean(axis=1, keepdims=True)\n X_lsi /= X_lsi.std(axis=1, ddof=1, keepdims=True)\n adata.obsm[\"X_lsi\"] = X_lsi",
"def oversample(self, *args, **kwargs):\n if args:\n assert len(args) in [1, len(self)]\n elif kwargs:\n for name in self.names:\n if name not in kwargs:\n kwargs[name] = 1\n factors = self._args_kwargs_to_list(*args, **kwargs)\n new_binning = [dim.oversample(f)\n for dim, f in zip(self._dimensions, factors)]\n return MultiDimBinning(new_binning)",
"def compute_NSI(signal, sampling_freq,\n p0=0,\n low_freqs = np.linspace(2,5,5),\n T_sliding_mean=500e-3,\n alpha=2.87,\n with_subquantities=False): \n sliding_mean = compute_sliding_mean(signal, sampling_freq, T=T_sliding_mean)\n \n low_freqs_envelope = compute_freq_envelope(signal, sampling_freq, low_freqs)\n\n if with_subquantities:\n return low_freqs_envelope, sliding_mean, NSI_func(low_freqs_envelope, sliding_mean,\n p0=p0,\n alpha=alpha)\n else:\n return NSI_func(low_freqs_envelope, sliding_mean,\n p0=p0,\n alpha=alpha)",
"def load_soil_sample_data2(sl1):\n # soil\n sl1.cohesion = 30 # [Pa]\n sl1.phi = 0 # [degrees]\n sl1.unit_dry_weight = 17000 # [N/m3]",
"def oversampling(self, data, labels):\n assert self.args.oversampling_ratio is not None, (\n \"When `--do_oversampling` is set, it also needs a proper value for `--oversampling_ratio`.\")\n\n samples_of_label = defaultdict(list)\n for sample, label in zip(data, labels):\n samples_of_label[label].append(sample)\n\n num_samples_of_label = {label: len(lst) for label, lst in samples_of_label.items()}\n max_num_samples = max(num_samples_of_label.values())\n min_num_samples = int(max_num_samples * self.args.oversampling_ratio)\n\n self.log.info(f\"Log for oversampling!\")\n for label, num_samples in sorted(num_samples_of_label.items()):\n # for approximation issue, let's put them at least `n` times\n n = 5\n # ratio = int(max(min_num_samples / num_samples, 1.0) * n / n + 0.5)\n ratio = int(max(min_num_samples / num_samples, 1.0) * n + 0.5)\n\n self.log.info(f\"{label}: {num_samples} x {ratio} => {num_samples * ratio}\")\n\n for i in range(ratio - 1):\n data.extend(samples_of_label[label])\n labels.extend(label for _ in range(num_samples))\n\n return data, labels",
"def test_sires():\n allele_freqs = np.random.uniform(0.3,0.5,50)\n adults = fp.make_parents(100, allele_freqs, family_name='a')\n # Example with a single family\n progeny = fp.make_sibships(adults, 0, [1,2,3], 5, 'x')\n mothers = adults.subset(progeny.mothers)\n patlik = fp.paternity_array(progeny, mothers, adults, mu = 0.0015, missing_parents=0.01)\n sc = fp.sibship_clustering(patlik)\n me = sc.sires()\n assert isinstance(me, pd.DataFrame)\n list(me['label'])",
"def oversample(self):\n return self._oversample",
"def bs_replicate(data, func=mn.mean):",
"def load_soil_sample_data(sl0):\n # soil\n sl0.height = 1.5 #[m]\n sl0.phi = 34 # [degrees]\n sl0.unit_dry_weight = 17000 # [N/m3]\n sl0.c_a = 0\n sl0.cohesion = 0 # [Pa]",
"def get_sample_source(prob_label):\n\n if prob_label not in label2fname:\n raise ValueError('Unknown problem label. Need to be one of %s'%str(list(label2fname.keys())) )\n fname = label2fname[prob_label]\n tst_data, n = load_nips_TSTData(fname)\n ss = data.SSResample(tst_data)\n return ss, n",
"def overrelaxedSample(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return SliceSamplerBase.overrelaxedSample(self)",
"def oversample(self, data, labels, labels2oversample, reps):\n assert data['R'].shape[0] == labels.shape[0], 'The input data and labels must have the same length.'\n assert len(labels2oversample) == len(reps), 'The length of labels2oversample and reps must be the same.'\n\n data = copy.deepcopy(data)\n labels = pd.DataFrame(labels)\n\n for i in range(len(labels2oversample)):\n over_inds = labels[labels[0].map(lambda x: x in [labels2oversample[i]])].index.tolist()\n for key in data:\n concat_list = [data[key]] + [data[key][over_inds]]*reps[i]\n data[key] = np.concatenate(concat_list, axis=0)\n\n return data",
"def collectSI(name, fov, frameCount=1, dwell=9, dims=(1024, 1024), vectorSet=None, subRaster=None, path=None, rotation=0.0, off=False):\n\t\tglobal terminated\n\t\tif terminated:\n\t\t\treturn\n\t\t_ts.chamberLed(False)\n\t\tsetResolution(_edsResolution)\n\t\tasi = sem.getAcquireSpectrumImage()\n\t\tci=asi.getConfigureImage()\n\t\tci.setImageDimensions(dims[0], dims[1])\n\t\tci.setScanSpeed(dwell)\n\t\tci.setFieldOfView(fov)\n\t\tci.setRotation(rotation)\n\t\tif subRaster:\n\t\t\tif not isinstance(subRaster, jawt.Rectangle):\n\t\t\t\tsubRaster = jawt.Rectangle(subRaster[0], subRaster[1], subRaster[2] - subRaster[0] - 1, subRaster[3] - subRaster[1] - 1)\n\t\t\tci.setSubRaster(subRaster)\n\t\tasi.setDesiredFrameCount(frameCount)\n\t\toutPath = \"%s/%s\" % ((path if path else defaultPath), name)\n\t\tjio.File(outPath).mkdirs()\n\t\tfos = jio.FileOutputStream(jio.File(outPath, \"map.ptx\"))\n\t\ttry:\n\t\t\tif vectorSet and isinstance(vectorSet, epq.VectorSet):\n\t\t\t\tasi.setVectorSet(vectorSet)\n\t\t\tasi.start(fos)\n\t\t\tid = 0\n\t\t\twhile True:\n\t\t\t\tif terminated:\n\t\t\t\t\tasi.cleanup()\n\t\t\t\t\tbreak\n\t\t\t\tmsg = sem.take()\n\t\t\t\tif isinstance(msg, semss.DataItems.ImageDatum):\n\t\t\t\t\twrite(msg, \"Image\", outPath)\n\t\t\t\t\tid = id + 1\n\t\t\t\telif isinstance(msg, semss.DataItems.SpectrumDatum):\n\t\t\t\t\twrite(msg, \"Spectrum\", outPath, fmt=\"msa\")\n\t\t\t\telif isinstance(msg, semss.DataItems.DoneDatum):\n\t\t\t\t\tbreak\n\t\tfinally:\n\t\t\tfos.close()\n\t\t_ts.chamberLed(defLED)\n\t\tlogImage((path if path else defaultPath), name, \"SI\", fov, dims, dwell, sem.beamEnergy, frameCount)\n\t\treport(\"<p>Collected SI <i>%s</i> %0.1f μm FOV %d × %d with %d frames at dwell %d </p>\" % (name, 1000.0*fov, dims[0], dims[1], frameCount, dwell ))\n\t\tif off:\n\t\t\tturnOff()",
"def _resample(self, data, labels):\n X_train, y_train = ADASYN(n_jobs=16).fit_resample(data, labels)\n return X_train, y_train",
"def build_synthetic_dataset(self):\n pass",
"def under_sampling(self, _X, _y):\n rus = RandomUnderSampler()\n return rus.fit_sample(_X, _y)",
"def _add_tfidf_lsi(self, data, istest):\n\n print('Vectorize with TFIDF-LSI...')\n if not istest:\n self.__vectorizer = TfidfVectorizer()\n x = self.__vectorizer.fit_transform(data['text'])\n self.__svd_model = TruncatedSVD(n_components=500,\n algorithm='randomized',\n n_iter=10,\n random_state=SEED)\n x = self.__svd_model.fit_transform(x)\n # Save the feature names\n words = self.__vectorizer.get_feature_names()\n self.__feature_names = [\n '+'.join([f'{coef:.1f}{word}' for coef, word in zip(component, words)])\n for component in self.__svd_model.components_]\n else:\n # Reuse the training representation\n x = self.__vectorizer.transform(data['text'])\n x = self.__svd_model.transform(x)\n tfidf_features = pd.DataFrame(x, columns=self.__feature_names) \\\n .reset_index(drop=True)\n data = pd.concat([data, tfidf_features], axis=1)\n # Need to return the new df since pd.concat is not an inplace method\n return data",
"def si_snr_loss(y_pred_batch, y_true_batch, lens, reduction=\"mean\"):\n\n y_pred_batch = torch.squeeze(y_pred_batch, dim=-1)\n y_true_batch = torch.squeeze(y_true_batch, dim=-1)\n\n batch_size = y_pred_batch.shape[0]\n SI_SNR = torch.zeros(batch_size)\n\n for i in range(0, batch_size): # Run over mini-batches\n s_target = y_true_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]\n s_estimate = y_pred_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]\n\n # s_target = <s', s>s / ||s||^2\n dot = torch.sum(s_estimate * s_target, dim=0, keepdim=True)\n s_target_energy = (\n torch.sum(s_target ** 2, dim=0, keepdim=True) + smallVal\n )\n proj = dot * s_target / s_target_energy\n\n # e_noise = s' - s_target\n e_noise = s_estimate - proj\n\n # SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)\n si_snr_beforelog = torch.sum(proj ** 2, dim=0) / (\n torch.sum(e_noise ** 2, dim=0) + smallVal\n )\n SI_SNR[i] = 10 * torch.log10(si_snr_beforelog + smallVal)\n\n if reduction == \"mean\":\n return -SI_SNR.mean()\n\n return -SI_SNR",
"def regress_mean_sic_indicies(self):\n\t\tself.mean_regressions = {}\n\t\tself.mean_pvalue = {}\n\t\tself.mean_rvalue = {}\n\t\tself.mean_b = {}\n\t\tself.mean_std_err = {}\n\n\t\tfor index in self.indicies:\n\t\t\tsic = self.seaice_data.copy()\n\t\t\tind = self.index_data[index].copy()\n\n\t\t\ttimes = list(set(set(sic.time.values) & set(ind.time.values)))\n\t\t\tsic = sic.sel(time=times)\n\t\t\tif self.seaice_source == 'nsidc':\n\t\t\t\tsic = seaice_area_mean(sic,1)\n\t\t\tif self.seaice_source == 'ecmwf':\n\t\t\t\tsic = sic.mean(dim = ('longitude','latitude'))\n\t\t\tind = ind.sel(time=times)\n\n\t\t\tsic = sic.sortby(sic.time)\n\t\t\tind = ind.sortby(ind.time)\n\n\t\t\tm, b, r_value, p_value, std_err = scipy.stats.linregress(ind, sic)\n\t\t\tself.mean_regressions[index] = m\n\t\t\tself.mean_pvalue[index] = p_value\n\t\t\tself.mean_rvalue[index] = r_value\n\t\t\tself.mean_b[index] = b\n\t\t\tself.mean_std_err[index] = std_err"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write Recaman's sequence to a text file.
|
def write_sequence(filename, num):
with open(filename, mode='wt', encoding='utf-8') as f:
f.writelines(f"{r}\n"
for r in islice(sequence(), num + 1))
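# The helpers `islice` and `sequence` are not defined in this snippet. Below is a minimal
# sketch (editorial addition) of what they are assumed to be: itertools.islice plus a
# generator for Recaman's sequence, where each term is the previous term minus n when
# that value is non-negative and unseen, and the previous term plus n otherwise.
from itertools import count, islice

def sequence():
    seen = set()
    current = 0
    for n in count(1):
        yield current
        seen.add(current)
        nxt = current - n
        if nxt < 0 or nxt in seen:
            nxt = current + n
        current = nxt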
|
[
"def write_fasta( filename, seq_name, sequence ):\n with FastaWriter( filename ) as writer:\n record = FastaRecord( seq_name, sequence )\n writer.writeRecord( record )",
"def write_genome_sequence_toFile(genomeId, genomeSequence = \"\"):\n\ttext_file = open(str(genomeId)+\".txt\", \"w\")\n\tif genomeSequence == \"\":\n\t\t genomeSequence = DataGenerator.getGenomeSequence(str(genomeId))\n\t\t genomeSequence = genomeSequence.replace('\\n','')\n\n\ttext_file.write(genomeSequence)\n\ttext_file.close()",
"def write_seq(filename, num):\n f = open(filename,mode = \"wt\", encoding=\"utf-8\")\n f.writelines(\"{0}\\n\".format(numb) \n for numb in islice(fibonacci(),num))\n f.close()",
"def saveSeqToRandomFile3(sequences ,ids):\n rf = getRandomFileName()\n O = open(rf, 'w')\n for (s, i) in zip(sequences, ids):\n O.write(\">\"+i+\"\\n\")\n O.write(string.strip(s))\n O.write(\"\\n\")\n O.close()\n return rf",
"def smiles_seq_to_textfile(self, property_seq=None):\n text_fpath = \"temp_smiles_seq.txt\"\n print(f\"Creating text file {text_fpath}\")\n with open(text_fpath, \"w\") as fp:\n for id, smiles in enumerate(self.test_smiles):\n write_txt = smiles\n if property_seq is not None:\n write_txt += \" \" + str(property_seq[id])\n if id < len(self.test_smiles) - 1:\n write_txt += \"\\n\"\n\n fp.write(write_txt)\n return text_fpath",
"def write_fasta(sequences, f):\n for name, sequence in sequences:\n f.write(\">%s\\n\"%name)\n f.write('\\n'.join(['\\n'.join(wrap(block, width=80)) for block in sequence.splitlines()]))",
"def write_end_sequence(fname, seqid):\n if fname is None:\n print(seqid)\n else:\n with open(fname, 'w') as fd:\n fd.write(str(seqid))",
"def write_output(Count_trigram, Count_bigram, input_file, output_name):\n output_file = file(output_name, \"w\")\n input_file.seek(0)\n l = input_file.readline()\n while l:\n line = l.strip()\n fields = line.split(\" \")\n assert len(fields)==3\n log_pr = cal_trigram_param(Count_trigram, Count_bigram, fields) # Calculate using naive estimator.\n l = line + \" \" + str(log_pr) + \"\\n\"\n output_file.write(l)\n l = input_file.readline()\n output_file.close()",
"def transcribe_DNA (filename):\n\n # open the file with the data\n data = open(filename, \"r\")\n\n # extract the info inside as a string\n dataset = data.read()\n\n RNA_dataset = dataset.replace('T', 'U')\n\n newfile = \"/Users/timothyroy/Documents/Rosalind/Problem 2/rosalind_rna2.txt\"\n RNA_file = open(newfile, \"w\")\n\n RNA_file.write(RNA_dataset)\n RNA_file.close()\n\n print \"try it now\"",
"def save_rr_file(filename, probs, domain, sequence,\n method='dm-contacts-resnet'):\n assert len(sequence) == probs.shape[0]\n assert len(sequence) == probs.shape[1]\n with tf.io.gfile.GFile(filename, 'w') as f:\n f.write(RR_FORMAT.format(domain, method, sequence))\n for i in range(probs.shape[0]):\n for j in range(i + 1, probs.shape[1]):\n f.write('{:d} {:d} {:d} {:d} {:f}\\n'.format(\n i + 1, j + 1, 0, 8, probs[j, i]))\n f.write('END\\n')",
"def write(text):",
"def write_fasta(sequence,PDB_file):\r\n\r\n\t#TODO : implement the writing of a fasta file from the sequence obtained from the PDB file.\r\n\r\n\t#return the name of the file.\r\n\tname = PDB_file.split('.')\r\n\t# fp = open(\"FASTAs/\" + 'all.fasta', \"a\")\r\n\t# # # print(type(sequence))\r\n\t# fp.write(\">\" + name[0] + \"\\n\" + str(sequence) + \"\\n\")\r\n\t# fp.close()\r\n\treturn str(name[0])\r\n\t# return \"FASTAs/\" + str(name[0]) + '.fasta'\r",
"def write_currfile(self, text, gen=0):\n\n with open(self.currpath, 'w') as currfile:\n currfile.write('{}\\nGeneration: {}\\n{}'.format(str(datetime.now())[:16],gen, text))\n currfile.flush()",
"def the_end(madlib):\n print(madlib)\n with open('madlib.txt', 'w') as fh:\n fh.write(madlib)",
"def save_as_txt(self, filename, output_dir=os.getcwd()):\n output_dir = os.path.abspath(output_dir)\n for key, val in self.generate_filenames(filename).iteritems():\n tmp = izip(self.data[key].x_list, self.data[key].y_list)\n saved = os.path.join(output_dir, val)\n with open(saved, 'w') as f:\n f.write('\\n'.join('%s %s' % x for x in tmp))\n logger.info('Saving correlator to {} as txt'.format(filename))",
"def write_to_file(phones:list, txt_path:str):\n with open(txt_path, 'w') as fid:\n phone_str = \" \".join(phones) \n fid.write(phone_str)",
"def to_fasta(self, filename):\n with open(filename, \"w\") as out_handle:\n self.to_fasta_file(out_handle)",
"def convert_file(in_file, out_file):\n sequences = SeqIO.parse(in_file, \"genbank\")\n g = open(out_file, \"w\")\n SeqIO.write(sequences, out_file, \"fasta\")",
"def write(self, file, experiment):\n self._write_generators(file)\n file.write('\\n')\n file.write('main =\\n')\n file.write(' do putStrLn \"Loading SVG fonts...\"\\n')\n file.write(' fonts <- loadCommonFonts\\n')\n file.write(' putStrLn \"Loaded.\"\\n')\n file.write(' putStrLn \"Started running the simulation and saving the results...\"\\n')\n file.write(' let renderer = DiagramsRenderer SVG (return fonts)\\n')\n file.write(' path = WritableFilePath ' + encode_str(experiment.get_path()) + '\\n')\n file.write(' runExperimentParallel experiment generators (WebPageRenderer renderer path) model\\n')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load RabbitMQ config from VOLTTRON_HOME
|
def load_rmq_config(self, volttron_home=None):
"""Loads the config file if the path exists."""
with open(self.volttron_rmq_config, 'r') as yaml_file:
self.config_opts = yaml.safe_load(yaml_file)
if self.config_opts.get('rmq-home'):
self.config_opts['rmq-home'] = os.path.expanduser(
self.config_opts['rmq-home'])
|
[
"def read_galaxy_amqp_config(galaxy_config, base_dir):\n galaxy_config = add_full_path(galaxy_config, base_dir)\n config = ConfigParser.ConfigParser()\n config.read(galaxy_config)\n amqp_config = {}\n for option in config.options(\"galaxy_amqp\"):\n amqp_config[option] = config.get(\"galaxy_amqp\", option)\n\n return amqp_config",
"def load_config():\n return config.load_config({})",
"def main():\n RabbitMQVhost()",
"def _kombu_configuration(conf):\n cfg_keys = ('max_retries',\n 'interval_start',\n 'interval_step',\n 'interval_max')\n return {k: getattr(conf.CONF.rabbit, k) for k in cfg_keys}",
"def load_config():\n f = open(os.path.expanduser(\"~/.baas/python_test_config.yaml\"), 'r')\n config = yaml.load(f)\n f.close()\n return config",
"def read_config(self):\n path = pathlib.Path.home() / '.config' / 'ccmk' / 'config.yaml'\n if not path.exists():\n msg = (f\"The configuration file storing your checkmk credentials \"\n f\"- {path} - does not exist. Please create it.\")\n raise RuntimeError(msg)\n with path.open(mode='rt') as f:\n self.config = yaml.load(f)",
"def get_rabbit_connection(config_path, include_password):\n vhost = CONFIG.broker.vhost\n LOG.debug(f\"Broker: vhost = {vhost}\")\n\n username = CONFIG.broker.username\n LOG.debug(f\"Broker: username = {username}\")\n\n server = CONFIG.broker.server\n LOG.debug(f\"Broker: server = {server}\")\n\n try:\n password_filepath = CONFIG.broker.password\n LOG.debug(f\"Broker password filepath = {password_filepath}\")\n password_filepath = os.path.abspath(expanduser(password_filepath))\n except KeyError:\n raise ValueError(\"No password provided for RabbitMQ\")\n\n try:\n password = read_file(password_filepath)\n except IOError:\n raise ValueError(f\"RabbitMQ password file {password_filepath} does not exist\")\n\n # Test configurations.\n rabbitmq_config = {\n \"vhost\": vhost,\n \"username\": username,\n \"password\": \"******\",\n \"server\": server,\n }\n\n if include_password:\n rabbitmq_config[\"password\"] = password\n\n return RABBITMQ_CONNECTION.format(**rabbitmq_config)",
"def _start_rabbitmq_without_ssl(rmq_config, conf_file, env=None):\n if not rmq_config.volttron_home:\n rmq_config.volttron_home = get_home()\n\n rmq_home = rmq_config.rmq_home\n if not rmq_home:\n rmq_home = os.path.join(os.path.expanduser(\"~\"),\n \"rabbitmq_server/rabbitmq_server-3.7.7\")\n if os.path.exists(rmq_home):\n os.environ['RABBITMQ_HOME'] = rmq_home\n else:\n print(\"\\nERROR:\\n\"\n \"Missing key 'rmq_home' in RabbitMQ config and RabbitMQ is \"\n \"not installed in default path: \\n\"\n \"~/rabbitmq_server/rabbitmq_server-3.7.7 \\n\"\n \"Please set the correct RabbitMQ installation path in \"\n \"rabbitmq_config.yml\")\n exit(1)\n else:\n if not os.path.exists(rmq_home) or not os.path.exists(os.path.join(\n rmq_home, 'sbin/rabbitmq-server')):\n print(\"\\nERROR:\\n\"\n \"Invalid rmq-home value ({}). Please fix rmq-home \"\n \"in {} and rerun this script\".format(\n rmq_home, rmq_config.volttron_rmq_config))\n exit(1)\n else:\n os.environ['RABBITMQ_HOME'] = rmq_home\n\n # attempt to stop\n stop_rabbit(rmq_home, env, quite=True)\n\n if rmq_config.amqp_port != 5672 and rmq_config.mgmt_port != 15672:\n # If ports if non ssl ports are not default write a rabbitmq.conf before\n # restarting\n new_conf = \"\"\"listeners.tcp.default = {}\nmanagement.listener.port = {}\"\"\".format(rmq_config.amqp_port, rmq_config.mgmt_port)\n\n with open(conf_file, 'w+') as r_conf:\n r_conf.write(new_conf)\n\n # Need to write env file even when starting without ssl mode since env file will provide the right node name,\n # tcp port and conf file to use. This is essential for tests as we don't use default port, paths or node name.\n # TODO - we should probably not use default node name even for non test use case to avoid node name class when\n # you have more than one instance of RMQ on the same machine\n write_env_file(rmq_config, conf_file, env)\n\n # Start RabbitMQ server\n _log.info(\"Starting RabbitMQ server\")\n start_rabbit(rmq_config.rmq_home, env=env)",
"def setup_rabbitmq_volttron(setup_type, verbose=False, prompt=False, instance_name=None,\n rmq_conf_file=None, env=None):\n if not instance_name:\n instance_name = get_platform_instance_name(prompt=True)\n # Store config this is checked at startup\n store_message_bus_config(message_bus='rmq', instance_name=instance_name)\n\n rmq_config = RMQConfig()\n if verbose:\n _log.setLevel(logging.DEBUG)\n _log.debug(\"verbose set to True\")\n _log.debug(get_home())\n logging.getLogger(\"requests.packages.urllib3.connectionpool\"\n \"\").setLevel(logging.DEBUG)\n else:\n _log.setLevel(logging.INFO)\n logging.getLogger(\"requests.packages.urllib3.connectionpool\"\n \"\").setLevel(logging.WARN)\n\n if prompt:\n # ignore any existing rabbitmq_config.yml in vhome. Prompt user and\n # generate a new rabbitmq_config.yml\n _create_rabbitmq_config(rmq_config, setup_type)\n\n # Load either the newly created config or config passed\n try:\n rmq_config.load_rmq_config()\n\n except (yaml.parser.ParserError, yaml.scanner.ScannerError, yaml.YAMLError) as exc:\n _log.error(\"Error: YAML file cannot parsed properly. Check the contents of the file\")\n return exc\n\n except IOError as exc:\n _log.error(\"Error opening {}. Please create a rabbitmq_config.yml \"\n \"file in your volttron home. If you want to point to a \"\n \"volttron home other than {} please set it as the \"\n \"environment variable VOLTTRON_HOME\".format(\n rmq_config.volttron_rmq_config, rmq_config.volttron_home))\n _log.error(\"\\nFor single setup, configuration file must at least \"\n \"contain host and ssl certificate details. For federation \"\n \"and shovel setup, config should contain details about the \"\n \"volttron instance with which communication needs \"\n \"to be established. Please refer to example config file \"\n \"at examples/configurations/rabbitmq/rabbitmq_config.yml\")\n raise\n\n if not rmq_conf_file:\n rmq_conf_file = os.path.join(rmq_config.rmq_home, \"etc/rabbitmq/rabbitmq.conf\")\n\n invalid = True\n if setup_type in [\"all\", \"single\"]:\n invalid = False\n # Verify that the rmq_conf_file if exists is removed before continuing.\n message = f\"A rabbitmq conf file {rmq_conf_file} already exists.\\n\" \\\n \"In order for setup to proceed it must be removed.\\n\"\n if os.path.exists(rmq_conf_file):\n print(message)\n while os.path.exists(rmq_conf_file):\n value = prompt_response(f\"Remove {rmq_conf_file}? 
\", y_or_n)\n if value in y:\n os.remove(rmq_conf_file)\n\n _start_rabbitmq_without_ssl(rmq_config, rmq_conf_file, env=env)\n _log.debug(\"Creating rabbitmq virtual hosts and required users for \"\n \"volttron\")\n # Create local RabbitMQ setup - vhost, exchange etc.\n # should be called after _start_rabbitmq_without_ssl\n rmq_mgmt = RabbitMQMgmt()\n success = rmq_mgmt.init_rabbitmq_setup()\n if success and rmq_config.is_ssl:\n _setup_for_ssl_auth(rmq_config, rmq_conf_file, env=env)\n\n # Create utility scripts\n script_path = os.path.dirname(os.path.realpath(__file__))\n src_home = os.path.dirname(os.path.dirname(script_path))\n start_script = os.path.join(src_home, 'start-rabbitmq')\n with open(start_script, 'w+') as f:\n f.write(os.path.join(rmq_config.rmq_home, 'sbin',\n 'rabbitmq-server') + ' -detached')\n f.write(os.linesep)\n f.write(\"sleep 5\") # give a few seconds for all plugins to be ready\n os.chmod(start_script, 0o755)\n\n stop_script = os.path.join(src_home, 'stop-rabbitmq')\n with open(stop_script, 'w+') as f:\n f.write(os.path.join(rmq_config.rmq_home, 'sbin',\n 'rabbitmqctl') + ' stop')\n os.chmod(stop_script, 0o755)\n\n # symlink to rmq log\n log_name = os.path.join(src_home, 'rabbitmq.log')\n if os.path.lexists(log_name):\n os.unlink(log_name)\n\n os.symlink(os.path.join(rmq_config.rmq_home,\n 'var/log/rabbitmq',\n rmq_config.node_name + \"@\" +\n rmq_config.hostname.split('.')[0] + \".log\"),\n log_name)\n\n if setup_type in [\"all\", \"federation\"]:\n # Create a multi-platform federation setup\n invalid = False\n _create_federation_setup(rmq_config.admin_user,\n rmq_config.admin_pwd,\n rmq_config.is_ssl,\n rmq_config.virtual_host,\n rmq_config.volttron_home)\n if setup_type in [\"all\", \"shovel\"]:\n # Create shovel setup\n invalid = False\n if rmq_config.is_ssl:\n port = rmq_config.amqp_port_ssl\n else:\n port = rmq_config.amqp_port\n _create_shovel_setup(rmq_config.instance_name,\n rmq_config.hostname,\n port,\n rmq_config.virtual_host,\n rmq_config.volttron_home,\n rmq_config.is_ssl)\n if invalid:\n _log.error(\"Unknown option. Exiting....\")",
"def load(self):\n self.config.read(\"config.py\")\n pass",
"def loadConfig(self):\n pass",
"def load_config(msg):\n import ujson as json\n try:\n config = json.loads(msg)\n except (OSError, ValueError):\n print(\"Couldn't load config from JSON.\")\n else:\n set_relay(config['power'])",
"def load_config(self):\n # Open the file at default lcoation, unless something else\n # is passed in instead\n self.logger.info('Running load_config() for HerdClient')\n if self.config is not None:\n self.logger.debug(\"There's a config file passed in\")\n f = file(self.config)\n self.cfg = Config(f)\n \n # Allow parameters passed on the command line to override the\n # config file\n if self.seed is None:\n self.logger.debug(\"There's no seed passed in\")\n self.seed = self.cfg.management.seed",
"def __setup_kombu_queue(self, config):\n configs = config[u'config']\n for item in configs:\n if item[u'group'] == u'queue':\n value = item[u'value']\n queue = value[u'queue']\n uri = value[u'uri']\n manager = RedisManager(uri)\n manager.server.set(u'_kombu.binding.%s' % queue, value)",
"def load_config(self):\n with open(self.IQRF_MQ_CONFIG, 'r') as f:\n cfg = json.load(f)\n cfg = cfg['Instances']\n for i in cfg: # iterate instances\n if i['Name'] == 'MqMessaging' and i['Enabled'] is True:\n return (i['Properties']['LocalMqName'],\n i['Properties']['RemoteMqName'])\n return (None, None)",
"def _setup_for_ssl_auth(rmq_config, rmq_conf_file, env=None):\n _log.info('\\nChecking for CA certificate\\n')\n root_ca_name, server_name, admin_client_name = \\\n certs.Certs.get_admin_cert_names(rmq_config.instance_name)\n vhome = get_home()\n white_list_dir = os.path.join(vhome, \"certificates\", \"whitelist\")\n if not os.path.exists(white_list_dir):\n os.mkdir(white_list_dir)\n\n _create_certs(rmq_config, admin_client_name, server_name)\n\n # if all was well, create the rabbitmq.conf file for user to copy\n # /etc/rabbitmq and update VOLTTRON_HOME/rabbitmq_config.json\n new_conf = \"\"\"listeners.tcp.default = {tcp_port}\nmanagement.listener.port = {mgmt_port}\nlisteners.ssl.default = {amqp_port_ssl}\nssl_options.cacertfile = {ca}\nssl_options.certfile = {server_cert}\nssl_options.keyfile = {server_key}\nssl_options.verify = verify_peer\nssl_options.fail_if_no_peer_cert = true\nauth_mechanisms.1 = EXTERNAL\nssl_cert_login_from = common_name\nssl_options.versions.1 = tlsv1.2\nssl_options.versions.2 = tlsv1.1\nssl_options.versions.3 = tlsv1\nmanagement.listener.port = {mgmt_port_ssl}\nmanagement.listener.ssl = true\nmanagement.listener.ssl_opts.cacertfile = {ca}\nmanagement.listener.ssl_opts.certfile = {server_cert}\nmanagement.listener.ssl_opts.keyfile = {server_key}\ntrust_store.directory={ca_dir}\ntrust_store.refresh_interval=0\"\"\".format(\n tcp_port=rmq_config.amqp_port,\n mgmt_port=rmq_config.mgmt_port,\n mgmt_port_ssl=rmq_config.mgmt_port_ssl,\n amqp_port_ssl=rmq_config.amqp_port_ssl,\n ca=rmq_config.crts.cert_file(rmq_config.crts.trusted_ca_name),\n server_cert=rmq_config.crts.cert_file(server_name),\n server_key=rmq_config.crts.private_key_file(server_name),\n ca_dir=white_list_dir\n )\n\n with open(rmq_conf_file, 'w') as rconf:\n rconf.write(new_conf)\n\n write_env_file(rmq_config, rmq_conf_file, env)\n\n # Stop server, move new config file with ssl params, start server\n stop_rabbit(rmq_config.rmq_home, env=env)\n\n start_rabbit(rmq_config.rmq_home, env=env)\n\n default_vhome = os.path.abspath(\n os.path.normpath(\n os.path.expanduser(\n os.path.expandvars('~/.volttron'))))\n\n additional_to_do = \"\"\n if vhome != default_vhome:\n additional_to_do = \"\\n - Please set environment variable \" \\\n \"VOLTTRON_HOME \" \\\n \"to {vhome} before starting volttron\"\n\n msg = \"\\n\\n#######################\\n\\nSetup complete for volttron home \" \\\n \"{vhome} \" \\\n \"with instance name={}\\nNotes:\" + additional_to_do + \\\n \"\\n - On production environments, restrict write access to {\" \\\n \"root_ca} to only admin user. For example: \" \\\n \"sudo chown root {root_ca} and {trusted_ca}\" \\\n \"\\n - A new admin user was created with user name: {} and \" \\\n \"password={}.\\n You could change this user's password by logging \" \\\n \"into https://{}:{}/ Please update {} if you change password\" \\\n \"\\n\\n#######################\"\n _log.info(msg.format(rmq_config.instance_name,\n rmq_config.admin_user,\n rmq_config.admin_pwd,\n rmq_config.hostname,\n rmq_config.mgmt_port_ssl,\n rmq_config.volttron_rmq_config,\n root_ca=rmq_config.crts.cert_file(\n rmq_config.crts.root_ca_name),\n trusted_ca=rmq_config.crts.cert_file(\n rmq_config.crts.trusted_ca_name),\n vhome=vhome))",
"def write_rmq_config(self, volttron_home=None):\n try:\n with open(self.volttron_rmq_config, 'w') as \\\n yaml_file:\n yaml.dump(self.config_opts, yaml_file, default_flow_style=False)\n # Explicitly give read access to group and others. RMQ user and\n # agents should be able to read this config file\n os.chmod(self.volttron_rmq_config, 0o744)\n except IOError as exc:\n _log.error(\"Error writing to rabbitmq_config.yml file. Please\"\n \"check VOLTTRON_HOME\".format(self.volttron_home))\n except yaml.YAMLError as exc:\n raise",
"def _load_configurations(self):\n with open(self.config_file) as f:\n configs = f.read()\n config = ConfigParser.RawConfigParser(allow_no_value=True)\n config.readfp(io.BytesIO(configs))\n self.config = config\n #\n self.cert_file = self.config.get(\"cert-paths\", \"cert_file\")",
"def init_app(self, app):\n # Instantiate celery and read config\n super(Celery, self).__init__(app.name,\n broker=app.config['CELERY_BROKER_URL'])\n # Update the config\n self.conf.update(app.config)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write new config options into $VOLTTRON_HOME/rabbitmq_config.yml
|
def write_rmq_config(self, volttron_home=None):
try:
with open(self.volttron_rmq_config, 'w') as \
yaml_file:
yaml.dump(self.config_opts, yaml_file, default_flow_style=False)
# Explicitly give read access to group and others. RMQ user and
# agents should be able to read this config file
os.chmod(self.volttron_rmq_config, 0o744)
except IOError as exc:
_log.error("Error writing to rabbitmq_config.yml file. Please"
"check VOLTTRON_HOME".format(self.volttron_home))
except yaml.YAMLError as exc:
raise
|
[
"def _create_rabbitmq_config(rmq_config, setup_type):\n\n if setup_type == 'single' or setup_type == 'all':\n if os.path.exists(rmq_config.volttron_rmq_config):\n prompt = \"rabbitmq_config.yml exists in {} Do you wish to \" \\\n \"use this file to configure the instance\".format(\n get_home())\n prompt = prompt_response(prompt,\n valid_answers=y_or_n,\n default='Y')\n if prompt in y:\n return\n else:\n _log.info(\"New input data will be used to overwrite existing \"\n \"{}\".format(rmq_config.volttron_rmq_config))\n # TODO: ideally we can load existing file and set values in it\n # default and the compare what changed. If rmq-home changed\n # and existing config those should get cleared. If cert details\n # get changed - overwrite ca, server, admin cert and delete all\n # other certs.\n\n rmq_config.rmq_home = _prompt_rmq_home(rmq_config.rabbitmq_server)\n\n prompt = 'Fully qualified domain name of the system:'\n new_host = prompt_response(prompt, default=getfqdn())\n rmq_config.hostname = new_host\n\n rmq_config.is_ssl = True\n\n if rmq_config.is_ssl:\n prompt = \"Would you like to create a new self signed root CA\" \\\n \"certificate for this instance:\"\n prompt = prompt_response(prompt,\n valid_answers=y_or_n,\n default='Y')\n if prompt in y:\n cert_data = {}\n print(\n \"\\nPlease enter the following details for root CA certificate\")\n prompt = '\\tCountry:'\n cert_data['country'] = prompt_response(prompt, default='US')\n prompt = '\\tState:'\n cert_data['state'] = prompt_response(prompt, mandatory=True)\n prompt = '\\tLocation:'\n cert_data['location'] = prompt_response(prompt, mandatory=True)\n prompt = '\\tOrganization:'\n cert_data['organization'] = prompt_response(prompt, mandatory=True)\n prompt = '\\tOrganization Unit:'\n cert_data['organization-unit'] = prompt_response(prompt,\n mandatory=True)\n cert_data['common-name'] = rmq_config.instance_name + '-root-ca'\n rmq_config.certificate_data = cert_data\n else:\n error = True\n while error:\n while True:\n prompt = 'Enter the root CA certificate public key file:'\n root_public = prompt_response(prompt, mandatory=True)\n if is_file_readable(root_public):\n break\n while True:\n prompt =\\\n 'Enter the root CA certificate private key file:'\n root_key = prompt_response(prompt, mandatory=True)\n if is_file_readable(root_key):\n break\n if certs.Certs.validate_key_pair(root_public, root_key):\n error = False\n cert_data = {\n 'ca-public-key': root_public,\n 'ca-private-key': root_key\n }\n rmq_config.certificate_data = cert_data\n else:\n print(\"Error: Given public key and private key do not \"\n \"match or is invalid. 
public and private key \"\n \"files should be PEM encoded and private key \"\n \"should use RSA encryption\")\n\n prompt = \"Do you want to use default values for RabbitMQ home, \" \\\n \"ports, and virtual host:\"\n prompt = prompt_response(prompt,\n valid_answers=y_or_n,\n default='Y')\n if prompt in y:\n rmq_config.amqp_port = '5672'\n rmq_config.mgmt_port = '15672'\n rmq_config.amqp_port_ssl = '5671'\n rmq_config.mgmt_port_ssl = '15671'\n rmq_config.virtual_host = 'volttron'\n else:\n rmq_config.virtual_host = _prompt_vhost(rmq_config.config_opts)\n\n prompt = 'AMQP port for RabbitMQ:'\n rmq_config.amqp_port = prompt_port(5672, prompt)\n\n prompt = 'http port for the RabbitMQ management plugin:'\n rmq_config.mgmt_port = prompt_port(15672, prompt)\n\n if rmq_config.is_ssl:\n prompt = 'AMQPS (SSL) port RabbitMQ address:'\n rmq_config.amqp_port_ssl = prompt_port(5671, prompt)\n\n prompt = 'https port for the RabbitMQ management plugin:'\n rmq_config.mgmt_port_ssl = prompt_port(15671, prompt)\n\n # Write the new config options back to config file\n rmq_config.write_rmq_config()\n if setup_type in ['federation', 'all']:\n # if option was all then config_opts would be not null\n # if this was called with just setup_type = federation, load existing\n # config so that we don't overwrite existing federation configs\n prompt_upstream_servers(rmq_config.volttron_home)\n if setup_type in ['shovel', 'all']:\n # if option was all then config_opts would be not null\n # if this was called with just setup_type = shovel, load existing\n # config so that we don't overwrite existing list\n prompt_shovels(rmq_config.volttron_home)",
"def add_configuration_options(self, new_options):\r\n for k, v in new_options.items():\r\n self.builder.config.values[k] = v",
"def setConfiguration(options):",
"def config():\n update_config_cli()",
"def configure(db_url, port, root_url, default_timezone):\n\n from odmf.config import conf\n conf.database_url = db_url\n conf.server_port = port\n conf.root_url = root_url\n conf.datetime_default_timezone = default_timezone\n\n from pathlib import Path\n conf_file = Path('config.yml')\n conf.to_yaml(conf_file.open('w'))\n logger.info('config.yml written')",
"def writeConfig(hostname, config, write_dir):\n # Create file\n # Close file handle",
"def add_config(self, pvs: List[str]):\n message_buffer = self.converter.create_forwarder_configuration(pvs)\n self.producer.send(self.topic, message_buffer)",
"def write_example_email_config():\n config = ConfigParser()\n config.add_section(\"email_lib\")\n config.set(\"email_lib\", \"server\", \"smtp.example.com\")\n config.set(\"email_lib\", \"port\", \"587\")\n config.set(\"email_lib\", \"username\", \"USERNAME\")\n config.set(\"email_lib\", \"password\", \"PASSWORD\")\n with open(EMAIL_CONFIG_FILE, \"w\") as fh:\n config.write(fh)",
"def update_file():\n with open(CONFIG_PATH, \"w\") as configfile:\n config.write(configfile)",
"def _kombu_configuration(conf):\n cfg_keys = ('max_retries',\n 'interval_start',\n 'interval_step',\n 'interval_max')\n return {k: getattr(conf.CONF.rabbit, k) for k in cfg_keys}",
"def create_celery_file(self):\n rates = {queue.name: queue.rate for queue in self.queues.values()\n if isinstance(queue, PushQueue)}\n with open(get_celery_configuration_path(self._app_id), 'w') as config_file:\n json.dump(rates, config_file)",
"def set_config(newc: dict) -> None:\n\n c = get_config()\n c.update(newc)\n\n # Configurations are stored in the package installation folder.\n filename = os.path.join(os.path.dirname(__file__), 'config.json')\n\n with open(filename, 'w') as fp:\n json.dump(c, fp, indent=1)",
"def __create_config_file(self):\n try:\n self.logger.debug(f'Create a new config file here: {self.config_file}')\n self.__check_dir(self.config_file.parent)\n fh = self.config_file.open('w', encoding='utf-8')\n for k in self.defaults:\n fh.write(f'{k}={self.defaults[k]}\\n')\n fh.close()\n except (IOError, OSError):\n raise",
"def configure():\n keys = get_keys()\n actualconfig = getconf()\n newconf = {}\n click.echo(\"Modify configuration\")\n for k in keys:\n newconf[k[0]] = click.prompt(\n f\"{slightlybeautify(k[0])}\".capitalize(),\n default=actualconfig.get(k[0], None),\n type=k[1],\n )\n setconf(newconf)",
"def save_config(self):\n if VERBOSE:\n print(self.host, \"| Saving running configuration to startup config with 'wr mem' command\")\n self.connection.save_config()",
"def _write_config(self):\n config = self._build_config()\n if self.debug:\n print(\"Config: %s \" % bin(config))\n self._smbus.write_word_data(self.address, self.__REGISTER_CONFIG, config)",
"def test_writeConfig(self):\n results = yield self.runCommand(\n command_writeConfig,\n script=\"calendarserver_config\")\n\n self.assertEquals(results[\"result\"][\"EnableCalDAV\"], False)\n self.assertEquals(results[\"result\"][\"EnableCardDAV\"], False)\n self.assertEquals(results[\"result\"][\"EnableSSL\"], True)\n self.assertEquals(results[\"result\"][\"Notifications\"][\"Services\"][\"APNS\"][\"Enabled\"], True)\n hostName = \"hostname_%s_%s\" % (unichr(208), u\"\\ud83d\\udca3\")\n self.assertTrue(results[\"result\"][\"ServerHostName\"].endswith(hostName))\n\n # We tried to modify ServerRoot, but make sure our change did not take\n self.assertNotEquals(results[\"result\"][\"ServerRoot\"], \"/You/Shall/Not/Pass\")\n\n # The static plist should still have EnableCalDAV = True\n staticPlist = plistlib.readPlist(self.configFileName)\n self.assertTrue(staticPlist[\"EnableCalDAV\"])",
"def _plugin_about(self, plugin_dir, settings_about):\n config_filename = join(plugin_dir, 'config.cfg')\n config = SafeConfigParser()\n\n with open(config_filename) as fh:\n config.readfp(fh)\n\n for key, value in settings_about.items():\n config.set('help', key, value)\n\n with open(config_filename, 'w') as fh:\n config.write(fh)",
"def print_config(config):\n log.debug('options: \\n' + yaml.dump(config.__dict__, explicit_start=True, explicit_end=True,\n default_flow_style=False))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Download ZIP archive from repository release.
|
async def download_zip(repository, validate):
contents = []
try:
for release in repository.releases.objects:
repository.logger.info(
f"ref: {repository.ref} --- tag: {release.tag_name}"
)
if release.tag_name == repository.ref.split("/")[1]:
contents = release.assets
if not contents:
return validate
for content in contents:
filecontent = await async_download_file(
repository.hass, content.download_url
)
if filecontent is None:
validate.errors.append(f"[{content.name}] was not downloaded.")
continue
result = await async_save_file(
f"{tempfile.gettempdir()}/{repository.repository_manifest.filename}",
filecontent,
)
with zipfile.ZipFile(
f"{tempfile.gettempdir()}/{repository.repository_manifest.filename}",
"r",
) as zip_file:
zip_file.extractall(repository.content.path.local)
if result:
repository.logger.info(f"download of {content.name} complete")
continue
validate.errors.append(f"[{content.name}] was not downloaded.")
except Exception as exception: # pylint: disable=broad-except
validate.errors.append(f"Download was not complete [{exception}]")
return validate
|
[
"def _download( self ):\n self._system.download_file(\"https://github.com/mastbaum/avalanche/tarball/\" + self._tar_name)",
"def repo_downloads(repo, releases, tags):\n downloads = {}\n # for release in repo.iter_releases():\n # name = release.tag_name\n for vers, release in releases.items():\n download_url = None\n download_asset = None\n download_asset_name = repo.name + \".zip\"\n for asset in release.iter_assets():\n if asset.name == download_asset_name:\n download_asset = asset\n break\n\n if not download_asset:\n # Create download... this will take a while\n _log.warning('Generating new release download zip for %s:%s' % (repo.name, vers))\n zip_url = tags[vers].zipball_url\n temp_dir = tempfile.mkdtemp()\n try:\n zip_dlfile = os.path.join(temp_dir, download_asset_name)\n _log.warning('downloading')\n download(zip_url, zip_dlfile)\n if os.path.exists(zip_dlfile):\n _log.warning('extracting')\n # outdir = extract(zip_dlfile)\n outdir = os.path.splitext(zip_dlfile)[0]\n subprocess.check_output(['/usr/bin/unzip', zip_dlfile, '-d', outdir])\n contents = os.listdir(outdir)\n _log.warning('renaming')\n if len(contents) == 1 and os.path.isdir(os.path.join(outdir,contents[0])):\n innerdir = contents[0]\n newdir = os.path.join(outdir,innerdir)\n if innerdir != repo.name:\n os.rename(newdir, os.path.join(outdir,repo.name))\n outdir = os.path.join(outdir,repo.name)\n os.rename(zip_dlfile, zip_dlfile+\".dl\")\n _log.warning('zipping')\n zipdir(dirPath=outdir, zipFilePath=zip_dlfile, includeDirInZip=True, excludeDotFiles=True)\n\n if os.path.exists(zip_dlfile):\n with open(zip_dlfile, 'rb') as assetfile:\n _log.warning('uploading')\n download_asset = release.upload_asset(\n content_type='application/zip, application/octet-stream',\n name=download_asset_name,\n asset=assetfile)\n _log.warning('Finished new release download zip for %s:%s' % (repo.name, vers))\n except:\n _log.exception(\"zip_url: %s\"%zip_url)\n finally:\n remove_tree(temp_dir)\n\n if download_asset:\n download_url = download_asset.browser_download_url\n\n downloads[vers] = download_url\n return downloads",
"def Download(self, project_name, project_version):\n download_url = self.GetDownloadURL(project_name, project_version)\n if not download_url:\n logging.warning(u'Unable to determine download URL for: {0:s}'.format(\n project_name))\n return\n\n filename = self.DownloadFile(download_url)\n\n # github archive package filenames can be:\n # {project version}.tar.gz\n # release-{project version}.tar.gz\n # v{project version}.tar.gz\n github_archive_filenames = [\n u'{0!s}.tar.gz'.format(project_version),\n u'release-{0!s}.tar.gz'.format(project_version),\n u'v{0!s}.tar.gz'.format(project_version)]\n\n if filename in github_archive_filenames:\n # The desired source package filename is:\n # {project name}-{project version}.tar.gz\n package_filename = u'{0:s}-{1:s}.tar.gz'.format(\n project_name, project_version)\n\n if os.path.exists(package_filename):\n os.remove(package_filename)\n\n os.rename(filename, package_filename)\n filename = package_filename\n\n return filename",
"def download(edition, version, path=\".\"):\n archive_name = dist_archive_name(edition, version)\n uri = \"%s://%s/%s\" % (DIST_SCHEME, DIST_HOST, archive_name)\n filename = os.path.join(os.path.abspath(path), archive_name)\n _download(uri, filename)\n return filename",
"def download(self, package, version):\n logging.info('Searching for package archive %s-%s' % (package, version))\n archive_base_name = '%s-%s' % (package, version)\n extensions = ['.tar.gz', '.tar.bz2', '.zip']\n for index in self.settings['find_links']:\n for archive_name in [archive_base_name + ext for ext in extensions]:\n try:\n download(os.path.join(index, archive_name), archive_name, verbose=self.settings['verbosity'] >= 2)\n return os.path.abspath(archive_name)\n except urllib2.URLError:\n pass\n\n raise RequirementException('Failed to find package archive %s-%s' % (package, version))",
"def download():\n # Download the zip\n target = 'https://github.com/downloads/banterability/pluggablemaps-congressionaldistricts/cd99_110_shp.zip'\n destination = os.path.join(data_dir, 'cd99_110_shp.zip')\n urllib.urlretrieve(target, destination)\n # Unzip it\n fh = open(destination, 'rb')\n zfile = zipfile.ZipFile(fh)\n for name in zfile.namelist():\n path = os.path.join(data_dir, name)\n out = open(path, 'wb')\n out.write(zfile.read(name))\n out.close()\n fh.close()",
"def download(version, project):\n\n # parse out the version info\n major, minor, release = version_info(version)\n\n first_letter = project[0]\n numpy_download_uri = 'https://pypi.python.org/packages/source/%s/%s/' % (first_letter, project)\n\n local_gzip = '%s-%s.tar.gz' % (project, version)\n numpy_download_uri += local_gzip\n print numpy_download_uri\n download_file(numpy_download_uri, local_gzip)\n\n return os.path.abspath(local_gzip)",
"def download_build(source, user, passwd, last_date, filename, outdir):\n\n print \"Downloading build file: {}\".format(filename)\n url = source + last_date + '/' + filename\n print \"Url: {}\".format(url)\n r = requests.get(url, stream=True, auth=(user, passwd))\n with open(outdir + '/' + filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=16384):\n if chunk:\n f.write(chunk)\n f.flush()",
"def test_archive_download(self):\n archive_obj = self._create_archive_obj()\n constructed_url = \"http://b/artifactory/api/archive/download/reponame/folder\"\n responses.add(\n responses.GET,\n constructed_url,\n status=200,\n json=self.dir_stat,\n )\n archive_obj.writeto(\"test.zip\")\n reference_params = {\"archiveType\": \"zip\", \"includeChecksumFiles\": \"True\"}\n # check that params were really added to the request\n self.assertDictEqual(responses.calls[1].request.params, reference_params)",
"def download(version: str = __version__, v_prefix: bool = True) -> bool:\r\n\r\n if v_prefix:\r\n version = \"v\" + version\r\n\r\n url, url_exists = Build.get_url(version)\r\n if not url_exists:\r\n return False\r\n\r\n if Build.BUILD_ROOT_DIR.exists():\r\n dir_util.remove_tree(str(Build.BUILD_ROOT_DIR.resolve()))\r\n print(\"Deleted old release.\")\r\n\r\n # Download the build.\r\n resp = get(url).content\r\n print(\"Downloaded the build.\")\r\n # Save the zip file.\r\n platform = system()\r\n filename = f\"{SYSTEM_TO_RELEASE[platform]}\"\r\n if platform == \"Windows\":\r\n filename += \".zip\"\r\n else:\r\n filename += \".tar.gz\"\r\n zip_path = Path().home().joinpath(filename)\r\n zip_path.write_bytes(resp)\r\n print(\"Saved the file.\")\r\n\r\n dst = str(Build.BUILD_ROOT_DIR.resolve())\r\n # Extract the zip file.\r\n if platform == \"Windows\":\r\n with ZipFile(str(zip_path.resolve()), 'r') as zip_ref:\r\n zip_ref.extractall(dst)\r\n else:\r\n tar = tarfile.open(str(zip_path.resolve()))\r\n tar.extractall(dst)\r\n tar.close()\r\n # Run this to fixed \"Damaged App\" errors.\r\n # Source: https://www.google.com/search?client=firefox-b-1-d&q=unity+damaged+app\r\n if platform == \"Darwin\":\r\n cwd = getcwd()\r\n chdir(str(Build.BUILD_ROOT_DIR.joinpath(\"TDW\").resolve()))\r\n call([\"xattr\", \"-r\", \"-d\", \"com.apple.quarantine\", \"TDW.app\"])\r\n chdir(cwd)\r\n print(f\"Extracted the file to: {dst}\")\r\n # Delete the zip file.\r\n zip_path.unlink()\r\n print(\"Deleted the download file.\")\r\n return True",
"def download_zip(ver=BGRID_VERSION, dataFolder='data'):\n\tif not os.path.exists(dataFolder):\n\t\tos.mkdir(dataFolder)\n\tif not os.path.exists(dataFolder + '/' + ver):\n\t\tos.mkdir(dataFolder + '/' + ver)\n\turllib.urlretrieve('http://thebiogrid.org/downloads/archives/Release%%20Archive/BIOGRID-%s/BIOGRID-ORGANISM-%s.tab2.zip' %(ver,ver),filename='%s/%s/BIOGRID-ORGANISM-%s.tab2.zip' %(dataFolder,ver,ver))",
"def download_and_uncompress_tarball(tarball_url, dataset_dir):",
"def download(artifact_id, version, group_id=None, repo=None, ext=None, classifier=None):\n if not group_id:\n group_id = DEFAULT_GROUP_ID\n if not repo:\n repo = 'releases'\n if not ext:\n [ext, classifier] = info(group_id, artifact_id, version)\n if 'SNAPSHOT' in version:\n repo = 'snapshots'\n try:\n url = NEXUS_URL + '/service/local/artifact/maven/content?r={repo}&g={group_id}&a={artifact_id}&v={version}&p={ext}'.format(\n **locals())\n if classifier:\n url += '&c={classifier}'.format(**locals())\n return requests.get(url)\n except urllib.error.HTTPError as e:\n return None",
"def download_release(download_file, release=None):\n if release is None:\n release = get_latest_release()\n url = 'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo' % release\n #download_file = 'go-basic_%s.obo' % release\n misc.http_download(url, download_file)",
"def fetch(self, sha, download_path=\"/tmp\"):\n filename = \"{0}.tar.gz\".format(self.repo_name)\n local_filename = os.path.join(download_path, filename)\n download_url = self.get_archive_url(sha)\n\n LOGGER.info(\"Downloading to: %s\", local_filename)\n r = requests.get(download_url, stream=True)\n\n # Get the total size in bytes\n total_size = int(r.headers.get(\"content-length\", 0))\n\n with open(local_filename, 'wb') as f:\n for chunk in tqdm(r.iter_content(32 * 1024), total=total_size, unit=\"B\", unit_scale=True):\n if chunk:\n f.write(chunk)\n return local_filename",
"def descargar_repo (url, args, ruta):\n if args.verbose:\n print (\"Descargando repositiorio completo ...\")\n new_url, _= url.split(\"blob\")\n page=get(new_url)\n soup = BeautifulSoup(page.content, 'html.parser')\n for link in soup.find_all('a'):\n h = link.get('href')\n \n if \"zip\" in h:\n ruta_zip = f\"https://github.com/{h}\" \n _, user, repo, *_ = h.split (\"/\")\n \n *_, filename = ruta_zip.split (\"/\")\n \n fullname = f\"{ruta}/{user}_{repo}_{filename}\"\n \n r = get(ruta_zip)\n with open(fullname, \"wb\") as zip_file:\n zip_file.write(r.content)\n\n if args.verbose:\n print (f\"Repositorio {Fore.GREEN}{repo}{Fore.RESET} descargado en formato zip -> {fullname}\")",
"def download_data(self):\n res = requests.get(self.url, headers={'User-Agent': 'Mozilla 5.0'})\n soup = BeautifulSoup(res.text, 'html.parser')\n\n try:\n os.mkdir(self.folder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n for link in soup.find_all('a', string=\"ZIP\"):\n name = link['href'].rsplit('/', 1)[-1]\n\n filename = os.path.join(self.folder, name)\n\n if os.path.isfile(filename):\n continue\n\n file_url = self.url + link['href']\n file = requests.get(file_url, headers={'User-Agent': 'Mozilla 5.0'})\n\n f = open(filename, 'wb')\n f.write(file.content)",
"def download_and_unzip_data(\n url = \"https://storage.googleapis.com/simpeg/bookpurnong/bookpurnong_inversion.tar.gz\"\n):\n # download the data\n downloads = Utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory",
"def download_extract(url, folder):\n r = requests.get(url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(folder)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert site to key. `state` is added to end of key.
|
def site_to_key(site, state=""):
if type(state) != str:
raise Exception("`state` must be a string.")
return ",".join([str(l) for l in site]) + state
|
[
"def generate_state_key(self, state, role):\n\n pass",
"def state_key():\n return jsonify(key=redis.get('state:key').decode('utf8'))",
"def create_state_id(self):\n for key, value in config.fips_dict.iteritems():\n if key == self.state.lower():\n state_num = value\n if state_num <=9:\n state_num = '0' + str(state_num)\n else:\n state_num = str(state_num)\n\n return 'st' + state_num",
"def get_key_id(self, code, state):\n return int(\"0x%s%s\"% (hex(code).replace('0x', ''),hex(state & 0xFE).replace('0x', '')),16)",
"def convert_key(self):\n key = \"\" # initiate\n for y in range(self.size):\n for x in range(self.size):\n key += str(self.board[x, y]) # read piece state {1/2/3}\n return key",
"def encode_plan_key(self, job_id, state):\n return \"%s@%s\" % (state, job_id)",
"def state2str(state: Union[dict, str]) -> str:\n\n if type(state) is str:\n return state\n\n return \"\".join([str(state[x]) for x in sorted(state)])",
"def key_to_sourcekey(self, key: str) -> str:\n ...",
"def sourcekey_to_key(self, sourcekey: str) -> str:\n ...",
"def short_state(state: str) -> str:\n return {\n \"idle in transaction\": \"idle in trans\",\n \"idle in transaction (aborted)\": \"idle in trans (a)\",\n }.get(state, state)",
"def encode(self, state: State) -> bytes:\n ...",
"def by_state():\n\n state = request.args.get(\"state\")\n\n # returns the site objects for the submitted state\n state_sites = Site.query.filter(Site.site_state==state).order_by('site_name').all()\n\n return jsonify(state_list=[i.serialize for i in state_sites])",
"def state_to_index(self, state):\n return self.inv_lookup_table.get(state)",
"def generateStateOCDID(state):\n ocdid = TURBOVOTE_BASEOCDID\n ocdid += TURBOVOTE_STATEOCDID\n ocdid += state.lower()\n\n return ocdid",
"def _save_new_state(uf, state):\n state = State(uf=uf.upper(), name=state)\n state.save()\n return state",
"def generate_state():\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in range(32))\r\n login_session['state'] = state",
"def getDeviceStateKey(self):\n state = self.treeSubdeviceState()\n devs = list(state.keys())\n devs.sort()\n return tuple([dev + \"__\" + state[dev] for dev in devs])",
"def normalize_state(state_val):\n if len(state_val) > 2:\n if state_val.capitalize() in STATE_MAP:\n return STATE_MAP[state_val.capitalize()]\n else:\n return state_val\n else:\n return state_val.upper()",
"def build_site_dictionary(page, site):\n headers, cookies, word_count = get_data_from(page)\n return {\n \"site_name\": site,\n \"headers\": headers,\n \"cookies\": cookies,\n \"word_count\": word_count}",
"def state_transform(state):\n if isinstance(state, str):\n return np.array([int(s) for s in state])\n else:\n return str(state)[1:-1].replace(' ', '')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List the possible epistatic coefficients (as label form) for a binary genotype up to a given order.
|
def genotype_coeffs(genotype, order=None):
if order is None:
order = len(genotype)
length = len(genotype)
mutations = [i + 1 for i in range(length) if genotype[i] == "1"]
params = [[0]]
for o in range(1, order + 1):
params += [list(z) for z in it.combinations(mutations, o)]
return params
|
[
"def CI(orders, numbers_of_states):\n\texcite_strings = []\n\tfor n in orders:\n\t\tif n>len(numbers_of_states): raise Exception(\"CI excitation order exceeds available number of molecules\")\n\t\texcite_strings += _n_tuple_substitutions(n, numbers_of_states)\n\treturn excite_strings",
"def get_order_str(self):\n values = []\n for value in self.order:\n if value == 1:\n values.append('S')\n elif value == 2:\n values.append('D')\n elif value == 3:\n values.append('T')\n elif value == 4:\n values.append('Q')\n elif value == 1.5:\n values.append('B')\n elif value == 0:\n values.append('vdW')\n elif value == 0.1:\n values.append('H')\n else:\n raise TypeError('Bond order number {} is not hardcoded as a string'.format(value))\n return values",
"def coefficients(polynomial):\n if not \"args\" in dir(polynomial):\n return [polynomial]\n if polynomial.args == ():\n return [polynomial]\n\n coeff_list = sorted(polynomial.args, key = extract_power)\n degree = extract_power(coeff_list[-1])\n\n pos = 0\n ret = []\n for d in range(0, degree + 1):\n if extract_power(coeff_list[pos]) == d:\n if d == 0:\n ret.append(RealMPFR(str(coeff_list[0]), prec))\n else:\n ret.append(RealMPFR(str(coeff_list[pos].args[0]), prec))\n pos += 1\n else:\n ret.append(0)\n return ret",
"def print_coeffs(ecm):\n func_otc = ecm.func_otc\n func_ttc = ecm.func_ttc\n\n coeffs_otc = ecm.curve_fit_coeff(func_otc, 3)\n coeffs_ttc = ecm.curve_fit_coeff(func_ttc, 5)\n\n print(f\"=== Curve Fit Coefficients ===\\n\")\n print('\\n--- Coefficients from OTC ---')\n print('a\\tb\\talpha')\n for c in coeffs_otc:\n print(f'{c[0]:.4f}\\t{c[1]:.4f}\\t{c[2]:.4f}')\n\n print('\\n--- Coefficients from TTC ---')\n print('\\na\\tb\\tc\\talpha\\tbeta')\n for c in coeffs_ttc:\n print(f'{c[0]:.4f}\\t{c[1]:.4f}\\t{c[2]:.4f}\\t{c[3]:.4f}\\t{c[4]:.4f}')",
"def genpoly(order, bits = 128):\n\t\n\t#the number of bytes that each coeficiant must have in order\n\t#to garantee that the entire polynomial contains enough bits:\n\tcoefBytes = (bits / (order + 1)) / 8\n\t#some coeficiants must contain an extra byte though, in order\n\t#to garantee that enough bits are generated\n\t#the first coeficiant not to contain an extra byte:\n\tfirst = (bits/8) % (order + 1)\n\t\n\trands = []\n\tpolynomial = []\n\tfor i in range(order+1):\n\t\tif(i < first):\n\t\t\trands.append(os.urandom(coefBytes + 1))\n\t\telse:\n\t\t\trands.append(os.urandom(coefBytes))\n\t\n\tfor s in rands:\n\t\ta = 0\n\t\tfor byte in s:\n\t\t\ta = a * 256 + ord(byte)\n\t\tpolynomial.append(a)\n\treturn polynomial",
"def get_recurrence_coefficients(self, order):\n w_pdf = self.get_pdf(self.x_range_for_pdf)\n ab = custom_recurrence_coefficients(self.x_range_for_pdf, w_pdf, order)\n return ab",
"def coeffs(self):\n\t\treturn [self.a,self.b,self.c,self.d]",
"def createExcitations(nocc,total,N):\n occ=range(1,nocc+1)\n vir=range(nocc+1,total+1)\n operators=[]\n for n in range(1,N+1):\n for cosa1 in itertools.combinations(occ,n):\n for cosa2 in itertools.combinations(vir,n):\n cosita=[]\n cosita.extend(cosa2[::-1])\n cosita.extend([x * -1 for x in cosa1[::-1]])\n operators.append(cosita)\n return operators",
"def _get_coefficients(est, table):\n def coefficient_for_category(predictors, category):\n predictor = [p for p in predictors if p.get('value') == category]\n\n if not predictor:\n return 0\n\n return float(predictor[0].get('coefficient'))\n\n def coefficients_for_field(name, field):\n predictors = table.findall(f\"*[@name='{name}']\")\n\n if field.get('optype') != 'categorical':\n if len(predictors) > 1:\n raise Exception('PMML model is not linear.')\n\n return [float(predictors[0].get('coefficient'))]\n\n return [\n coefficient_for_category(predictors, c)\n for c in est.field_mapping[name][1].categories\n ]\n\n return list(chain.from_iterable([\n coefficients_for_field(name, field)\n for name, field in est.fields.items()\n if table.find(f\"*[@name='{name}']\") is not None\n ]))",
"def indices(order):\n n = len(order)\n states = [format(x, '0%sb' % n) for x in range(2**n)]\n return [int(x, base=2) for x in sorted(states, key=lambda x: shuffle(x, order))]",
"def top_coefs(clf, label, n, vocab):\n ###TODO\n res = []\n if label == 1:\n for ind in np.argsort(clf.coef_[0])[::-1][:n]:\n for k, v in vocab.items():\n if ind == v:\n res.append((k, abs(clf.coef_[0][ind])))\n elif label == 0:\n for ind in np.argsort(clf.coef_[0])[:n]:\n for k, v in vocab.items():\n if ind == v:\n res.append((k, abs(clf.coef_[0][ind])))\n return res",
"def generateBasisSetOrders(self):\n\t\torderList = []\n\t\tL = self.L\n\t\ti = 0\n\t\tbasisSetOrder = infor.getBasisSetOrder()\n\t\tif basisSetOrder == \"libint\":\n\t\t\twhile i <= L:\n\t\t\t\tnx = L - i\n\t\t\t\tj = 0\n\t\t\t\twhile j<=i: \n\t\t\t\t\tny = i-j\n\t\t\t\t\tnz = j\n\t\t\t\t\torderList.append(nx)\n\t\t\t\t\torderList.append(ny)\n\t\t\t\t\torderList.append(nz)\n\t\t\t\t\tj = j + 1\n\t\t\t\ti = i + 1\n else:\n\t\t\tprint \"Unrecognized basis set ordering to generate basis sets\\n\"\n\t\t\tsys.exit()\n\n\t\treturn orderList",
"def show_orders(names, egg_order):\n PRICE_PER_DOZEN = 6.5\n print(\"\")\n print(\"Showing orders\")\n for i in range (len(egg_order)):\n price = egg_order [i] * PRICE_PER_DOZEN\n print(\"{} ordered {} eggs. The price is ${:.2f}\".format(names[i], egg_order[i], price))",
"def get_taylor_coeffs(s, order, dir_choice = 1):\n u, v = s.get_11_ss()\n coeffs = np.zeros(order)\n for i in range(order):\n if i == 0:\n coeffs[i] = v\n continue\n if i == 1:\n a = s.M[0][1]*u\n b = s.M[0][0]*u - s.M[1][1]*v\n c = -s.M[1][0]*v\n if dir_choice == 0:\n lin_val = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)\n else:\n lin_val = (-b - np.sqrt(b**2 - 4*a*c))/(2*a)\n coeffs[i] = lin_val\n continue\n if i == 2:\n # Eq 20 of supplement. In my terms:\n # c_m/m! * alpha = c_m-1/(m-1)! * beta\n alpha = i*u*s.M[0][0] + (i+1)*u*s.M[0][1]*coeffs[1] - s.M[1][1]*v\n beta = ( s.M[1][0] + s.M[1][1]*coeffs[1] - (i-1)*s.M[0][0] -\n (i-1)*s.M[0][1]*coeffs[1] )\n i_coeff = ( math.factorial(i) *\n (coeffs[i-1]/math.factorial(i-1)*beta) ) / alpha\n coeffs[i] = i_coeff\n continue\n # Eq 20 of supplement. In my terms:\n # c_m/m! * alpha = c_m-1/(m-1)! * beta + sum_i=2^m-1 gamma[i]\n #alpha = i*u*p.M[0][0] + (i+1)*u*p.M[0][1]*coeffs[1]\n alpha = i*u*s.M[0][0] + (i+1)*u*s.M[0][1]*coeffs[1] - s.M[1][1]*v\n beta = ( s.M[1][0] + s.M[1][1]*coeffs[1] - (i-1)*s.M[0][0] -\n (i-1)*s.M[0][1]*coeffs[1] )\n gamma = np.sum([ (coeffs[j]/(math.factorial(j) * math.factorial(i - j))\n * (s.M[1][1]*coeffs[i-j]\n - (i-j)*s.M[0][1]*coeffs[i-j]\n - u*s.M[0][1]*coeffs[i-j+1]))\n for j in range(2, i)])\n #print(alpha, beta, gamma)\n i_coeff = ( i / alpha * coeffs[i-1]*beta\n + math.factorial(i) / alpha * gamma)\n coeffs[i] = i_coeff\n return coeffs",
"def _construct_coefficients(self):\n coeffs = [0]*self.degree\n\n N = float(self.evalpts)\n\n lvals = np.arange(self.evalpts).astype('float')\n xpts = self._c2x(np.cos(np.pi*(lvals + 0.5)/N))\n fpts = np.rollaxis(self.func(xpts, *self.args), -1)\n\n for a in range(self.degree):\n inner = [\n fpts[b] * np.cos(np.pi*a*(lvals[b]+0.5)/N)\n for b in range(self.evalpts)\n ]\n coeffs[a] = 2.0/N * np.sum(inner, axis=0)\n\n coeffs[0] *= 0.5\n self._coeffs = np.array(coeffs)",
"def get_coefficients(n, exclude=[\"x\", \"X\"], first_nonzero=True, var_coeffs=False, \n reduce=True):\n if var_coeffs:\n selection = copy(digits_nozero + alpha)\n for i in exclude:\n selection.remove(i)\n else:\n selection = digits_nozero\n coeffs = []\n for i in xrange(n):\n c = random.choice(selection)\n if isinstance(c, str):\n c = sympy.Symbol(c)\n if reduce and random.randint(0,1):\n c = 0\n coeffs.append(c)\n if first_nonzero and coeffs[0] == 0:\n coeffs[0] = random.choice(selection)\n return coeffs",
"def ec_list(mod):\n ecs = []\n for top_reaction in mod.reactions:\n for base_species in mod.species:\n ec = 'ec%s_%s' % (top_reaction, base_species)\n ecs.append(ec)\n for base_param in mod.parameters:\n ec = 'ec%s_%s' % (top_reaction, base_param)\n ecs.append(ec)\n ecs.sort()\n return ecs",
"def get_coefficients(lin_op):\r\n # VARIABLE converts to a giant identity matrix.\r\n if lin_op.type is lo.VARIABLE:\r\n coeffs = var_coeffs(lin_op)\r\n # Constants convert directly to their value.\r\n elif lin_op.type is lo.PARAM:\r\n coeffs = [(lo.CONSTANT_ID, lin_op.size, lin_op.data.value)]\r\n elif lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]:\r\n coeffs = [(lo.CONSTANT_ID, lin_op.size, lin_op.data)]\r\n # For non-leaves, recurse on args.\r\n elif lin_op.type in TYPE_TO_FUNC:\r\n coeffs = TYPE_TO_FUNC[lin_op.type](lin_op)\r\n else:\r\n raise Exception(\"Unknown linear operator.\")\r\n return coeffs",
"def __get_orders(self):\n orders = []\n\n # Get orders \n for l in self.GRISM_CONF:\n k = \"BEAM_\"\n if l[0:len(k)]==k:\n ws = l.split()\n order = ws[0].split(\"_\")[-1]\n orders.append(order)\n return orders",
"def fbessel_coeffs(f, N, order=0):\n import numpy as np\n import scipy.integrate as si\n import scipy.special as ss\n nx = len(f)\n x = np.linspace(0.0, 1.0, nx)\n zeros = ss.jn_zeros(order, N)\n a = np.zeros(N)\n for i in range(N):\n a[i] = ( 2.0 / ss.jn(order + 1, zeros[i])**2\n * si.simps(x * f * ss.jn(order, zeros[i] * x), x) )\n return a"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Dictionary that maps attr1 to attr2.
|
def map(self, attr1, attr2):
return dict(zip(getattr(self, attr1), getattr(self, attr2)))
|
[
"def get_attr_map():\n custom_attributes = get_custom_attrs()\n standard_attributes = get_standard_attrs()\n mapping = {}\n for attr in custom_attributes.keys():\n mapping[f'custom:{attr}'] = attr\n mapping.update(standard_attributes)\n return mapping",
"def _get_edge_attributes(self, link, attributes):\n\n attribute_map = {link: dict()}\n\n # map each attribute to edge value\n for idx, row in self.frame.iterrows():\n\n attribute_map[link].update({\n\n # reverse tuple since it gets ordered???\n (row[link[1]], row[link[0]]): {attribute: row[attribute] for attribute in attributes}\n })\n\n return attribute_map",
"def get_attrs(self, node1, node2):\n return self._graph[node1][node2]",
"def createAttrDicts():\n ret = {}\n # lfw v1.1\n ret['lfw_v1.1'] = d = {}\n fields = getmodelfields('lfw_v1.1')\n for l in open('attrnames.txt'):\n num, name = l.strip().split('\\t', 1)\n if name not in fields: continue\n d[num] = d[int(num)] = d[name] = name\n return ret",
"def get_sequentialAttrDict(self,attr = None):\n\t#log.debug(\">>> %s.get_sequentialAttrDict(attr = '%s') >> \"%(self.p_nameShort,attr) + \"=\"*75) \t\t\n\tuserAttrs = self.getUserAttrsAsDict()\n\td_attrList = {}\n\tfor key in userAttrs.keys():\n\t if '_' in key:\n\t\t_split = key.split('_')\n\t\t_int_ = _split[-1]\n\t\t_str_ = ('_').join(_split[:-1])\n\t\tif \"%s\"%attr == _str_:\n\t\t try:\n\t\t\td_attrList[int(_int_)] = key\n\t\t\t#log.debug(\"match: '%s'\"%key)\n\t\t except:log.warning(\"%s failed to int | int: %s\"%(key,_int_))\n\t\t \n\t#log.debug(\"-\"*100) \t \t\n\treturn d_attrList",
"def separate_by_attribute(listInst, ixAttr):\n dictInst = {}\n for inst in listInst:\n cAttr = inst.listAttrs[ixAttr]\n if cAttr not in dictInst:\n dictInst[cAttr] = []\n dictInst[cAttr].append(inst)\n return dictInst",
"def __get_measurement_attr(self, attr):\n return dict([(key, self.measurements[key][attr]) for key in self.measurements.keys()])",
"def fattr2dict(obj):\n\n result = {}\n unpacker = FancyNFS4Unpacker(obj.attr_vals)\n list = bitmap2list(obj.attrmask)\n for bitnum in list:\n result[bitnum] = get_attrunpacker(unpacker)[bitnum]()\n unpacker.done()\n return result",
"def translate_attribs(attribs):\r\n return dict((_attribute_translate(k, k), v) for k, v in attribs.iteritems())",
"def map_attr(attr, iterable):\n return map(mattr(attr), iterable)",
"def attribs(self):\r\n for key, val in self.dict.items():\r\n yield (key, val)",
"def __compute_attrs(self):\n attributes = {}\n for attr in self.policy.typeattributes():\n attributes[str(attr)] = set(str(x) for x in attr.expand())\n return attributes",
"def populate_attributes_table(conn):\n log_info('. populating \"attributes\" table')\n curs = conn.cursor()\n attributes_map = {}\n for attribute in ATTRIBUTES:\n internal_id, internal_name, label, hidden = attribute\n attributes_map[internal_name] = internal_id\n log_verbose('Adding attribute', internal_name, ':', internal_id)\n curs.execute(\"\"\"\n insert into attrs_tab(attr_id, attr_hidden, attr_value)\n values (?, ?, ?);\n \"\"\", (internal_id, 1 if hidden else 0, label,))\n curs.close()\n conn.commit()\n return attributes_map",
"def _get_distances_map(self, distances):\n distances_map = {}\n for a, b, d in distances:\n distances_map[(a, b)] = d\n distances_map[(b, a)] = d\n distances_map[(a, a)] = 0.0\n distances_map[(b, b)] = 0.0\n return distances_map",
"def get_value_map_for_update(self):\n value_map = dict()\n for attr_name in self.__attribute_names_lookup.keys():\n value = getattr(self, attr_name)\n if not value is None:\n self.check_attribute_validity(attr_name, value)\n value_map[attr_name] = value\n\n return value_map",
"def get_line_dict(x_1, y_1, x_2, y_2):\n line_dict = {'p_1': (x_1, y_1),\n 'p_2': (x_2, y_2),\n 'x_1': x_1, \n 'y_1': y_1,\n 'x_2': x_2,\n 'y_2': y_2,\n 'a': y_2 - y_1,\n 'b': x_1 - x_2,\n 'c': (y_2 - y_1) * x_1 + (x_1 - x_2) * y_1}\n return line_dict",
"def relationship_resource(resource1, resource2, **kwargs):\n resource1 = {k:v for k,v in resource1.iteritems()}\n resource1[\"data\"].pop(\"attributes\", None)\n resource2 = {k:v for k,v in resource2.iteritems()}\n resource2[\"data\"].pop(\"attributes\", None)\n\n attributes = {\n resource1[\"data\"][\"type\"]: resource1,\n resource2[\"data\"][\"type\"]: resource2,\n }\n attributes.update(kwargs)\n attributes.update(deletable())\n attributes.update(timestamp())\n return attributes",
"def _format_data(self) -> Dict[str, Any]:\n set_data = dict()\n for attribute_name, _ in self._attribute_mapping.items():\n attribute_value = self._get_attribute_data(attribute_name)\n set_data[attribute_name] = attribute_value\n\n return set_data",
"def getattrs(obj, attr_names=[], alias={}):\n return dict((alias.get(attr, attr), getattr(obj, attr)) for attr in attr_names)",
"def get_attributes(self):\n retdict = {}\n if self.lane_id == None:\n raise ValueError('lane id is not set correctly.')\n retdict['id'] = str(self.lane_id)\n retdict['type'] = enum2str(self.lane_type)\n retdict['level'] = 'false'\n return retdict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|