query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
---|---|---|---|
r""" Setter for the ground truth shape associated to the image.
|
def gt_shape(self, value):
self._gt_shape = value
|
[
"def _format_groundtruth_data(self, true_image_shapes):\n groundtruth_boxlists = [\n box_list_ops.to_absolute_coordinates(\n box_list.BoxList(boxes), true_image_shapes[i, 0],\n true_image_shapes[i, 1])\n for i, boxes in enumerate(\n self.groundtruth_lists(fields.BoxListFields.boxes))\n ]\n groundtruth_classes_with_background_list = [\n tf.cast(\n tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'),\n dtype=tf.float32)\n for one_hot_encoding in self.groundtruth_lists(\n fields.BoxListFields.classes)]\n\n groundtruth_masks_list = self._groundtruth_lists.get(\n fields.BoxListFields.masks)\n # TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted.\n if groundtruth_masks_list is not None and self._resize_masks:\n resized_masks_list = []\n for mask in groundtruth_masks_list:\n _, resized_mask, _ = self._image_resizer_fn(\n # Reuse the given `image_resizer_fn` to resize groundtruth masks.\n # `mask` tensor for an image is of the shape [num_masks,\n # image_height, image_width]. Below we create a dummy image of the\n # the shape [image_height, image_width, 1] to use with\n # `image_resizer_fn`.\n image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])),\n masks=mask)\n resized_masks_list.append(resized_mask)\n groundtruth_masks_list = resized_masks_list\n\n if self.groundtruth_has_field(fields.BoxListFields.weights):\n groundtruth_weights_list = self.groundtruth_lists(\n fields.BoxListFields.weights)\n else:\n # Set weights for all batch elements equally to 1.0\n groundtruth_weights_list = []\n for groundtruth_classes in groundtruth_classes_with_background_list:\n num_gt = tf.shape(groundtruth_classes)[0]\n groundtruth_weights = tf.ones(num_gt)\n groundtruth_weights_list.append(groundtruth_weights)\n\n return (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list)",
"def set_shape(self, shape, empty=False):\n super().set_shape(shape, empty=empty)\n self.coordinates = round_values(self.coordinates)",
"def state_shape(self):\n pass",
"def set_example_shape(self, shape):\n assert(len(shape) == 2)\n self.example_shape = numpy.asarray(shape, dtype=numpy.float32)",
"def set_shape(self, shape, empty=False):\n if isinstance(shape, int):\n shape = shape,\n\n shape_dim = len(shape)\n\n if self.ndim == 0:\n # Dimensionality is inferred from this shape if not set.\n if shape_dim == 1:\n new_shape = (1,) + shape\n else:\n new_shape = shape\n else:\n new_shape = (self.ndim,) + shape\n\n if empty:\n self.coordinates = np.empty(new_shape, dtype=float)\n else:\n self.coordinates = np.zeros(new_shape, dtype=float)\n\n if self.unit is not None:\n self.coordinates = self.coordinates * self.unit",
"def shape(self, new_shape):\n self.set_shape(new_shape)",
"def set_background(self):\r\n\r\n\t\tself.bg = self.get_background() if len(self) > 0 else np.array([0.25] * 4) # (A,C,G,T). Default is equal background if not overwritten by MEME (See OneMotif)\r",
"def shape_b(self):\r\n return self._fixture_b._shape",
"def create_ground_truth_optical_flow(self) -> None:\n pass",
"def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):\n if image_id in self._image_id_to_mask_shape_map:\n tf.logging.warning('Ignoring ground truth with image id %s since it was '\n 'previously added', image_id)\n return\n\n groundtruth_boxes = groundtruth_dict[fields.InputFields.gt_boxes]\n groundtruth_classes = groundtruth_dict[fields.InputFields.gt_classes]\n groundtruth_masks = groundtruth_dict[fields.InputFields.gt_masks]\n _check_mask_type_and_value(fields.InputFields.gt_masks, groundtruth_masks)\n\n if len(self._image_id_to_mask_shape_map) < self._max_examples_to_draw:\n image = visualization.visualize_boxes_and_labels_on_image_array(\n image=groundtruth_dict[fields.InputFields.image],\n boxes=groundtruth_boxes,\n classes=groundtruth_classes,\n scores=np.ones_like(groundtruth_classes),\n category_index=self._category_index,\n instance_masks=groundtruth_masks,\n min_score_thresh=self._min_visualization_score_thresh\n )\n self._summaries.append(\n tf.Summary.Value(\n tag=\"{}/Groudtruth/Detection\".format(image_id),\n image=tf.Summary.Image(\n encoded_image_string=visualization.encode_image_array_as_png_str(image)))\n )\n\n self._groundtruth_list.extend(\n coco_tools.\n ExportSingleImageGroundtruthToCoco(\n image_id=image_id,\n next_annotation_id=self._annotation_id,\n category_id_set=self._category_id_set,\n groundtruth_boxes=groundtruth_boxes,\n groundtruth_classes=groundtruth_classes,\n groundtruth_masks=groundtruth_masks))\n self._annotation_id += groundtruth_boxes.shape[0]\n self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[\n fields.InputFields.gt_masks].shape",
"def test_override_placeholder_shapes(self):\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_2'),\n ('node_2', 'op_output')\n ],\n {'node_2': {'shape': None},\n 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Parameter'}\n },\n nodes_with_edges_only=True)\n\n ph_shape = np.array([1, 3, 224, 224])\n user_dict = {'node_1': [{'shape': ph_shape}]}\n override_placeholder_shapes(graph, user_dict)\n res_shape = graph.node['node_1']['shape']\n self.assertTrue(np.array_equal(ph_shape, res_shape))",
"def shape_a(self):\r\n return self._fixture_a._shape",
"def no_shape():\n if get_shape_path():\n return False\n return True",
"def setShape(self, shape):\n for pp in shape:\n if len(pp) != 3:\n raise ValueError('shape point must consist of x,y,z')\n self._shape3D = shape\n self._shape = [(x, y) for x, y, z in shape]",
"def set_data_shape(self, shape):\n raise NotImplementedError",
"def set_ground(self, ground_id):\n self._world_dict['ground'] = ground_id",
"def players_layer_shape(self):\n pass",
"def get_scaled_ground_truth_bounding_box(original_img,pre_processed_input_img):\n original_height, original_width, _ = original_img.shape\n pre_processed_img_height, pre_processed_img_width, _ = pre_processed_input_img.shape\n x_scale = original_width / pre_processed_img_width\n y_scale = original_height / pre_processed_img_height\n scaled_ground_truth_box = {\n \"x1\" : round(ground_truth_box[\"x1\"]/x_scale)\n ,\"y1\" : round(ground_truth_box[\"y1\"]/y_scale)\n ,\"x2\" : round(ground_truth_box[\"x2\"]/x_scale)\n ,\"y2\" : round(ground_truth_box[\"y2\"]/y_scale)\n }\n return scaled_ground_truth_box",
"def set_mask(self, mask):\n if type(mask) != type(None):\n if mask.shape[0] == self._img.shape[0] and mask.shape[1] == self._img.shape[1]:\n self._mask = mask\n else:\n raise ValueError(\"Mask has to have the same image dimension as the map!\")\n else:\n self._mask = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the final fitting cost.
|
def final_cost(self):
return self.algorithm_results[-1].final_cost
|
[
"def end_point_cost(self, ti, xi, tf, xf, f_prm):\n\t\tmf = xf[-1]\n\t\treturn - mf / self.mass0",
"def get_cost(self) -> float:\n if self.pulp_problem.status == pulp.LpStatusNotSolved: # Not solved\n raise ValueError(\"Cannot get the cost of an unsolved problem\")\n return sum(\n self.cooked.instance_prices[ic] * self.cooked.map_res[app, ic].varValue\n for ic in self.cooked.instances_res\n for app in self.system.apps\n ) + sum(\n self.cooked.instance_prices[ic]\n * self.cooked.map_dem[app, ic, wl].varValue\n * self.load_hist[wl]\n for ic in self.cooked.instances_dem\n for app in self.system.apps\n for wl in self.load_hist.keys()\n )",
"def compute_cost(self):\n b=np.log(self.A2)\n c=np.log(1-self.A2)\n self.cost=-(np.dot(self.y,b.T)+np.dot(1-self.y,c.T))/self.m\n self.cost=float(np.squeeze(self.cost))",
"def get_cost(self) -> float:\n if self.pulp_problem.status != pulp.LpStatusOptimal:\n raise ValueError(\"Cannot get the cost when the status is not optimal\")\n\n return pulp.value(self.pulp_problem.objective)",
"def current_cost(self) -> float:\n return calculate_cost(self.population[0], self.settings)",
"def cost(params):\n\n # get the F(x) response\n Fx = model(params)\n\n # compute goodness of fit\n return scale * (Fx - G)**2",
"def total_cost(self):\n return self.heuristic() + self.backward_cost",
"def operating_cost(self):\n return self._cost_data",
"def compute_cost(self): #computes the cost for all training examples\n self.Cost= -(np.dot(self.Y,np.log(self.A).T)+np.dot(1-self.Y,np.log(1-self.A).T))/self.m\n self.Cost=np.squeeze(self.Cost) #for calculation purposes so that the new shape is of the form ()",
"def cost(self):\n\n return self._cost",
"def _get_cost_function(self):\n self.weights = tf.placeholder(\n tf.float32, shape=[self.n_words, self.n_words])\n self.log_coincidence = tf.placeholder(\n tf.float32, shape=[self.n_words, self.n_words])\n self.diffs = tf.subtract(self.model, self.log_coincidence)\n cost = tf.reduce_sum(\n 0.5 * tf.multiply(self.weights, tf.square(self.diffs)))\n if self.mittens > 0:\n self.mittens = tf.constant(self.mittens, tf.float32)\n cost += self.mittens * tf.reduce_sum(\n tf.multiply(\n self.has_embedding,\n self._tf_squared_euclidean(\n tf.add(self.W, self.C),\n self.original_embedding)))\n tf.summary.scalar(\"cost\", cost)\n return cost",
"def special_total_cost(self):\n return round(self.packaging_cost + self.freight_cost + self.duty_cost + self.tooling_cost(), 2)",
"def cost(self,output,y):\r\n\r\n return np.mean(np.square(output - y))",
"def get_total_cost(self):\n dvs = [norm(dv) for dv in self._dvs]\n return sum(dvs, 0 * u.km / u.s)",
"def additional_cost_term(self, **kwargs):\n return 0.0",
"def _cost_function(self) -> None:\n workloads = {wl.app: wl.values[0] for wl in self.workloads}\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: maximize fulfilled workload fraction\",\n )",
"def total_cost(self, *args, **kwargs):\n return round(self.material_cost() + self.manufacturing_cost + self.overhead_cost() + self.special_cost() + self.profit(), 2)",
"def _calculate_cost(self):\n self.destination.set_parking_cost()\n self.cost_to_park = self.destination.parking_cost \n return self.distance * self.mile_rate + self.cost_to_park",
"def loss_cost(self):\n return round(self.bom_cost() * self.loss_rate / 100, 2)",
"def total_cost(self):\n\n if not self._outputs:\n raise Exception(\"Has OffshoreSubstationDesign been ran yet?\")\n\n return (self.substructure_cost + self.substation_cost) * self.num_substations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the initial fitting cost.
|
def initial_cost(self):
return self.algorithm_results[0].initial_cost
|
[
"def cost(params):\n\n # get the F(x) response\n Fx = model(params)\n\n # compute goodness of fit\n return scale * (Fx - G)**2",
"def current_cost(self) -> float:\n return calculate_cost(self.population[0], self.settings)",
"def __compute_initial_learning_rate__(self):\n eigen_values = np.linalg.eigvalsh(np.cov(self.x_train.T))\n lipschitz = eigen_values[-1] + self.lambd\n initial_learning_rate = 1 / lipschitz\n return initial_learning_rate",
"def cost_multiplier(self):\n return 1.0",
"def computeFirstStageCost(model):\n return 0.0",
"def compute_cost(self):\n b=np.log(self.A2)\n c=np.log(1-self.A2)\n self.cost=-(np.dot(self.y,b.T)+np.dot(1-self.y,c.T))/self.m\n self.cost=float(np.squeeze(self.cost))",
"def compute_cost(self): #computes the cost for all training examples\n self.Cost= -(np.dot(self.Y,np.log(self.A).T)+np.dot(1-self.Y,np.log(1-self.A).T))/self.m\n self.Cost=np.squeeze(self.Cost) #for calculation purposes so that the new shape is of the form ()",
"def get_cost(self) -> float:\n if self.pulp_problem.status == pulp.LpStatusNotSolved: # Not solved\n raise ValueError(\"Cannot get the cost of an unsolved problem\")\n return sum(\n self.cooked.instance_prices[ic] * self.cooked.map_res[app, ic].varValue\n for ic in self.cooked.instances_res\n for app in self.system.apps\n ) + sum(\n self.cooked.instance_prices[ic]\n * self.cooked.map_dem[app, ic, wl].varValue\n * self.load_hist[wl]\n for ic in self.cooked.instances_dem\n for app in self.system.apps\n for wl in self.load_hist.keys()\n )",
"def additional_cost_term(self, **kwargs):\n return 0.0",
"def initial_price(self) -> Optional[float]:\n return self.__initial_price",
"def get_cost(self) -> float:\n if self.pulp_problem.status != pulp.LpStatusOptimal:\n raise ValueError(\"Cannot get the cost when the status is not optimal\")\n\n return pulp.value(self.pulp_problem.objective)",
"def _cost_function(self) -> None:\n workloads = {wl.app: wl.values[0] for wl in self.workloads}\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: maximize fulfilled workload fraction\",\n )",
"def _cost_function(self) -> None:\n\n period_length = sum(self.load_hist.values())\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_prices[_ic]\n * period_length\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_prices[_ic]\n * self.load_hist[_l]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: minimize cost\",\n )",
"def startup_cost_rule(_m, g):\r\n\r\n if g in m.G_E_THERM:\r\n # Startup cost for existing thermal units\r\n startup_cost = (self.data.existing_units.loc[g, ('PARAMETERS', 'SU_COST_WARM')]\r\n / self.data.existing_units.loc[g, ('PARAMETERS', 'REG_CAP')])\r\n\r\n elif g in m.G_C_THERM:\r\n # Startup cost for candidate thermal units\r\n startup_cost = self.data.candidate_units.loc[g, ('PARAMETERS', 'SU_COST_WARM_MW')]\r\n\r\n else:\r\n raise Exception(f'Unexpected generator encountered: {g}')\r\n\r\n # Shutdown cost cannot be negative\r\n assert startup_cost >= 0, 'Negative startup cost'\r\n\r\n return float(startup_cost)",
"def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not None:\n self._initial_point = self._fit_result.x\n elif self._initial_point is None:\n self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)\n return self._initial_point",
"def end_point_cost(self, ti, xi, tf, xf, f_prm):\n\t\tmf = xf[-1]\n\t\treturn - mf / self.mass0",
"def __cost(training_set, test_set, knn_function, solution, adaptative=False):\n\n # Dado a solucao (configuracao de pesos/caracteristicas), transforma os , calcula o k-NN e retorna a taxa de acerto\n return training_machine(training_set, test_set, knn_function, solution, adaptative)",
"def h(self, state):\n return self.graph.least_costs[state]",
"def operating_cost(self):\n return self._cost_data",
"def get_optimal_price(self):\n res = scipy.optimize.minimize(self.compute_revenue, self.mu, method='nelder-mead')\n return res.x[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The list containing the warped images obtained at each fitting iteration.
|
def warped_images(self):
mask = self.algorithm_results[-1].fitter.template.mask
transform = self.algorithm_results[-1].fitter.transform
interpolator = self.algorithm_results[-1].fitter.interpolator
warped_images = []
for s in self.shapes():
transform.set_target(s)
image = self.image.warp_to(mask, transform,
interpolator=interpolator)
warped_images.append(image)
return warped_images
|
[
"def warped_images(self):\n mask = self.fitter.template.mask\n transform = self.fitter.transform\n interpolator = self.fitter.interpolator\n return [self.image.warp_to(mask, transform.from_vector(p),\n interpolator=interpolator)\n for p in self.shape_parameters]",
"def unmasked_model_image_of_galaxies(self) -> List[aa.Array2D]:\r\n return self.max_log_likelihood_fit.unmasked_blurred_image_of_galaxies_list",
"def get_images(self):\n return [env.render(mode='rgb_array') for env in self.list_env]",
"def make_image_list(self):\n return [\n tools.get_image(48, 0, 16, 16, self.sprite_sheet),\n tools.get_image(0, 0, 22, 16, setup.GFX['sword2'])\n ]",
"def get_images(self):\n pass",
"def get_images_mp(sample_size_total,SN_limits=[10,100],translation_distance=1,radius_limits=[1.5,3],parameter_function=0):\n\n\n if __name__ == '__main__' or __name__=='mp_images_new':\n import numpy as np\n\n from multiprocessing import Pool,cpu_count\n from functools import partial\n\n nbr_images = []\n nbr_workers = cpu_count()\n sample_size = sample_size_total//nbr_workers\n\n # Avoid sending the extra paramters multiple times by creating partial\n\n tmp = partial(f,SN_limits=SN_limits,translation_distance=translation_distance,radius_limits=radius_limits,parameter_function=parameter_function)\n\n # Do some load balancing\n\n for i in range(nbr_workers):\n nbr_images.append(sample_size)\n if(i<sample_size_total%nbr_workers):\n nbr_images[-1]+=1\n\n # Use a pool to efficiently generate lots of images\n\n with Pool(processes=cpu_count()) as pool:\n tmp = pool.map(tmp,nbr_images)\n pool.close()\n pool.join()\n\n # Reformat the results from the pool map to be the right shape\n\n final_images = np.zeros((sample_size_total,51,51,1))\n final_targets = np.zeros((sample_size_total,3))\n img_idx = 0\n for worker_no in range(nbr_workers):\n for image_no in range(len(tmp[worker_no][0][:])):\n\n final_images[img_idx] = np.reshape(tmp[worker_no][0][image_no],(51,51,1))\n final_targets[img_idx] = tmp[worker_no][1][image_no]\n img_idx+=1\n return final_images,final_targets",
"def getBandGapImages(params,resolution=32,getFlatness=False,band=None,processes=None):\n \n units = params['units']\n enLabel, en = units['energy']\n xLabel, x = units['xaxis']\n yLabel, y = units['yaxis']\n zLabel, z = units['zaxis']\n numImages = len(z)\n\n xyz = np.roll(np.array(list(product(z,x,y))),2,1)\n energy = en*np.ones(len(xyz))\n \n data = np.c_[energy,xyz]\n names = [enLabel,xLabel,yLabel,zLabel]\n \n iterFunc = partial(getBG,params['lattice'],params['cutoff'],names,resolution,getFlatness=getFlatness,band=band)\n \n #iterFunc = partial(getBG,params['lattice'],params['cutoff'],names,resolution,)\n \n pool = mp.Pool(processes)\n \n if getFlatness:\n band_output = pool.map(iterFunc,data)\n band_output = np.array(band_output)\n pool.close()\n \n bandGaps, bandFlats = np.array(band_output[:,0].tolist()), np.array(band_output[:,1].tolist()) #np.array(array.tolist()) fixes the data type to np.float instead of np.object\n \n #take only minimum flatness if more than 1 band\n if bandFlats.ndim > 1:\n bandFlats = np.amin(bandFlats,axis=1)\n \n return bandGaps.reshape(numImages,len(x),len(y)), bandFlats.reshape(numImages,len(x),len(y))\n \n else:\n bandGaps = pool.map(iterFunc,data)\n bandGaps = np.array(bandGaps)\n pool.close()\n return bandGaps.reshape(numImages,len(x),len(y))",
"def get_all_from_original(self, idx):\n return [self.image(i) for i in range(idx * self._count,\n (idx+1) * self._count)]",
"def get_all_posteriors(self):\n return numpy.array(\n [self.posterior_at_index(i) for i in range(self.sketch.m)])",
"def pruneImages(self):\r\n tmp_list = self.img_names\r\n self.img_names = list()\r\n factor = self.num_imgs / self.num_slices\r\n for i in range(self.num_slices):\r\n self.img_names.append(tmp_list[int(i * factor)])\r\n self.num_imgs = len(self.img_names)",
"def get_next_batch(self):\n images = []\n while len(images) < self._batch_size:\n line = self._catalog.readline()\n self._counter += 1\n if self._counter < self._skip:\n continue\n url, time = line.split(\",\")\n time1 = int(time.split('-')[0])\n if time1 < 1650: # only consider paintings after 1650\n continue\n url = \"https://www.wga.hu/art\" + url.split(\"html\")[1] + \"jpg\"\n try:\n img_arr = self._scrape_image(url)\n except:\n continue\n if img_arr.shape[2] != 3: # only consider RGB paintings\n continue\n img_arr = (img_arr - 127.5) / 127.5\n images.append(img_arr)\n\n result = np.stack(images, axis=0)\n assert result.shape == (self._batch_size, self._input_size[0], self._input_size[1], 3)\n return result",
"def make_image_list(image_dir):",
"def images(self) -> List[Image.Image]:\n return [page.image for page in self._pages]",
"def _resize_images(images: List) -> List:\n return list(\n map(\n lambda i: i.resize((64, 64)),\n images\n )\n )",
"def get_image_scale_list(self): \n return self.image_scale_list",
"def get_images(ibs, gid_list):\n gpath_list = ibs.get_image_paths(gid_list)\n image_list = [gtool.imread(gpath) for gpath in gpath_list]\n return image_list",
"def floppy_images(self):\n ret = self._get_attr(\"floppyImages\")\n return [IMedium(a) for a in ret]",
"def _create_images(self, genomes: Genomes) -> Images:\n return [self._image_creator.create_image(g) for g in genomes]",
"def get_bayer_images(\n self, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL\n ) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(img) for img in self.get_images(renderer)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The list containing the appearance reconstruction obtained at each fitting iteration.
|
def appearance_reconstructions(self):
return flatten_out(
[f.appearance_reconstructions for f in self.algorithm_results])
|
[
"def appearance_reconstructions(self):\n if self.appearance_parameters:\n return [self.fitter.appearance_model.instance(w)\n for w in self.appearance_parameters]\n else:\n return [self.fitter.template for _ in self.shapes]",
"def getDisplacements(self):\n return np.array(self.disps)",
"def get_representative_fits(self):\n\n\t\treturn self._repfit_list",
"def get_all_posteriors(self):\n return numpy.array(\n [self.posterior_at_index(i) for i in range(self.sketch.m)])",
"def extractParameters(self):\n return(self.matrices)",
"def flag_matriz(self):\n return self._flag_matriz",
"def extract(self):\n imgpaths = self.imgpaths\n object_nm = self.object_nm\n color_histograms = []\n progress = progressbar.ProgressBar(\n widgets=['{o}: '.format(o=object_nm), progressbar.Bar(),\n progressbar.Percentage(), ' ', progressbar.ETA()])\n for imgpath in progress(list(imgpaths)):\n if type(imgpath) is tuple:\n raw_path, mask_path = imgpath\n raw_img = cv2.imread(raw_path)\n mask_img = cv2.imread(mask_path)\n train_img = cv2.add(mask_img, raw_img)\n else:\n raw_path = imgpath\n train_img = cv2.imread(raw_path)\n\n color_hist_sub = rospy.Subscriber('single_channel_histogram_'\n + self.color + '/output', ColorHistogram, self.color_hist_cb)\n bridge = cv_bridge.CvBridge()\n train_imgmsg = bridge.cv2_to_imgmsg(train_img, encoding='bgr8')\n train_imgmsg.header.stamp = rospy.Time.now()\n # wait for histogram extracted from new image\n while not self.stamp or self.stamp < train_imgmsg.header.stamp:\n self.image_pub.publish(train_imgmsg)\n rospy.sleep(0.3)\n color_histograms.append(self.color_hist)\n return np.array(color_histograms)",
"def reveal_fitted_models(self):\n return [str(x) for x in self.fitted_models]",
"def descriptors(self):\n descs = []\n for x in xrange(0, 4):\n desc = self.GetDescriptor(x)\n if desc:\n descs.append(desc)\n return descs",
"def getFCs(self):\n return self.elements",
"def results(self) -> List:\n results = []\n for element in self.parameters.output:\n if \"concentration\" in element:\n results.append(self.concentration_to_latex(element))\n continue\n if element in self.__dict__.keys() or element in dir(self):\n results.append(getattr(self, element))\n else:\n results.append(getattr(self.parameters, element))\n return results",
"def atoms(self):\n if not hasattr(self, '_atoms'):\n self._atoms = [si.SingleImage(im[0], mask=im[1])\n for im in self.imglist]\n elif len(self._atoms) is not len(self.imglist):\n self._atoms = [si.SingleImage(im[0], mask=im[1])\n for im in self.imglist]\n return self._atoms",
"def return_possible_fitting_models():\n model_dictionary_keys = fitting_models.keys()\n for i,model_name in enumerate(model_dictionary_keys):\n print(\"%i: '%s'\" % (i+1, model_name))",
"def get_Rclasses(self):\r\n list_op = list(zip(sorted(self.morphisms.keys()),\r\n [0]*len(self.morphisms.keys())))\r\n R_classes = []\r\n for x,visited in list_op:\r\n if not visited:\r\n R_class = self.element_Rclass(x)\r\n R_classes.append(R_class)\r\n for i,(y,flag) in enumerate(list_op):\r\n if y in R_class:\r\n list_op[i]=(y,1)\r\n return R_classes",
"def _convert_rcfs(self):\n self.rcfs = []\n for i in range(self.get_nof_rcf()):\n edges = self.get_edges_for_rcf(i)\n nodes = self.get_nodes_for_rcf(i)\n weight = self.get_weight_for_rcf(i)\n rcf = RCF(i, nodes, edges, weight)\n self.rcfs.append(rcf)",
"def initPreds(self):\n mean_images = int(self.config['video']['mean_images'])\n preds = np.zeros((mean_images,len(self.contours)),dtype=np.float32)\n self.logger.debug('Mean images of prediction (time filtering): {}'.format(mean_images))\n return preds",
"def list(self):\n\n for arrname in list(self):\n print(arrname + ':', self[arrname].shape)",
"def aic(self):\n aics = []\n aics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n aics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the AIC\" % (missing[:-2], chain.name))\n else:\n aics_bool.append(True)\n c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)\n aics.append(2.0 * (n_free + c_cor - np.max(p)))\n if len(aics) > 0:\n aics -= np.min(aics)\n aics_fin = []\n i = 0\n for b in aics_bool:\n if not b:\n aics_fin.append(None)\n else:\n aics_fin.append(aics[i])\n i += 1\n return aics_fin",
"def getRegistros(self):\n return self.__registros"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The list containing the error images obtained at each fitting iteration.
|
def error_images(self):
return flatten_out(
[f.error_images for f in self.algorithm_results])
|
[
"def errors(self):\n return [thread.err for thread in self._threads]",
"def get_errors(self):\n result = []\n for error in self.errors:\n result.append(os.path.basename(error[0]) +\n ':\\n ' + str(error[1]) + '\\n')\n return result",
"def get_all_errs(self):\n thiserr = self.get_err()\n errors = []\n while thiserr != '+0,\"No error\"':\n thiserr = self.get_err()\n errors.append(thiserr)\n return errors",
"def iter_failed(self):\n for awsnexradfile in self.failed:\n yield awsnexradfile",
"def errors(self):\n return (test for test in self.tests if test.error is not None)",
"def execution_errors(self):\n return (test for test in self.tests if test.execution_error is not None)",
"def errors(self):\n return Sequence.__errors",
"def error_list(self):\n all_errors = []\n for field_name, errors in self.errors.items():\n for error in errors:\n if isinstance(error, dict) and isinstance(self[field_name], FieldList):\n for field in self[field_name].entries:\n all_errors += ['{}: {}'.format(self[field_name].label.text, sub_error)\n for sub_error in field.form.error_list]\n else:\n all_errors.append('{}: {}'.format(self[field_name].label.text, error))\n return all_errors",
"def get_error_vector(self):\n return self.yerr",
"def getFitErr(self):\n return(self.fitSum2Err)",
"def get_images(self):\n return [env.render(mode='rgb_array') for env in self.list_env]",
"def get_errors(self, value):\n return list(self.errors(value))",
"def get_validation_errors(self):\n errors = []\n try:\n self.xsd_validate()\n except ValidationError, ex:\n errors.extend(ex.errors)\n return errors",
"def getImgUrls(self):\r\n return self.ImgUrls",
"def get_all_errors(self):\n if self.state == Check.State.NOT_RUN:\n return []\n dep_errors = [set(dependency.get_all_errors()) for dependency in self._dependencies]\n return list(set.union(set(self._errors), *dep_errors))",
"def calculate_errors(self, setup, errorfunc):\n errors = np.zeros((len(self.t_matrices), 2))\n\n for i, (wall_time, t_matrix) in enumerate(self.t_matrices):\n errors[i, 0] = wall_time\n errors[i, 1] = errorfunc(setup, t_matrix)\n\n self.errors = errors",
"def get_train_error(self):\n\n\t\tavg_err = 0.0\n\n\t\tfor i in range(len(self.X_train)):\n\n\t\t\tx = np.array([self.X_train[i]])\n\t\t\ty = self.Y_train[i]\n\n\t\t\ty_ = self.model.predict(x)\n\n\t\t\terr = loss(self.il_config['loss_type'],y,y_[0])\n\n\t\t\tavg_err += err\n\n\t\treturn avg_err/float(len(self.X_train))",
"def parse_dbl_error_files(self):\r\n error_list={}\r\n file_list=[]\r\n #Get the list of error files in all folders\r\n for dir in self.error_dir_list:\r\n file_list.extend(glob.glob(dir+\"/*_{INSTANCE_ID}_*.log\"\\\r\n .format(INSTANCE_ID=self.INSTANCE_ID)))\r\n #Parse all log files\r\n for filename in file_list:\r\n filename_arr=[set(),[]]\r\n with open(filename,'r') as file:\r\n filedata=file.read().split('\\n')\r\n for line in filedata:\r\n #Table name found\r\n if line.startswith('Table '):\r\n table_name='_'.join(line.split(',')[0].split(' ')[1]\\\r\n .split('.')[1].split('_')[:-1])\r\n if table_name not in error_list:\r\n error_list[table_name]={}\r\n #Error found\r\n elif line.startswith('ORA-'):\r\n #Oracle Error found\r\n filename_arr[0].add(line)\r\n elif line.startswith('Record '):\r\n #Oracle Error found\r\n filename_arr[0].add(line.split(':')[1])\r\n #Statistics found\r\n elif 'Rows' in line:\r\n #Adding the summary of data loaded\r\n filename_arr[1].append(line)\r\n if table_name in error_list:\r\n error_list[table_name][filename]=filename_arr\r\n return error_list",
"def get_hidden_errors(self):\n return self.scores['hidden_errors']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the schema name from table_name; the default schema name is 'public'.
|
def _get_schema_name(self, table_name):
items = table_name.split('.')
if len(items) == 2:
return items[0]
else:
return 'public'
|
[
"def get_schema_name(self):\n obj = self._get_db_obj_query().first()\n return obj.schema_name if obj else None",
"def get_schema_name(cls, url: URL) -> Optional[str]:\n schema = None\n database = url.database\n if cls.supports_schemas and database is not None and \"/\" in database:\n schema = database.split(\"/\")[1]\n return schema",
"def get_schema(self, table_name: str, database: str | None = None) -> sch.Schema:\n qualified_name = self._fully_qualified_name(table_name, database)\n table = self._table_env.from_path(qualified_name)\n schema = table.get_schema()\n return sch.Schema.from_pyarrow(\n create_arrow_schema(schema.get_field_names(), schema.get_field_data_types())\n )",
"def get_schema(self, schema_name):\n\n res = self.query(\"SHOW COLUMNS IN %s\" % (schema_name))\n cols = []\n keys = []\n for c in res:\n cols.append(MySQL_Table_Column(c[0],c[1],c[2]=='YES',c[5] if len(c[5])>0 else None))\n if c[3] == 'PRI':\n keys.append(c[0])\n return MySQL_Table_Schema(schema_name, cols, keys)",
"def get_schema(\n self,\n table_name: str,\n database: str | None = None,\n ) -> sch.Schema:\n qualified_name = self._fully_qualified_name(table_name, database)\n (column_names, types, *_), *_ = self._client_execute(\n f\"DESCRIBE {qualified_name}\"\n )\n\n return sch.Schema.from_tuples(zip(column_names, map(parse, types)))",
"def dynamo_table_name():\n if is_local_env():\n return LOCAL_TABLE_NAME\n\n # get data from parameter store with correct key\n # table_name = get_params_from_ssm()[\"CORRECT_KEY\"]\n return \"table_name\"",
"def table_name(self) -> str:\n return jsii.get(self, \"tableName\")",
"def get_dataset_schema(dataset):\n return dataset.table_meta[SINGLE_TABLE]",
"def _get_schema(name):\n item = datalab.utils.commands.get_notebook_item(name)\n if not item:\n item = _get_table(name)\n\n if isinstance(item, datalab.bigquery.Schema):\n return item\n if hasattr(item, 'schema') and isinstance(item.schema, datalab.bigquery._schema.Schema):\n return item.schema\n return None",
"def get_table_name(self):\r\n table = input(\"Name of table: \")\r\n return table",
"def schema( self ):\n if ( self._table ):\n return self._table.schema()\n return None",
"def form_table_name(self, bucket_name):\n table_name = bucket_name[3:-4].lower().replace('-', '_')\n return table_name",
"def GetInfoSchemaTableField(request, schema_table, name):\n # Get a connection\n connection = GetConnection(request)\n \n sql = \"SELECT * FROM `schema_table_field` WHERE schema_table_id = %s AND name = %s\"\n result_schema_table_field = connection.Query(sql, [schema['id'], table])\n if not result_schema_table_field:\n raise Exception('Unknown schema_table_field: %s: %s: %s' % (request.connection_data['datasource']['database'], table, name))\n \n schema_table = result_schema_table_field[0]\n \n return schema_table",
"def _table_name_for(content_type):\n return content_type.replace('.', '')",
"def get_table_name(self, object_name):\n return self.get_table_prefix() + \"Per_\" + object_name",
"def __generate_table_name__(self, table, username, end_point_type):\n # If private endpoint\n if end_point_type == 'private':\n if username != \"\":\n return table + \"_\" + username\n else:\n return table\n else:\n return table",
"def forecast_table_name():\n return None",
"def __tablename__(self) -> str:\n return gen_tablenames(self.__name__)",
"def get_table_name(engine, obj):\n # noinspection PyProtectedMember\n return engine._compute_table_name(obj.__class__)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plays the enemy's turn
|
def tour(self, lieu):
    if random.randint(0, 100) <= self.agressivite: # if the enemy attacks
        persos_l = self.game.get_all_persos_lieu(lieu) # gather all the characters in this location
        #
        if len(persos_l) >= 1: # if there are characters in this location
            # first choose the enemy's attack type; that way, if the enemy
            # has no attack available, we do not go and fetch every
            # character of the location for nothing
            tp_att = "corps à corps"
            cac = self.get_attaque("corps à corps")
            dist = self.get_attaque("distance")
            if cac is None and dist is None: # the enemy cannot attack
                return
            elif cac is None:
                tp_att = "distance"
            elif dist is None:
                tp_att = "corps à corps"
            else:
                if self.moy_lst(cac) > self.moy_lst(dist):
                    tp_att = "corps à corps"
                else:
                    tp_att = "distance"
            # look for a target character
            p_cible = None
            for p in persos_l:
                if p.classe == "tank": # tanks are attacked first
                    p_cible = p
            #
            if p_cible is None: # if there is no tank in this location, take the first character there
                p_cible = persos_l[0]
            # the enemy attacks the target character
            mess = self.attaque_cible(p_cible, tp_att)
            mess = json.dumps({"type": "message", "value": mess})
            self.game.server.send_all(mess)
|
[
"def jouer(self, joueur, colonne):\n colonne -= 1 # offset\n if colonne > self.largeur-1:\n colonne = self.largeur-1\n elif colonne < 0:\n colonne = 0\n\n for y in range(0, self.hauteur):\n if self.grille[y][colonne] == EMPTY:\n self.grille[y][colonne] = joueur\n return 0 # on a reussi a jouer dans la colonne\n\n return 1 # on a pas pu jouer dans cette colonne (pleine)",
"def enem():\r\n global esl, xyz, h, POW, punt2, puntaje2, livesl, m, punt1, puntaje1, ene, en, en1, n, esn, lives,contene, xz, x, contene2, esm \r\n for o in range(len(ene)):\r\n \r\n # golpear tortuga mario\r\n if((c.coords(mario)[0]> c.coords(ene[o][0])[0]-50 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+50 and c.coords(mario)[1]> c.coords(ene[o][0])[1] and c.coords(mario)[1]< c.coords(ene[o][0])[1]+120 and ene[o][1]!=7 and ene[o][1]!=2 and ene[o][1]!=9) or xyz==True):\r\n ene[o][1]= 2\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg1, anchor=NW)\r\n # golpear tortuga luigo\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-50 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+50 and c.coords(luigi)[1]> c.coords(ene[o][0])[1] and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+100 and ene[o][1]!=7 and ene[o][1]!=2 and ene[o][1]!=9):\r\n ene[o][1]= 2\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg1, anchor=NW)\r\n # morir mario\r\n if(c.coords(mario)[0]> c.coords(ene[o][0])[0]-25 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+25 and c.coords(mario)[1]> c.coords(ene[o][0])[1]-30 and c.coords(mario)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]!=2 and esm==True):\r\n esm=False\r\n c.coords(mario, 750, 665)\r\n lives-=1\r\n if(lives==4):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==3):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==2):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==1):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==0):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n c.coords(mario, -1000, -1000)\r\n # morir luigi\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-25 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+25 and c.coords(luigi)[1]> c.coords(ene[o][0])[1]-30 and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]!=2 and esl==True):\r\n esl=False\r\n c.coords(luigi, 750, 665)\r\n livesl-=1\r\n if(livesl==4):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==3):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==2):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==1):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==0):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n c.coords(luigi, -1000, -1000)\r\n # matar enemigo y puntuacion mario\r\n if(c.coords(mario)[0]> c.coords(ene[o][0])[0]-45 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+45 and c.coords(mario)[1]> c.coords(ene[o][0])[1]-30 and c.coords(mario)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]==2):\r\n puntuacion()\r\n c.delete(ene[o][0])\r\n ene.remove(ene[o])\r\n break\r\n if(c.coords(mario)[0]> c.coords(ene[o][0])[0]-45 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+45 and c.coords(mario)[1]> c.coords(ene[o][0])[1] and c.coords(mario)[1]< c.coords(ene[o][0])[1]+100 and (ene[o][1]==7 or ene[o][1]==9)):\r\n puntuacion()\r\n c.delete(ene[o][0])\r\n ene.remove(ene[o])\r\n break\r\n # matar enemigo y puntuacion luigi\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-45 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+45 and c.coords(luigi)[1]> c.coords(ene[o][0])[1]-30 and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]==2):\r\n puntuacion2()\r\n c.delete(ene[o][0])\r\n 
ene.remove(ene[o])\r\n break\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-45 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+45 and c.coords(luigi)[1]> c.coords(ene[o][0])[1] and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+100 and (ene[o][1]==7 or ene[o][1]==9)):\r\n puntuacion2()\r\n c.delete(ene[o][0])\r\n ene.remove(ene[o])\r\n break\r\n # movimiento enemigo 1\r\n if(ene[o][1]==0):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg, anchor=NW)\r\n c.move(ene[o][0],-3-x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=1\r\n elif(ene[o][1]==1):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg2, anchor=NW)\r\n c.move(ene[o][0],-3-x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=0\r\n # moviento enemigo 2\r\n if(ene[o][1]==3):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg3, anchor=NW)\r\n c.move(ene[o][0],3+x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=4\r\n elif(ene[o][1]==4):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg4, anchor=NW)\r\n c.move(ene[o][0],3+x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=3\r\n # movimiento enemigo 3\r\n if(contene<=5): \r\n if(ene[o][1]==5):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg6, anchor=NW)\r\n c.move(ene[o][0],-3-x,-10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=5\r\n contene+=1\r\n elif(contene>=5):\r\n if(ene[o][1]==5):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg5, anchor=NW)\r\n c.move(ene[o][0],-3-x,10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=5\r\n contene+=1\r\n if(contene==15):\r\n contene=0\r\n # movimiento enemigo 4\r\n if(contene2<=5): \r\n if(ene[o][1]==6):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg9, anchor=NW)\r\n c.move(ene[o][0],3+x,-10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=6\r\n contene2+=1\r\n elif(contene2>=5):\r\n if(ene[o][1]==6):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg10, anchor=NW)\r\n c.move(ene[o][0],3+x,10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=6\r\n contene2+=1\r\n if(contene2==15):\r\n contene2=0\r\n # movimiento enemigo 5\r\n if(ene[o][1]==7):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg7, anchor=NW)\r\n c.move(ene[o][0],-9-x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n if(ene[o][1]==9):\r\n ca=c.coords(ene[o][0])[0]\r\n ca1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(ca, ca1, image=eneimg11, anchor=NW)\r\n c.move(ene[o][0],9+x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ventana.after(85-y, enem)",
"def moverluigi():\r\n global estado, mario, q, a, te, te1,p, esm, luigi, yatu, estadol, y, u, cv, esl\r\n \r\n#=========================== mover izquierda luigi ==============================#\r\n # Direccion\r\n if(yatu['a']==True and y==False):\r\n esl=True\r\n te=c.coords(luigi)[0]\r\n te1=c.coords(luigi)[1]\r\n c.delete(luigi)\r\n luigi= c.create_image(te, te1, image=luimg, anchor=NW)\r\n c.move(luigi,-3,0)\r\n ventana.update()\r\n ventana.after(2)\r\n estadol=1\r\n y=True\r\n # Animacion\r\n elif(yatu['a']==True and y==True):\r\n esl=True\r\n if(u==False):\r\n te=c.coords(luigi)[0]\r\n te1=c.coords(luigi)[1]\r\n c.delete(luigi)\r\n luigi= c.create_image(te, te1, image=luimg3, anchor=NW)\r\n c.move(luigi,-9,0)\r\n ventana.update()\r\n ventana.after(2)\r\n estadol=1\r\n u=True\r\n elif(u==True):\r\n te=c.coords(luigi)[0]\r\n te1=c.coords(luigi)[1]\r\n c.delete(luigi)\r\n luigi= c.create_image(te, te1, image=luimg, anchor=NW)\r\n c.move(luigi,-9,0)\r\n ventana.update()\r\n ventana.after(2)\r\n estadol=1\r\n u=False\r\n \r\n \r\n \r\n\r\n\r\n \r\n#=================== mover derecha luigi ======================#\r\n # Direccion\r\n elif(yatu['d']==True and y==True):\r\n esl=True\r\n te=c.coords(luigi)[0]\r\n te1=c.coords(luigi)[1]\r\n c.delete(luigi)\r\n luigi= c.create_image(te, te1, image=luimg2, anchor=NW)\r\n c.move(luigi,9,0)\r\n ventana.update()\r\n ventana.after(2)\r\n estadol=3\r\n y=False\r\n # Animacion\r\n elif(yatu['d']==True and y==False):\r\n esl=True\r\n if(u==False):\r\n te=c.coords(luigi)[0]\r\n te1=c.coords(luigi)[1]\r\n c.delete(luigi)\r\n luigi= c.create_image(te, te1, image=luimg4, anchor=NW)\r\n c.move(luigi,9,0)\r\n ventana.update()\r\n ventana.after(2)\r\n estadol=3\r\n u=True\r\n elif(u==True):\r\n te=c.coords(luigi)[0]\r\n te1=c.coords(luigi)[1]\r\n c.delete(luigi)\r\n luigi= c.create_image(te, te1, image=luimg2, anchor=NW)\r\n c.move(luigi,9,0)\r\n ventana.update()\r\n ventana.after(2)\r\n estadol=3\r\n u=False\r\n \r\n \r\n \r\n ventana.after(50-cv, moverluigi)",
"def atororhasica(self):\n if self.Index > 2:\n if (self.Linary[self.Index - 2] == skta) and (self.Linary[self.Index - 1] == ru):\n if set_memberP(self.Linary[self.Index + 1], Hasch_and_skta):\n self.Linary[self.Index - 2] = skto # PMS: Linary[Index-1=sktu; adguna\n self.deletary(self.Index - 1)\n self.Index = self.Index - 1",
"def mouvements_simple_piece(self,i,j):\r\n \r\n simples = []\r\n etat_piece = self.cases[i][j].get_etat()\r\n coul_piece = self.cases[i][j].get_coul()\r\n \r\n if coul_piece==\"blanc\":\r\n #deplacer en avant\r\n if i!=7:\r\n #deplacer a gauche\r\n if j!=0:\r\n if self.cases[i+1][j-1].est_vide():\r\n simples.append((i+1,j-1))\r\n #deplacer a droite\r\n if j!=7:\r\n if self.cases[i+1][j+1].est_vide():\r\n simples.append((i+1,j+1))\r\n #deplacer en arriere (dames seulement) \r\n if i!=0 and etat_piece==\"dame\":\r\n #deplacer a gauche\r\n if j!=0:\r\n if self.cases[i-1][j-1].est_vide():\r\n simples.append((i-1,j-1))\r\n #deplacer a droite\r\n if j!=7:\r\n if self.cases[i-1][j+1].est_vide():\r\n simples.append((i-1,j+1))\r\n \r\n \r\n elif coul_piece==\"noir\":\r\n #deplacer en avant\r\n if i!=0:\r\n #deplacer a gauche\r\n if j!=0:\r\n if self.cases[i-1][j-1].est_vide():\r\n simples.append((i-1,j-1))\r\n #deplacer a droite\r\n if j!=7:\r\n if self.cases[i-1][j+1].est_vide():\r\n simples.append((i-1,j+1))\r\n #deplacer en arriere (dames seulement)\r\n if i!=7 and etat_piece==\"dame\":\r\n #deplacer a gauche\r\n if j!=0:\r\n if self.cases[i+1][j-1].est_vide():\r\n simples.append((i+1,j-1))\r\n #deplacer a droite\r\n if j!=7:\r\n if self.cases[i+1][j+1].est_vide():\r\n simples.append((i+1,j+1))\r\n return simples",
"def anneal(self):\n\n # starting from a random path may be more effective, because it could\n # cool very quickly with a nearestneighbor\n \n \"\"\"\n\n # take in a graph and run nnTSP on the graph \n currentBest = self.nnGraph.nn_best_reversed()\n if is_valid_tour(currentBest[0]): \n # the eventual ouput of cities, but intially the input list\n citiesBest = currentBest[0]\n # the prelimary weight of the path (second index of nn_graph tuple)\n currentBest_weight = currentBest[1]\n else: \n print(\"Error: NN Tour is invalid\") \n return None\n\n \"\"\"\n # naive solution to start\n naive = nnGraph(self.cities, self.colorList, self.numcities)\n citiesBest = naive.nn_best_reversed()[0]\n # print \"What is the length of citiesBest? --> \" + str(len(citiesBest))\n currentBest_weight = self.tour_cost(citiesBest)\n starting_weight = currentBest_weight\n \n distances_current = []\n distances_best = []\n\n try:\n for iteration in range(self.maxIterations):\n # search is restarted at every iteration from the best known solution\n temperature = self.start_temp\n cities_current = citiesBest\n cities_new = citiesBest\n distance_current = currentBest_weight\n distance_new = currentBest_weight\n \n ### TEST ###\n # print \"Initialized parameters: \"\n # print \"starting temperature --> \" , temperature\n \"\"\"\n print \"iteration #\" , iteration\n print \"input list of cities (should be a list of indices 0-49) -->\" + str(cities_current)\n print \"weight of the tour above --> \" , distance_current \n \"\"\"\n\n step = 0\n while temperature > self.end_temp:\n # computing indices of the two cities to swap\n # never move the first city (??)\n index = random.sample(xrange(self.numcities-1), 2)\n # print \"indices: \" , index\n ### TEST ###\n # print \"These are the indices of cities to be swapped \" + str(index)\n # why this? 
not sure that we need it\n # index[0] += 1\n # index[1] += 1\n # naming the swapped cities\n cityA = index[0]\n cityB = index[1]\n\n # optimize by recomputing only the changed distances\n \n ha = random.randint(0,1)\n\n # creating a new list of the swapped cities\n if (ha > .2):\n swap_before = self.distance_swap(cities_new, cityA, cityB)\n # ensure that this swap creates a valid path, otherwise start over\n if self.is_valid_tour(swap_before) == False:\n # print \"Does this part actually run?\"\n continue\n else:\n swap_before = self.reverse_cities(cities_new, cityA, cityB)\n if self.is_valid_tour(swap_before) == False:\n continue\n\n ### TESTING TO SEE IF THIS IS THE PROBLEM\n # cities_new[cityA], cities_new[cityB] = cities_new[cityB], cities_new[cityA]\n swap_after = cities_new\n \"\"\"\n print \"Step: \" , step\n print \"before: \" , str(swap_before)\n print \"after: \" , str(swap_after)\n \"\"\"\n\n # and their costs\n ### TEST ###\n # print \"Now, cities_current and cities_new should only differ in their indices, \" + str(cityA) + \", \" + str(cityB)\n # print \"cities_current / cost --> \" , cities_current , \" / \" , self.tour_cost(cities_current)\n # print \"cities_new / cost -->\" , cities_new , \" / \" , self.tour_cost(cities_new)\n\n # compute the distance of the swapped city list\n # not exactly sure why these additions and subtractions work this way\n distance_new = self.tour_cost(swap_before)\n distance_current = self.tour_cost(swap_after)\n \"\"\"\n print \"What are distance new and distance current?\"\n print \"current: \" , distance_current\n print \"new: \" , distance_new\n \"\"\"\n # Kirkpatrick acceptance probability\n \n diff = distance_new - distance_current\n \"\"\"\n current_cost = self.tour_cost(cities_current)\n new_cost = self.tour_cost(swap_before)\n\n diff = new_cost - current_cost\n \"\"\"\n\n # print \"What is diff? --> \" , diff\n if diff < 0 or math.exp( -diff / temperature ) > random.randint(0,1):\n # print \"Does this ever execute?\"\n cities_current = swap_before\n distance_current = distance_new\n\n \"\"\"\n else:\n # no improvement and worsened result not within alpha\n distance_new = distance_current\n cities_current = cities_current[:]\n \"\"\"\n\n # update the best known if solution is an improvement\n # not for the annealing, but for restarts (in which we start\n # with the best solution known)\n if distance_current < currentBest_weight:\n citiesBest = cities_current\n currentBest_weight = distance_current\n\n # decrease temperature by alpha, increment step counter\n distances_current.append(distance_current)\n distances_best.append(currentBest_weight)\n temperature = temperature * self.alpha\n step += 1\n\n self.bestScore = currentBest_weight\n self.bestTour = citiesBest\n\n except KeyboardInterrupt, e:\n print \"Interrupted on user demand\"\n print \"performed iterations: \" + str(iteration)\n print \"current best tour: \" + str(citiesBest)\n print \"cost of current best tour: \" + str(currentBest_weight)\n\n \n return citiesBest, distances_current, distances_best, starting_weight",
"def completeTour(self, rows, cols):\n count = 1 #a deep-level counter\n value = 1 #a counter to keep trace of the value of the knight during the \"visit\"\n #using a support list we will extend the fringe of the previous calculated moves in order\n #to accomplish an entire knight's tour, keeping trace of the level of the tour from the knight\n moves = [self.get_position()]\n start = 0\n stop = len(moves)\n #if no other move is possible the while ends\n while start != stop:\n for move in moves[start:stop]:\n new_moves = self.singleMove(move, rows, cols)\n for new in new_moves:\n self._moves.append((move, new, count))\n moves += new_moves\n\n start = stop\n stop = len(moves)\n\n value += 1\n if value > self.get_value():\n value = 1\n count += 1",
"def schlafen(self):",
"def quitter_partie():\n\n # Envoie du message depart\n connexion_avec_serveur.send(\"joueur_quitte\".encode())\n return",
"def nieuw(self):\n\n print(\"Nieuw spel\")\n\n # Start alle frames op\n self._topFrame.start()\n self._midFrame.start()\n self._botFrame.start()\n\n # Zet de focus op de stapel\n self._midFrame.focus()\n\n # Maak een generator met de spelers\n self._spelers = self._topFrame.get_spelers()\n\n # Zet de eerste speler aan beurt\n self._curspeler = next(self._spelers)\n\n # Een AI moet automatisch een zet maken\n if not self._curspeler.human:\n self.zet()",
"def tuer(self):\n self.est_mort = True # TODO : ajouter une animation pour chaque type de bloc",
"def solitaire():\n agent = MinimaxAgent()\n\n board = Board()\n board.print_board()\n\n while board.actions():\n action = agent.move(board)\n board, _ = board.successor(action)\n board.print_board()",
"def manu_graph(idul):\n partie = api.débuter_partie(ARGUMENTS.idul.lower())\n jeu = qr.Quoridor([idul.lower(), \"automate\"])\n jeu.état = partie[1]\n print(jeu)\n print(\"La méthode de jeu manuelle avec affichage graphique n'a pas encore été implémentée.\")",
"def juegodeahorcado(palabra,nletras):\n dummy= \"_\"*nletras\n intentos=0\n while palabra!=dummy:\n letra= control_de_inputs(input(\"Introduzca una letra: \")).upper()\n if palabra.find(letra)>=0:\n print(\"La letra si se encuentra, muy bien!!!\\n\")\n contador = 0\n while contador <nletras:\n if palabra[contador]!=letra:\n contador+=1\n continue\n else:\n if contador == 0:\n dummy2=letra+dummy[contador+1:]\n elif contador == nletras-1:\n dummy2=dummy[:contador]+letra\n else:\n dummy2=dummy[:contador]+letra+dummy[contador+1:]\n dummy=dummy2\n contador+=1\n else:\n intentos+=1\n if intentos==4:\n print(\"\\n\\tTe quedan \"+str(5-intentos)+\" intento\")\n else:\n print(\"\\n\\tTe quedan \"+str(5-intentos)+\" intentos\")\n if intentos == 5:\n print(\"\\n\\tLo siento: Has perdido! :(\\n\\n\\tLa palabra era: \"+palabra+\"\\n\\n\")\n break\n print(\"\\n\\t\\t\"+dummy+\"\\n\\n\") \n return dummy",
"def antalNaboer(i,j):\n global verden\n naboer = 0\n if levende(i-1, j+1):\n naboer += 1\n if levende(i , j+1):\n naboer += 1\n if levende(i+1, j+1):\n naboer += 1\n if levende(i-1, j):\n naboer += 1\n if levende(i+1, j):\n naboer += 1\n if levende(i-1, j-1):\n naboer += 1\n if levende(i , j-1):\n naboer += 1\n if levende(i+1, j-1):\n naboer += 1\n return naboer",
"def wheeliewiggle(self): \n \"Wheelie\"\n print(\"wheeliewiggle activated\")\n for x in range(3):\n self.turn_to_deg(0)\n self.fwd(left=100,right=100)\n time.sleep(1)\n self.fwd(left=-100,right=-100)\n time.sleep(.2)\n self.back()\n time.sleep(.8)\n self.fwd()\n time.sleep(.5)\n # Going to the right by wiggling\n for x in range(5):\n self.turn_to_deg(80)\n self.fwd()\n time.sleep(.5)\n self.turn_to_deg(100)\n self.back()\n time.sleep(.5)\n # Returning to original position before going to the right \n for x in range(5):\n self.turn_to_deg(100)\n self.fwd()\n time.sleep(.5)\n self.turn_to_deg(80)\n self.back()\n time.sleep(.5)\n self.stop()",
"def partie_terminee(nb_joueur, temple) :\r\n assert isinstance(nb_joueur, int), \"Type nb_joueur incorrect\" # controle des types\r\n assert isinstance(temple, temple_0), \"Type temple incorrect\" # des arguments \r\n\r\n #begin\r\n #print(\"temple.un_0\",temple.un_0)#\r\n if nb_joueur==2:\r\n if temple.un_0>=2:\r\n print(\"Le joueur 1 a gagne la partie.\")\r\n return True\r\n elif temple.deux_0>=2:\r\n print(\"Le joueur 2 a gagne la partie.\")\r\n return True\r\n elif temple.trois_0>=2:\r\n print(\"Le joueur 3 a gagne la partie.\")\r\n return True\r\n elif temple.quatre_0>=2:\r\n print(\"Le joueur 4 a gagne la partie.\")\r\n return True\r\n else:\r\n return False\r\n else:\r\n if temple.un_0>=1:\r\n print(\"Le joueur 1 a gagne la partie.\")\r\n return True\r\n elif temple.deux_0>=1:\r\n print(\"Le joueur 2 a gagne la partie.\")\r\n return True\r\n elif temple.trois_0>=1:\r\n print(\"Le joueur 3 a gagne la partie.\")\r\n return True\r\n elif temple.quatre_0>=1:\r\n print(\"Le joueur 4 a gagne la partie.\")\r\n return True\r\n else:\r\n return False \r\n #end\r",
"def phase_jeu():\n\n fin_du_jeu = False\n while not fin_du_jeu:\n # on attend un message du serveur\n message = attendre_message_serveur()\n\n if message['action'] == 'attente':\n print('Le serveur ce prepare, veuillez patienter')\n elif message['action'] == 'attente_joueur':\n print('Attente de l\\'autre joueur, veuillez patienter')\n elif message['action'] == 'commence':\n pret = 'non'\n while not (pret[0].lower() == 'c'):\n print('Tape c pour commencer')\n pret = input(\"> \")\n\n #envoie tu message pret au serveur\n connexion_avec_serveur.send(\"joueur_pret\".encode())\n elif message['action'] == 'ton_tour':\n\n # on montre la carte present dans message['carte']\n print(message['carte'], end=\"\")\n\n (action, direction, deplacement) = choix_mouvement()\n\n # on creer un dictionnaire pour envoyer les donnees en une fois\n mouvement = dict()\n mouvement['action'] = action\n mouvement['direction'] = direction\n mouvement['deplacement'] = deplacement\n\n # on formate le dictionnaire pour l'envoie\n data_string = pickle.dumps(mouvement)\n\n # envoie de l'action au serveur\n connexion_avec_serveur.send(data_string)\n\n elif message['action'] == 'carte':\n # on montre la carte present dans message['carte']\n print(message['carte'], end=\"\")",
"def evolution(self):\n global vitesse\n time.sleep(0.026) # Timer\n self.position[0] += vitesse[0]\n self.position[1] += vitesse[1]\n\n vitesse[1] += 4 # Apesanteur\n vitesse[0] *= 0.99 # Frottements\n vitesse[1] *= 0.99\n self.rect.x = self.position[0] # Application au coordonnees de la balle\n self.rect.y = self.position[1]\n if self.rect.y >= 720 or self.rect.x >= 1280: # Si la balle depasse le bas/cote de l'ecran => rejoue\n restart()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Execute command on node.
|
def execute(self, command):
Execution.__log.info('Executing command on node %s..' % self.hostname)
return self._execute(command)
|
[
"def run_command(self, host, command):\n return self.remote_executor.run(host, command)",
"def execute_basic(self, node_context):\n raise NotImplementedError('execute() must be implemented')",
"def execute(self, name, command):\n if name in [\"localhost\"]:\n r = '\\n'.join(sh.sh(\"-c\", command).split()[-1:])\n else:\n r = '\\n'.join(sh.ssh(name, command).split()[-1:])\n return r",
"def execute(self, command, args=None, cwd=None, env=None):\n pass",
"def _execute(self, command):\n if isinstance(command, str):\n command = [command]\n\n process = self.ansible_cmd.cli_cmd(host=self.hostname, module='shell', args=command)\n return process",
"def pythonCommandToExecute(*args, **kwargs):\n \n pass",
"def execute(self, *cmd):\n logger = logging.getLogger(self.__class__.__name__ + \".execute\")\n\n cmdstring = ' '.join(cmd)\n logger.debug(\" Command string = %s\" % cmdstring)\n\n self.connect()\n logger.debug(\"Connected: Sending command string\")\n self._session.sendline(cmdstring)\n\n # add this if you want to suppress the command input echo\n #self._session.expect(cmdstring)\n logger.debug(\"Waiting for prompt after command\")\n self._session.prompt(timeout=30)\n response = self._session.before\n logger.debug(\"Response = '%s'\" % response)\n logger.debug(\"session buffer = %s\" % self._session.buffer)\n \n\n #result = MsaResult(self._session.before)\n #node = MsaNode(self._session.before)\n logger.debug(\"Disconnecting\")\n self.disconnect()\n # return (result, node)\n return response",
"def _run_nodetool_command(self, cmd, *args, **kwargs):\n return utils.execute('nodetool', '-h', 'localhost',\n cmd, *args, **kwargs)",
"def execute(self, cluster, commands):\n raise NotImplementedError",
"def exec_command(self, command):\n cmd_pkt = RconPacket(self.pkt_id.next(), SERVERDATA_EXECCOMMAND,\n command)\n self._send_pkt(cmd_pkt)\n resp = self.read_response(cmd_pkt, True)\n return resp.body",
"def execCommand0(self, cmd, *args):\n self.conn.execute(cmd, *args)",
"def command(self, text) -> object:\n self._logger.info(\"Executing command '\"+str(text)+\"'...\")\n response = self._send(text)\n return response.content",
"def _execute_command(self, command, env=None, cwd=None, with_shell=False):\n raise NotImplementedError",
"async def invoke(self, ctx):\n if ctx.command is not None:\n self.dispatch('command', ctx)\n try:\n if (await self.can_run(ctx, call_once=True)):\n await ctx.command.invoke(ctx)\n except CommandError as e:\n await ctx.command.dispatch_error(ctx, e)\n else:\n self.dispatch('command_completion', ctx)\n elif ctx.invoked_with:\n exc = CommandNotFound('Command \"{}\" is not found'.format(ctx.invoked_with))\n self.dispatch('command_error', ctx, exc)",
"def run_command_on_host(cmd, host_resource):\n rc, out, err = host_resource.executor().run_cmd(cmd)\n if rc:\n logger.error(\n \"Failed to run cmd: %s on: %s, err: %s\"\n % (cmd, host_resource, err)\n )\n return None\n return out",
"def do_exec(self, statement):\n\n self.remote_exec(statement.args)",
"def invoke(self, command_line=None):\n self.call_with(self.parse(command_line))",
"def controlNode(self):\n\n # Process commmands\n self.processCommands()\n\n # Check status of other formation nodes\n self.monitorFormationStatus()\n \n # Run unique node behavior\n self.executeNode() \n\n # Log data\n self.logData()",
"def executor_cli():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
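A minimal usage sketch for the "Execute command on node." record above. The surrounding Execution class, its logger, the ssh-based _execute transport, and the node01 hostname are assumptions added for illustration; the record itself only defines the execute wrapper.

import logging
import subprocess

class Execution:
    # Hypothetical host class for the execute method shown in the record above.
    __log = logging.getLogger("Execution")

    def __init__(self, hostname):
        self.hostname = hostname

    def _execute(self, command):
        # Assumed transport: run the command on the remote node via ssh and return stdout.
        return subprocess.run(["ssh", self.hostname, command],
                              capture_output=True, text=True).stdout

    def execute(self, command):
        Execution.__log.info('Executing command on node %s..' % self.hostname)
        return self._execute(command)

# e.g. print(Execution("node01").execute("uptime"))  # hypothetical hostname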
Return OrderedDict of the groups and variables in Fortran namelist files.
|
def nmldict(nmlfnames):
if isinstance(nmlfnames, str):
nmlfnames = [nmlfnames]
nmlall = collections.OrderedDict() # dict keys are nml paths, values are Namelist dicts
for nml in nmlfnames:
nmlall[nml] = f90nml.read(nml)
if len(nmlall[nml]) == 0:
warnings.warn('{} does not contain any namelist data'.format(nml))
for nml in nmlall:
for group in nmlall[nml]:
if isinstance(nmlall[nml][group], list):
# A list indicates group is defined more than once in nml file.
# The list contains the groups in order of occurrence.
# For the nth group's values to have any effect in f90,
# the namelist needs to be read n times from the input file,
# without closing the file in between.
# If the same variable name occurs in multiple instances of
# the same group, the last read instance is used.
# Since we don't know how many times the group is read in f90,
# ignoring all but the first seems the safest option.
# TODO: provide an option to consolidate all groups in list?
warnings.warn('&{} occurs {} times in {}. Using only the first instance of this group.'.format(group, str(len(nmlall[nml][group])), nml))
nmlall[nml][group] = nmlall[nml][group][0]
return nmlall
|
[
"def generate_namelist():\n dataset_path = os.path.abspath(os.path.join(cwd, os.pardir)) + \"/data/\"\n f = h5py.File(dataset_path + \"dexnet_2_database.hdf5\", 'r')\n dataset = f['datasets']\n dataset_names = ['3dnet', 'kit']\n obj_list = np.load(os.getcwd() + \"/name_list.npy\",\n allow_pickle=True).item()\n \n for dataset_name in dataset_names:\n obj_names = obj_list[dataset_name]\n group = dataset[dataset_name][\"objects\"]\n obj_grasp_list = []\n for obj_name in obj_names:\n obj_dataset = group[obj_name]\n grasps = obj_dataset[\"grasps\"][\"yumi_metal_spline\"]\n obj_grasp_list.append((obj_name, len(grasps)))\n obj_list[dataset_name] = obj_grasp_list\n np.save(os.getcwd() + \"/name_list.npy\", obj_list)",
"def configure_groups():\n from collections import OrderedDict \n\n order = 0.0\n group_config = OrderedDict()\n\n group_config['H5F'] = {}\n group_config['H5D'] = {}\n group_config['MPIIO'] = {}\n group_config['DXT_MPIIO'] = {}\n group_config['STDIO'] = {}\n group_config['POSIX'] = {}\n group_config['DXT_POSIX'] = {}\n group_config['LUSTRE'] = {}\n\n # apply order\n for k,v in group_config.items():\n v['order'] = order\n order += 1.0\n\n return group_config",
"def _get_names(dirs):\n alphabets = set()\n label_names = {}\n for d in dirs:\n for example in _walk_omniglot_dir(d):\n alphabet, alphabet_char_id, label, _, _ = example\n alphabets.add(alphabet)\n label_name = \"%s_%d\" % (alphabet, alphabet_char_id)\n if label in label_names:\n assert label_names[label] == label_name\n else:\n label_names[label] = label_name\n label_names = [label_names[k] for k in sorted(label_names)]\n return alphabets, label_names",
"def get_names ( self ):\n return self._evars.keys()",
"def make_name_dicts() -> list[dict[str, str]]:\n names = []\n df = fjc_create.load_file(instructions.NAMES_PATH)\n df['name_perm'] = df['name_perm'].str.upper()\n df['concat_name1'] = convert_judge_name_series(\n df['year'], \n df['court_num'], \n df['name_perm'], \n dict_type = 1)\n df['concat_name2'] = convert_judge_name_series(\n df['year'], \n df['circuit_num'], \n df['name_perm'], \n dict_type = 1)\n df['concat_name3'] = convert_judge_name_series(\n df['year'], \n df['court_num'], \n df['name_perm'], \n dict_type = 2)\n df['concat_name4'] = convert_judge_name_series(\n df['year'], \n df['circuit_num'], \n df['name_perm'], \n dict_type = 2)\n names.append(df.set_index('concat_name1').to_dict()['judge_name'])\n names.append(df.set_index('concat_name2').to_dict()['judge_name'])\n names.append(df.set_index('concat_name3').to_dict()['judge_name'])\n names.append(df.set_index('concat_name4').to_dict()['judge_name'])\n names.append(df.set_index('name_perm').to_dict()['judge_name'])\n return names",
"def keys(self):\n ds = self._nc_handle\n group_keys = list(ds.groups.keys())\n var_keys = list(ds.variables.keys())\n return tuple(group_keys) + tuple(var_keys)",
"def list_variables(ckpt_dir_or_file):\n reader = load_checkpoint(ckpt_dir_or_file)\n variable_map = reader.get_variable_to_shape_map()\n names = sorted(variable_map.keys())\n result = []\n for name in names:\n result.append((name, variable_map[name]))\n return result",
"def sortVars(varnames):\n esorteddict,isorteddict = {},{}\n for varname in varnames:\n ttimes = HistoryUtil.var2Times(varname)\n if ttimes.has_key(Trace.INFECTED):\n isorteddict.setdefault(ttimes[Trace.INFECTED],set()).add(varname)\n elif ttimes.has_key(Trace.EXPOSED):\n esorteddict.setdefault(ttimes[Trace.EXPOSED],set()).add(varname) \n sortedvars = []\n for time in sorted(isorteddict.keys()):\n sortedvars.extend(isorteddict[time]) \n for time in sorted(esorteddict.keys()):\n sortedvars.extend(esorteddict[time]) \n return sortedvars",
"def createAttrDicts():\n ret = {}\n # lfw v1.1\n ret['lfw_v1.1'] = d = {}\n fields = getmodelfields('lfw_v1.1')\n for l in open('attrnames.txt'):\n num, name = l.strip().split('\\t', 1)\n if name not in fields: continue\n d[num] = d[int(num)] = d[name] = name\n return ret",
"def variable_format(var_name: str, stata_fmt: str) -> OrderedDict:\n return OrderedDict([\n ('@varname', var_name),\n ('#text', stata_fmt)\n ])",
"def get_variable_names(self) -> list:\n prefix = self.PREFIX\n data_name = self.data_name\n if isinstance(data_name, str):\n data_name = data_name.lower()\n l_var = []\n for item in self.items:\n l_var.extend(item.get_variable_names())\n l_var_out = [((prefix, data_name), ) + var for var in l_var]\n return l_var_out",
"def create_pymol_dic(babel_format, file_list):\n pymol_dic = {}\n for f_name in file_list:\n pymol_dic[f_name] = pybel.readfile(babel_format, f_name).next()\n return pymol_dic",
"def __generateDefinedNamesList(self):\n definedNames = []\n for row in range(self.dnList.topLevelItemCount()):\n itm = self.dnList.topLevelItem(row)\n name = itm.text(0).strip()\n value = itm.text(1).strip()\n if value:\n definedNames.append(\"{0}={1}\".format(name, value))\n else:\n definedNames.append(name)\n \n return definedNames",
"def __collect_names():\n for c in constants.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in u_funs.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in vars.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in lsts.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in funs.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in commands.keys():\n if not all_names.__contains__(c):\n all_names.append(c)",
"def parse_vector(file_path,pattern,var_order='rtczyx'):\n\n # validate the variable order\n val_variables(var_order)\n\n # get regular expression from file pattern\n regex, variables = get_regex(pattern)\n\n # initialize the output\n if len(variables) == 0:\n file_ind = []\n else:\n file_ind = {}\n\n # Unique values for each variable\n uvals = {key:[] for key in var_order}\n\n # Build the output dictionary\n with open(file_path,'r') as fr:\n for f in fr:\n \n # Parse filename values\n variables = parse_vector_line(f,pattern)\n\n # If the filename doesn't match the patter, don't include it\n if variables == None:\n continue\n \n # Generate the layered dictionary using the specified ordering\n temp_dict = file_ind\n if isinstance(file_ind,dict):\n for key in var_order:\n if variables[key] not in temp_dict.keys():\n if variables[key] not in uvals[key]:\n uvals[key].append(variables[key])\n if var_order[-1] != key:\n temp_dict[variables[key]] = {}\n else:\n temp_dict[variables[key]] = []\n temp_dict = temp_dict[variables[key]]\n \n # Add the file information at the deepest layer\n temp_dict.append(variables)\n\n for key in uvals.keys():\n uvals[key].sort()\n \n return file_ind, uvals",
"def get_names_c(self):\n names=['\"'+f.get_name()+'\"' for f in self.fields]\n names = '{' + ','.join(names)+'}'\n return names",
"def get_vars():\n thevars = {\n 'kernel_version': {\n 'desc': \"Exact version of the Linux kernel used in the OS\",\n 'label': 'Kernel Version',\n 'unit': '',\n 'parents': ['version']\n },\n 'user': {\n 'desc': \"User who compiled the kernel, host name where it happened\",\n 'label': 'Username, hostname',\n 'unit': '',\n 'parents': ['version']\n },\n 'gcc_version': {\n 'desc': \"Version of the GCC compiler used for building the kernel\",\n 'label': 'GCC Version',\n 'unit': '',\n 'parents': ['version']\n },\n 'os_version': {\n 'desc': \"OS version\",\n 'label': 'OS Version',\n 'unit': '',\n 'parents': ['version']\n },\n 'kernel_type': {\n 'desc': \"Type of the kernel. SMP indicates Symmetric MultiProcessing\",\n 'label': 'Kernel Type',\n 'unit': '',\n 'parents': ['version']\n },\n 'kernel_date': {\n 'desc': \"Date and time when the kernel was built\",\n 'label': 'Date of compilation',\n 'unit': '',\n 'parents': ['version']\n }\n }\n return thevars",
"def _get_groupNames(self) -> \"std::vector< std::string,std::allocator< std::string > >\" :\n return _core.Attributes__get_groupNames(self)",
"def getData(self):\n return (\n self.__generateIncludeDirectoriesList(),\n self.__generateDefinedNamesList(),\n self.__generateUndefinedNamesList(),\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
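A short usage sketch for the nmldict record above, assuming the f90nml package is installed; the run1/run2 namelist paths are hypothetical.

# Each value in the returned OrderedDict is an f90nml Namelist keyed by group name.
nmls = nmldict(['run1/input.nml', 'run2/input.nml'])
for path, nml in nmls.items():
    for group, variables in nml.items():
        print(path, group, dict(variables))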
Remove, in place, all Namelists that are the same as the previous one in nmlall. Does nothing if nmlall is not an OrderedDict.
|
def nmlprune(nmlall, ignore={}):
if len(nmlall) > 1:
idx = 0
while True:
# need deepcopy to avoid in-place modification by nmldiff
pair = copy.deepcopy(collections.OrderedDict(
itertools.islice(nmlall.items(), idx, idx+2)))
for group in ignore:
for var in ignore[group]:
for fn in pair:
if group in pair[fn]:
if var in pair[fn][group]:
del pair[fn][group][var]
nmldiff(pair)
if max([len(x) for x in pair.values()]) == 0:
del nmlall[list(pair.keys())[1]] # remove 2nd of pair
else:
idx += 1 # 2nd of pair is different from first, so retain it
if idx > len(nmlall)-2:
break
return nmlall
|
[
"def remove_duplicates(self):\n seen = set()\n self.nodes = [x for x in self.nodes if x not in seen and not seen.add(x)]",
"def nmldict(nmlfnames):\n if isinstance(nmlfnames, str):\n nmlfnames = [nmlfnames]\n\n nmlall = collections.OrderedDict() # dict keys are nml paths, values are Namelist dicts\n for nml in nmlfnames:\n nmlall[nml] = f90nml.read(nml)\n if len(nmlall[nml]) == 0:\n warnings.warn('{} does not contain any namelist data'.format(nml))\n for nml in nmlall:\n for group in nmlall[nml]:\n if isinstance(nmlall[nml][group], list):\n # A list indicates group is defined more than once in nml file.\n # The list contains the groups in order of occurrence.\n # For the nth group's values to have any effect in f90,\n # the namelist needs to be read n times from the input file,\n # without closing the file in between.\n # If the same variable name occurs in multiple instances of \n # the same group, the last read instance is used.\n # Since we don't know how many times the group is read in f90, \n # ignoring all but the first seems the safest option.\n # TODO: provide an option to consolidate all groups in list?\n warnings.warn('&{} occurs {} times in {}. Using only the first instance of this group.'.format(group, str(len(nmlall[nml][group])), nml))\n nmlall[nml][group] = nmlall[nml][group][0]\n return nmlall",
"def _reconcile_names(self):\n debug(\"reconcile names\")\n treed_tax = set()\n for leaf in self.tre.leaf_nodes():\n treed_tax.add(leaf.taxon)\n aln_tax = set()\n for tax, seq in self.aln.items():\n aln_tax.add(tax)\n prune = treed_tax ^ aln_tax\n missing = [i.label for i in prune]\n if missing:\n errmf = 'NAME RECONCILIATION Some of the taxa in the tree are not in the alignment or vice versa' \\\n ' and will be pruned. Missing \"{}\"\\n'\n errm = errmf.format('\", \"'.join(missing))\n sys.stderr.write(errm)\n del_aln = []\n del_tre = []\n for taxon in prune:\n assert (taxon in aln_tax) or (taxon in treed_tax)\n if taxon in aln_tax:\n # debug(taxon)\n del_aln.append(taxon)\n if taxon in treed_tax:\n del_tre.append(taxon)\n # debug(del_aln)\n # debug(del_tre)\n self.aln.remove_sequences(del_aln) \n self.tre.prune_taxa(del_tre) \n for tax in prune:\n # potentially slow at large number of taxa and large numbers to be pruned\n found = 0\n for otu in self.otu_dict:\n if self.otu_dict[otu][u'^ot:originalLabel'] == tax.label:\n self.otu_dict[otu]['^physcraper:status'] = \"deleted in name reconciliation\"\n found = 1\n if found == 0:\n sys.stderr.write(\"lost taxon {} in name reconcilliation \\n\".format(tax.label))\n self.aln.taxon_namespace.remove_taxon(tax)\n assert self.aln.taxon_namespace == self.tre.taxon_namespace\n for tax in self.aln.taxon_namespace:\n if tax.label in self.otu_dict.keys():\n pass\n else:\n found_label = 0\n match = re.match(\"'n[0-9]{1,3}\", tax.label)\n newname = \"\"\n if match:\n newname = tax.label[2:]\n newname = newname[:-1]\n for otu in self.otu_dict:\n original = self.otu_dict[otu].get(\"^ot:originalLabel\")\n if original == tax.label or original == newname:\n tax.label = otu\n found_label = 1\n if found_label == 0:\n sys.stderr.write(\"could not match tiplabel {} or {} to an OTU\\n\".format(tax.label, newname))",
"def removeAll(self):\n\t\t# Remove bookmarks in reveresed order to avoid shifting issues\n\t\tfor bookmark in reversed(self.bookmarks):\n\t\t\tself.bookmarks.remove(bookmark)\n\t\tself.titles = list()",
"def remove_known(tree: dict, brands_in_results):\n\n for subcat, brands in tqdm(tree.items()):\n\n for brand, clean_names in brands.items():\n groups = []\n for name_group in clean_names:\n names = [\n remove_subcat_brand_barcode_from_clean_names(\n name, brand, subcat, brands_in_results\n )\n for name in name_group\n ]\n names = [n for n in names if n]\n groups.append(names)\n\n tree[subcat][brand] = groups\n\n return tree",
"def remove_duplicates(lnk):\n while lnk != Link.empty:\n while lnk.rest != Link.empty and lnk.first == lnk.rest.first:\n lnk.rest = lnk.rest.rest\n lnk = lnk.rest",
"def _4_no_duplicates(self):\n self.governor_set = []\n self.duplicates_set = []\n # print(\"old\",self.loaded_governors)\n # print(\"new\",self.new_governors)\n self.places = []\n for item in self.loaded_governors:\n self.places.append(item[0])\n for governor in self.new_governors:\n if governor[0] in self.places:\n self.duplicates_set.append( governor)\n else:\n self.governor_set.append( governor)",
"def strip_overridden_assignments(reslst):\n lines_to_remove = []\n previous_assign = {}\n for p, (varname, value, rotl) in enumerate(reslst): # pylint: disable-msg=W0612\n if varname is not None:\n line = previous_assign.get(varname)\n if line is not None:\n lines_to_remove.append(line)\n previous_assign[varname] = p\n lines_to_remove.sort(reverse=True)\n for line in lines_to_remove:\n del reslst[line]\n return reslst",
"def remove(self):\n for word in self.words:\n word._mwt = None # pylint: disable=W0212\n self.root.multiword_tokens = [tok for tok in self.root.multiword_tokens if tok != self]",
"def __remove_duplicates_fast_memory_heavy(self):\n for (item, start_node) in self.distinct_map.items():\n current = start_node.next_alike\n while current is not None:\n self.__remove_node(current) \n current = current.next_alike",
"def remove_dups(ll: LinkedList) -> LinkedList:\n n = ll.head\n unique_vals = {n.data} # set literal\n output_ll = LinkedList(n.data)\n while n is not None:\n if n.data not in unique_vals:\n output_ll.append_to_tail(n.data)\n unique_vals.add(n.data)\n n = n.next\n return output_ll",
"def deduplicate_document_list(self, docs):\n docs = self.flatten_nested_dict_lists(docs)\n return [ i for n, i in enumerate(docs) if i not in docs[n + 1:] ]",
"def _RemoveDuplicateNetworks(self, network_list):\n result_list = []\n result_dict = {}\n for node, comment in network_list:\n result_dict[str(node)] = (node, comment)\n for node in result_dict:\n result_list.append(result_dict[node])\n return result_list",
"def remove_duplicates(tag_list: List[dict]) -> List[dict]:\n no_duplicates = []\n for i in tag_list:\n if i not in no_duplicates:\n no_duplicates.append(i)\n return no_duplicates",
"def remove_identical_seqs(self):\n debug(\"remove identical seqs\")\n if len(self.new_seqs_otu_id) > 0:\n if _DEBUG:\n sys.stdout.write(\"running remove identical twice in a row\"\n \"without generating new alignment will cause errors. skipping\\n\")\n return\n tmp_dict = dict((taxon.label, self.data.aln[taxon].symbols_as_string()) for taxon in self.data.aln)\n old_seqs = tmp_dict.keys()\n # Adding seqs that are different, but needs to be maintained as diff than aln that the tree has been run on\n avg_seqlen = sum(self.data.orig_seqlen) / len(self.data.orig_seqlen) # HMMMMMMMM\n assert self.config.seq_len_perc <= 1\n seq_len_cutoff = avg_seqlen * self.config.seq_len_perc\n for gb_id, seq in self.new_seqs.items():\n if gb_id.split(\".\") == 1:\n debug(gb_id)\n if self.blacklist is not None and gb_id in self.blacklist:\n debug(\"gb_id in blacklist, not added\")\n pass\n elif gb_id in self.newseqs_acc: # added to increase speed. often seq was found in another blast file\n debug(\"passed, was already added\")\n pass\n else:\n if len(seq.replace(\"-\", \"\").replace(\"N\", \"\")) > seq_len_cutoff:\n if self.config.blast_loc != \"remote\":\n tax_name = None\n # ######################################################\n # ### new implementation of rank for delimitation\n if type(self.mrca_ncbi) is int:\n mrca_ncbi = self.mrca_ncbi\n elif len(self.mrca_ncbi) == 1:\n mrca_ncbi = list(self.mrca_ncbi)[0]\n else:\n debug(self.mrca_ncbi)\n debug(\"think about something to do!\")\n rank_mrca_ncbi = self.ids.ncbi_parser.get_rank(mrca_ncbi)\n # get rank to delimit seq to ingroup_mrca\n # get name first\n if gb_id[:6] == \"unpubl\":\n debug(\"unpubl data\")\n debug(self.data.gb_dict[gb_id])\n tax_name = self.data.gb_dict[gb_id][u\"^ot:ottTaxonName\"]\n ncbi_id = self.data.gb_dict[gb_id][u\"^ncbi:taxon\"]\n if tax_name is None:\n tax_name = self.data.gb_dict[gb_id][u'^user:TaxonName']\n if ncbi_id is None:\n debug(tax_name.split(\" \")[0])\n tax_lin_name = tax_name.split(\" \")[0]\n tax_lin_name = tax_lin_name.split(\"_\")[0]\n print(tax_lin_name)\n ncbi_id = self.ids.ncbi_parser.get_id_from_name(tax_lin_name)\n # ncbi_id = 00000\n elif len(gb_id.split(\".\")) >= 2:\n if gb_id in self.data.gb_dict.keys() and 'staxids' in self.data.gb_dict[gb_id].keys():\n tax_name = self.data.gb_dict[gb_id]['sscinames']\n ncbi_id = self.data.gb_dict[gb_id]['staxids']\n else:\n tax_name = self.ids.find_name(acc=gb_id)\n if tax_name is None:\n sys.stderr.write(\"no species name returned for {}\".format(gb_id))\n ncbi_id = self.ids.map_acc_ncbi(gb_id)\n assert tax_name is not None\n assert ncbi_id is not None\n tax_name = str(tax_name).replace(\" \", \"_\")\n input_rank_id = self.ids.ncbi_parser.get_downtorank_id(ncbi_id, rank_mrca_ncbi)\n # #######################################################\n if input_rank_id == mrca_ncbi: # belongs to ingroup mrca -> add to data, if not, leave it out\n # debug(\"input belongs to same mrca\")\n self.newseqs_acc.append(gb_id)\n otu_id = self.data.add_otu(gb_id, self.ids)\n self.seq_dict_build(seq, otu_id, tmp_dict)\n else:\n self.newseqs_acc.append(gb_id)\n otu_id = self.data.add_otu(gb_id, self.ids)\n self.seq_dict_build(seq, otu_id, tmp_dict)\n old_seqs_ids = set()\n for tax in old_seqs:\n old_seqs_ids.add(tax)\n assert old_seqs_ids.issubset(tmp_dict.keys())\n for tax in old_seqs:\n del tmp_dict[tax]\n self.new_seqs_otu_id = tmp_dict # renamed new seq to their otu_ids from GI's, but all info is in self.otu_dict\n debug(\"len new seqs dict after remove identical\")\n 
debug(len(self.new_seqs_otu_id))\n with open(self.logfile, \"a\") as log:\n log.write(\"{} new sequences added from genbank after removing identical seq, \"\n \"of {} before filtering\\n\".format(len(self.new_seqs_otu_id), len(self.new_seqs)))\n self.data.dump()",
"def list_dedup(in_list):\n seen = set()\n return [s for s in in_list if s not in seen and not seen.add(s)]",
"def removeDuplicates(self, nodes):\n dict = {}\n [dict.setdefault(n.myLabel(),[]).append(n) for n in nodes]\n\n for label in dict.keys():\n list = dict[label]\n if len(list)>1:\n testKit = list[0]\n for kit in list:\n if not testKit.equivalent(kit):\n msg = 'Node with label %s has non-equivalent '% label\n msg += 'duplicates: removing all such nodes'\n logger.error(msg)\n [nodes.remove(n) for n in list]\n break",
"def __remove_duplicates_memory_heavy(self):\n m = {self.item: 1} \n current = self.next\n prev = self\n nodes_to_remove = []\n\n while current is not None:\n\n if current.item not in m:\n m[current.item] = 1\n else:\n nodes_to_remove.append((prev, current))\n\n prev = current \n current = current.next\n \n for i in range(len(nodes_to_remove) - 1):\n this = nodes_to_remove[i]\n nx = nodes_to_remove[i + 1] \n if this[1] == nx[0]:\n nodes_to_remove[i + 1] = (this[0], nx[1]) \n continue\n \n self.__remove(this) \n\n if len(nodes_to_remove) != 0:\n self.__remove(nodes_to_remove[-1])",
"def remove_duplicates(lnk): \n # cool problem!\n def helper(lnk):\n if lnk is Link.empty or lnk.rest is Link.empty:\n pass\n else:\n if lnk.rest.first == lnk.first:\n lnk.rest = lnk.rest.rest\n helper(lnk)\n else:\n helper(lnk.rest)\n helper(lnk)\n return lnk"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
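A sketch of chaining the nmlprune record above with nmldict, ignoring one variable when comparing; the file paths and the date_manager_nml/start_time names are made up, and nmldiff (called inside nmlprune) is assumed to be defined alongside it.

nmlall = nmldict(['run1/input.nml', 'run2/input.nml', 'run3/input.nml'])
pruned = nmlprune(nmlall, ignore={'date_manager_nml': ['start_time']})
print(list(pruned.keys()))  # only the files whose namelists differ from the preceding one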
Reduces a list of DNA sequences to those whose GC content lies strictly inside the range (lower, upper). If verbose is True, the actual GC content of each sequence is printed.
|
def CGContent(lst, lower, upper, verbose=True):
    newlist = []
    for item in lst:
        # fraction of G and C characters in this sequence
        cont = sum(1 for char in item if char in ("G", "C")) / float(len(item))
        if lower < cont < upper:
            newlist.append(item)
        if verbose:
            print(cont)
    return newlist
|
[
"def cleanSeq(seq, db):\n #print repr(seq)\n if seq.startswith(\"random\"):\n seq = rndSeq(800)\n lines = seq.strip().splitlines()\n #print \"<br>\"\n #print \"before fasta cleaning\", \"|\".join(lines)\n if len(lines)>0 and lines[0].startswith(\">\"):\n line1 = lines.pop(0)\n #print \"<br>\"\n #print \"after fasta cleaning\", \"|\".join(lines)\n #print \"<br>\"\n\n newSeq = []\n nCount = 0\n for l in lines:\n if len(l)==0:\n continue\n for c in l:\n if c not in \"actgACTGNn\":\n nCount +=1\n else:\n newSeq.append(c)\n seq = \"\".join(newSeq)\n\n msgs = []\n if len(seq)>MAXSEQLEN and db!=\"noGenome\":\n msgs.append(\"<strong>Sorry, this tool cannot handle sequences longer than %d bp</strong><br>Below you find the results for the first %d bp of your input sequence.<br>\" % (MAXSEQLEN, MAXSEQLEN))\n seq = seq[:MAXSEQLEN]\n if len(seq)>MAXSEQLEN_NOGENOME and db==\"noGenome\":\n msgs.append(\"<strong>Sorry, this tool cannot handle sequences longer than %d bp when specifying 'No Genome'.</strong><br>Below you find the results for the first %d bp of your input sequence.<br>\" % (MAXSEQLEN_NOGENOME, MAXSEQLEN_NOGENOME))\n seq = seq[:MAXSEQLEN_NOGENOME]\n\n if nCount!=0:\n msgs.append(\"Sequence contained %d non-ACTGN letters. They were removed.\" % nCount)\n\n return seq, \"<br>\".join(msgs)",
"def chop_dna(dna):\n read_len = 150\n max_ovl = 50\n min_coverage = 5\n out = []\n\n dna_len = len(dna)\n base_id = dna.id\n starts = []\n start = 0\n read_n = math.floor((dna_len - max_ovl)/(read_len - max_ovl))\n if read_n > 1:\n ovl_len = (read_len * read_n - dna_len)/(read_n - 1)\n else:\n ovl_len = max_ovl\n\n cnt = 0\n for i in range(read_n):\n for ii in range(min_coverage):\n if i == read_n - 1:\n out_seq = dna[int(start) : ]\n else:\n out_seq = dna[int(start) : int(start + read_len)]\n\n out_seq.id = base_id + \"_\" + str(cnt)\n out_seq.letter_annotations[\"phred_quality\"] = [40] * len(out_seq)\n out.append(out_seq)\n cnt += 1\n\n start += (read_len - ovl_len)\n\n return out",
"def align_contigs(scaffold, contigs_data, contigs_seq):\n\n #print \"scaffold:\", scaffold\n #print \"contigs_data:\", contigs_data\n #print \"contigs_seq:\", contigs_seq\n\n scaffold_list = list(scaffold)\n for cd in contigs_data:\n remapped_Ns = 0\n #print cd\n\n sequence = contigs_seq[cd[\"contig_id\"]]\n pos_initial = cd[\"contig_pos_initial\"]\n pos_final = cd[\"contig_pos_final\"]\n orientation = cd[\"orientation\"]\n\n if orientation == '+':\n #print \"orientacion +\"\n contig_position = len(sequence)-1\n scaffold_position = pos_initial + pos_final - 1\n while scaffold_position > pos_initial:\n if sequence[contig_position] == \"N\":\n scaffold_list[scaffold_position] = \"N\"\n remapped_Ns += 1\n contig_position -= 1\n scaffold_position -= 1\n\n elif orientation == '-':\n #print \"orientacion -\"\n contig_position = 0\n scaffold_position = pos_initial + pos_final - 1\n while scaffold_position > pos_initial: \n if sequence[contig_position] == \"N\":\n scaffold_list[scaffold_position] = \"N\"\n remapped_Ns += 1\n scaffold_position -= 1\n contig_position += 1\n\n return \"\".join(scaffold_list)",
"def trim_g4_chr_with_seq(base_dir):\n #base_dir='/Users/Yun/Documents/bacteria_G4/D_thermus/'\n G4_dir = base_dir + \"all_G4/\"\n if not os.path.isdir(base_dir + 'all_G4_with_seq'):\n os.mkdir(base_dir + 'all_G4_with_seq/')\n for i in os.listdir(G4_dir):\n if i.startswith('.'):\n continue ## ignore the hidden files from apple\n with open(G4_dir+i, 'r') as fp:\n lines = fp.readlines()\n newlines = []\n for line in lines:\n line = line.split('\\t')\n seq_name = line[0].split(' ')[0]\n newlines.append((seq_name, line[1], line[2], line[6].split()[0], \\\n line[4], line[5]))\n ## save as bed6 format later\n if len(newlines) > 0:\n with open(base_dir+'all_G4_with_seq/' + i, 'w') as f0:\n ## substitude GCF with GCA to match GFF files\n f0.write('\\n'.join('{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\\\n x[0], x[1], x[2], x[3], x[4], x[5]) for x in newlines))\n else:\n continue",
"def test_gc_map_short_sequences():\n assert gc_map.gc_map('ATGACTACGT', 4, 0.4) == 'atgaCTAC'\n assert gc_map.gc_map('ATGACTACGT', 4, 0.5) == 'atgaCTAC' # Must be greater than or equal to the threshold",
"def gc_map(seq, block_size, gc_thresh):\n\n new_seq = ''\n for i in range(len(seq) // block_size):\n if gc_blocks(seq, block_size)[i] < gc_thresh:\n new_seq += seq[i*block_size:i*block_size + block_size].lower()\n else:\n new_seq += seq[i*block_size:i*block_size + block_size].upper()\n\n return new_seq",
"def to_mrna(seq):\n start_codon = \"AUG\"\n stop = [\"UAG\", \"UGA\", \"UAA\"]\n start_positions = []\n final_mrnas = []\n i = 0\n while i < len(seq) - 2:\n if seq[i:i+3] == start_codon: # At start codon\n start_positions.append(i)\n i += 3\n\n for pos in start_positions:\n mrna = \"\"\n i = pos\n is_orf = True\n while i < (len(seq)-2) and is_orf:\n if seq[i:i+3] in stop: # Stop codon reached\n is_orf = False\n final_mrnas.append(mrna)\n else:\n mrna += seq[i:i+3]\n i += 3\n\n return final_mrnas",
"def GC_content(dna):\n g = dna.count('G')\n c = dna.count('C')\n ret = (g+c)/len(dna)\n return ret",
"def collapse_gaps(tmp_file, output):\n\n print \"** Collapsing repeats around gaps **\"\n\n seq_count = 0\n collapse_count = 0\n not_collapse_count = 0\n\n # open output file\n fout = open(output, 'w')\n\n seqiter = SeqIO.parse(open(tmp_file), 'fasta')\n for seq in seqiter:\n #print \"checking\", seq.id, \"length\", len(seq.seq)\n\n seq_count = seq_count + 1\n new_seq = \"\"\n prev_gap_end = 0\n\n # find gaps and get start and end co-ords\n p = re.compile(\"N+\")\n for m in p.finditer(str(seq.seq)):\n #print \"start=\", m.start(), \"end=\", m.end()\n gap_start = m.start()\n gap_end = m.end()\n\n #print \"first N at\", gap_start + 1\n #print \"last N at\", gap_end\n\n gap_length = int(gap_end) - int(gap_start)\n\n # get 200 bases before and after the gap\n before_gap_seq = seq.seq[gap_start - 200:gap_start - 1]\n after_gap_seq = seq.seq[gap_end:gap_end + 200]\n if collapse(before_gap_seq, after_gap_seq, gap_length) == 1:\t# collapse\n # record seq from end of prev gap to start of current gap (which includes the collapsed repeat)\n new_seq = new_seq + seq.seq[prev_gap_end:gap_start]\n collapse_count = collapse_count + 1\n else:\t# don\\t collapse\n # record seq from end of prev gap to end of current gap\n new_seq = new_seq + seq.seq[prev_gap_end:gap_end]\n not_collapse_count = not_collapse_count + 1\n\n # record the prev gap end\n prev_gap_end = gap_end\n\n # add the sequence after the final gap\n new_seq = new_seq + seq.seq[prev_gap_end:]\n\n # write the new seq to a file\n fout.write(\">{0}\\n{1}\\n\".format(seq.id, new_seq))\n\n fout.close\n\n print \"DONE - {0} sequences processed, {1} collapsed, {2} not collapsed\".format(seq_count, collapse_count, not_collapse_count)",
"def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa",
"def fix_bases_mask(bases_mask,barcode_sequence):\n # Split barcode sequence string into components\n indexes = barcode_sequence.split('-')\n # Check input reads\n reads = []\n i = 0\n for read in bases_mask.split(','):\n new_read = read\n if read.startswith('I'):\n input_index_length = int(read[1:])\n try:\n actual_index_length = len(indexes[i])\n new_read = \"I%d\" % actual_index_length\n except IndexError:\n # No barcode for this read\n actual_index_length = 0\n new_read = \"\"\n if input_index_length > actual_index_length:\n # Actual index sequence is shorter so adjust\n # bases mask and pad with 'n's\n new_read = new_read + \\\n 'n'*(input_index_length-actual_index_length)\n i += 1\n reads.append(new_read)\n # Assemble and return updated index tags\n return ','.join(reads)",
"def capture_flanking_coordinates(gene_list_dict, chr_contigs):\n\n # If gene is on - strand, START from higher number and move down to end.\n # Therefore, UPSTREAM is +500 from highest end to start.\n # If gene is on + strand, start from LOWEST number and move up to end.\n # Therefore, UPSTREAM is -500 from lowest end to start.\n\n # For testing purposes, will print out 500bp upstream + whole CDS in order to\n # compare to traditional sequence retrieval techniques. Can chop off later\n # if only interested in upstream regions.\n\n # Because the chromosomal sequences are large, we will iterate through each one\n # in turn, and then search for the particular genes in gene_list_dict that need\n # to reference the given contig.\n\n\n output_handle = open(outfilename, \"w\")\n\n START_BUFFER = 500 # Number of bp to capture before transcription start\n END_BUFFER = 0 # Number of bp to capture after transcription end\n\n for seq_record in SeqIO.parse(chr_contigs, \"fasta\"):\n fasta_chr = seq_record.description\n fasta_to_gff_dict, gff_to_fasta_dict = build_fasta_to_gff_chr_names()\n gff_chr = fasta_to_gff_chr_names(fasta_to_gff_dict, seq_record.description)\n for gene in gene_list_dict:\n if gene_list_dict[gene][\"Chromosome\"] == gff_chr:\n # Create new SeqRecord\n out_handle = gene\n if gene_list_dict[gene][\"Strand\"] == \"+\":\n start_cut = int(gene_list_dict[gene][\"Start\"]) - 1 - START_BUFFER\n end_cut = int(gene_list_dict[gene][\"End\"]) + END_BUFFER\n out_sequence = seq_record.seq[start_cut:end_cut]\n elif gene_list_dict[gene][\"Strand\"] == \"-\":\n start_cut = int(gene_list_dict[gene][\"Start\"]) - 1 - END_BUFFER\n end_cut = int(gene_list_dict[gene][\"End\"]) + START_BUFFER\n out_sequence = seq_record.seq[start_cut:end_cut]\n out_sequence = out_sequence.reverse_complement()\n if only_upstream == \"True\":\n out_sequence = out_sequence[:500]\n out_record = SeqRecord(out_sequence, id= out_handle)\n SeqIO.write(out_record, output_handle, \"fasta\")\n output_handle.close()",
"def parse_nucleotides(seq, number, map_file):\n\n # Store the arguments\n current_nucleotides = seq\n i = number\n map = map_file\n # If this sequence is a contig, store its content in a new file and write the roadmap\n if(IsContig(current_nucleotides)):\n # Write the info about the current sequence in the map file\n map.write(\">Sequence\" + str(i) + \"\\t\"\n + str(len(current_nucleotides)) + \"\\n\")\n # Parse the contig\n parse_cntg(seq=current_nucleotides, number=i,\n map_file=map, folder=\".\", seq_index=1)\n # If this sequence is a scaffold, store its contigs in a new folder\n elif(IsScaffold(current_nucleotides)):\n # Create a directory and write the map\n current_dir = \"./Sequence\" + str(i)\n os.mkdir(current_dir)\n map.write(\">Sequence\" + str(i) + \"\\t\"\n + str(len(current_nucleotides)) + \"\\n\")\n # Parse the scaffold\n parse_scf(seq=current_nucleotides, folder=current_dir,\n map_file=map, seq_index=1)\n # If it is not a contig or a scaffold, it is a chromosome\n else:\n # Create a directory and write the map\n current_dir = \"./Sequence\" + str(i)\n os.mkdir(current_dir)\n map.write(\">Sequence\" + str(i) + \"\\t\"\n + str(len(current_nucleotides)) + \"\\n\")\n # Store all the subsequences separated by N-10000+ gaps in a list\n pattern = \"N\" * 10000\n super_subseq_list = re.split(pattern + \"+\", current_nucleotides)\n # Store all the N-10000+ gaps in one list, remove empty elements and those which are lower than 10.000\n super_gaps_list = re.split(\"[^N]+\", current_nucleotides)\n super_gaps_list = filter(None, super_gaps_list)\n super_gaps_list = [g for g in super_gaps_list if len(g) >= 10000]\n # If there are no super gaps, the whole sequence is like a large scaffold\n if(len(super_gaps_list) == 0):\n # All the sequence is a large scaffold\n parse_scf(seq=current_nucleotides, folder=current_dir,\n map_file=map, seq_index=1)\n # Otherwise parse each super subsequence\n else:\n # Parse each of the super subsequences\n parse_super(seq_list=super_subseq_list,\n gap_list=super_gaps_list, folder=current_dir, map_file=map)",
"def convert2aa(sequence):\r\n\r\n # sequence = \"\".join([x.upper() for x in sequence]) # converts lowercase to uppercase\r\n\r\n number_of_codons = len(sequence)/3\r\n aa_seq = []\r\n\r\n for nmbr in list(range(1, int(number_of_codons)+1)): # goes through each codon converting it to an aa\r\n\r\n if \"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3] in codon2aa:\r\n aa_seq.append(codon2aa[\"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3]])\r\n else:\r\n aa_seq.append(\"XXX\")\r\n\r\n return \"\".join(aa_seq)",
"def trim(aligned_headers_seqs):\n if not (isinstance(aligned_headers_seqs, list) and len(aligned_headers_seqs) >= 2):\n raise ValueError, \"Input does not specify at least two aligned sequences.\"\n ref_seq = aligned_headers_seqs[0].seq# str yields the sequence\n #print(ref_seq)\n # Getting the positions to strip from the start\n go=True\n i=0\n start_excess=0\n while (go==True):\n if (ref_seq[i]=='-'):\n start_excess=i # strip 0 to i\n else:\n go=False\n i=i+1\n # Getting the posisiton to remove from the end\n start_excess=start_excess+1 # slicing is inclusive on this end\n end=True\n i=len(ref_seq)-1\n end_excess=i\n print(i)\n while (end==True):\n if (ref_seq[i]=='-'):\n end_excess=i # strip 0 to i\n else:\n end=False\n i=i-1\n\n print \"%s bases taken off the 5' end\" % str(start_excess)\n print \"%s bases taken off the 3' end \" % str(len(ref_seq)-1-end_excess)\n\n\n\n samp_seq=aligned_headers_seqs[1]\n samp_seq.seq=samp_seq.seq[start_excess:end_excess]\n\n return([samp_seq,start_excess,end_excess+1]) # In a 1 base system (like R) The start will be the last base to not be exclued on the 5' and end is the last base off the end to be included.",
"def fastaextract(inputfile, fastafile):\n subsequences = {}\n\n if os.path.isfile(inputfile):\n file = open(inputfile, \"r\") \n for line in file:\n if line.startswith(\"#\"): continue\n tokens = line.strip().split(\"\\t\")\n if len(tokens) == 1:\n chrom = tokens[0]\n strand = \"+\"\n zstart = \"-1\"\n end = \"-1\"\n else:\n assert len(tokens) == 4\n chrom,strand,zstart,end = tokens\n \n assert strand in [\"+\", \"-\"], \"%s\" % line\n zstart = int(zstart)\n end = int(end)\n if chrom not in subsequences: subsequences[chrom] = []\n subsequences[chrom].append((strand,zstart,end))\n else:\n if inputfile.find(\":\") == -1:\n chrom = inputfile\n strand = \"+\"\n zstart = \"-1\"\n end = \"-1\" \n else:\n pattern = re.compile(\"(\\S+):([+,-]):(\\d+)-(\\d+)\")\n interval = pattern.match(inputfile)\n chrom,strand,zstart,end = interval.groups()\n\n assert strand in [\"+\", \"-\"]\n zstart = int(zstart)\n end = int(end)\n if chrom not in subsequences: subsequences[chrom] = []\n subsequences[chrom].append((strand,zstart,end))\n\n for k,vs in subsequences.items():\n vs.sort()\n\n records = fasta(fastafile)\n for r in records:\n s = r.fastasequence\n \n if s.name in subsequences:\n for strand,zstart,end in subsequences[s.name]:\n if strand == \"+\":\n if zstart == -1 and end == -1:\n print \">%s\" % (s.name) \n print fastasequence.prettyprint_dna(s.seq, 60)\n else:\n print \">%s:%d-%d\" % (s.name,zstart,end)\n print fastasequence.prettyprint_dna(s.seq[zstart:end], 60)\n else:\n assert zstart >= 0\n assert end >= 0\n print \">%s:%d-%d (reverse-complement)\" % (s.name,zstart,end)\n print fastasequence.prettyprint_dna(fastasequence.reverse_complement(s.seq[zstart:end]), 60)\n\n records.close()",
"def ranges_to_coverage(rngs,threads=1):\n def do_chr(rngs):\n \"\"\"do one chromosomes sorting\n :param rngs:\n :type rngs: GenomicRange[]\n \"\"\"\n #starts = sorted(range(0,len(rngs)), key=lambda x: rngs[x].start)\n #print starts\n #ends = sorted(range(0,len(rngs)), key=lambda x: rngs[x].end)\n start_events = [x.start for x in rngs]\n end_events = [x.end+1 for x in rngs]\n indexed_events = {}\n for e in start_events:\n if e not in indexed_events: indexed_events[e] = {'starts':0,'ends':0}\n indexed_events[e]['starts']+=1\n for e in end_events:\n if e not in indexed_events: indexed_events[e] = {'starts':0,'ends':0}\n indexed_events[e]['ends']+=1\n cdepth = 0\n pstart = None\n pend = None\n outputs = []\n ordered_events = sorted(indexed_events.keys())\n for loc in ordered_events:\n prev_depth = cdepth # where we were\n # see where we are before the change\n cdepth += indexed_events[loc]['starts']\n cdepth -= indexed_events[loc]['ends']\n if prev_depth > 0 and prev_depth != cdepth:\n outputs.append([rngs[0].chr,pstart,loc-1,prev_depth]) # output what was before this if we are in something\n if prev_depth != cdepth or cdepth == 0:\n pstart = loc\n #print outputs\n return outputs\n \n class Queue:\n \"\"\"Simple class to be able to use get function to retreive a value\"\"\"\n def __init__(self,val):\n self.val = [val]\n def get(self):\n return self.val.pop(0)\n\n ### START MAIN ####\n srngs = sort_genomic_ranges(rngs)\n # get the leftmost unique range\n chr = srngs[0].chr\n buffer = []\n results = []\n prelim = []\n for b in srngs:\n if b.chr != chr:\n rs = do_chr(buffer[:])\n for r in rs: \n results.append(GenomicRange(r[0],r[1],r[2]))\n results[-1].set_payload(r[3])\n buffer = []\n buffer.append(b)\n chr = b.chr\n if len(buffer) > 0:\n rs = do_chr(buffer[:])\n for r in rs: \n results.append(GenomicRange(r[0],r[1],r[2]))\n results[-1].set_payload(r[3])\n return results",
"def sequence_cleanser(self):\n non_nucleotide_characters_removed = []\n dict_results = {}\n nucleotide_sequence = self.nucleotide_sequence\n nucleotide_list = self.nucleotide_list\n try:\n nucleotide_sequence = nucleotide_sequence.upper()\n for character in nucleotide_sequence:\n if character not in nucleotide_list:\n nucleotide_sequence = nucleotide_sequence.replace(character,\"\")\n non_nucleotide_characters_removed.append(character)\n dict_results[\"cleansed_nucleotide_sequence\"] = nucleotide_sequence\n dict_results[\"non_nucleotide_characters_removed\"] = non_nucleotide_characters_removed\n self.nucleotide_sequence = nucleotide_sequence\n return(dict_results)\n except:\n return(\"There was an error clensing the sequence, please retry.\")",
"def GC_content(sequence, recLength = 5, overhang = 12, window = 3,\n\t\t\t ymax = 1, ymin = -1):\n\tGC_array = []\n\tmaxGC = 100\n\tminGC = 0\n\n\t# GC percentages\n\tfor bp in sequence:\n\t\tif bp.capitalize() in ['G', 'C']:\n\t\t\tGC_array.append(100)\n\t\telse:\n\t\t\tGC_array.append(0)\n\n\t# window weighting\n\tweights = np.repeat(1.0, window)/float(window)\n\trunningAverage = np.convolve(GC_array, weights, 'valid')\n\n\t# normalizing data\n\tnormalize = (ymax - ymin)*(runningAverage - minGC)/(maxGC - minGC) + ymin;\n\n\t# pulling out feature indecies\n\tmiddle = len(runningAverage)/2 + len(runningAverage)%2 - 1\n\tstart = middle - recLength/2\n\tend = middle + recLength/2 + 1\n\n\treturn GC_array, runningAverage[start-overhang:end+overhang], normalize[start-overhang:end+overhang]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
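A tiny worked example for CGContent above; the sequences are made up. Only "ATGC" has a GC fraction (0.5) strictly between the bounds, so it is the only one kept.

seqs = ["ATGC", "AATT", "GGCC", "ATGCGC"]
print(CGContent(seqs, 0.4, 0.6, verbose=False))  # ['ATGC']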
Return the first key of dictionary dic that maps to the given value.
|
def find_key(dic, val):
    # return the first key whose value equals val
    return [k for k, v in dic.items() if v == val][0]
|
[
"def find_key(dic, val):\n\treturn [k for k, v in dic.items() if v == val][0]",
"def find_first_key_of_a_value(val: int, d: dict) -> str:\n\n for k, v in d.items():\n if v == val:\n return k\n\n raise IndexError(\"Variable, val, must be a key in dictionary, d\")",
"def key(dict_, key_):\n try:\n return dict_[key_]\n except KeyError as e:\n log.info(e)",
"def keyvalue(dictionary, key):\n\n return dictionary[key]",
"def __find_dict_key_from_value(self, value):\n word_key = None\n for k, v in self.__words_translation_dict.iteritems():\n try:\n if v == value:\n word_key = k\n break\n except UnicodeDecodeError, e:\n import pdb; pdb.set_trace()\n\t\tpass\n \n\n return word_key",
"def values_to_keys(self, want_value, dictio):\n for key, is_value in dictio.items():\n if (is_value == want_value):\n return key",
"def _get_matching_dict_key(d: Mapping[T, Any], k: T) -> Tuple[T, T]:\n for key in d:\n if key == k:\n return k, key\n raise ValueError(\"%s not in %s\" % (k, d))",
"def _getKeyVal(char, desasc_dict):\n val = 0\n for key in desasc_dict.keys():\n if key.find(char) != -1:\n val = desasc_dict[key]\n break \n return (val)",
"def dictfind(dictionary, element):\r\n for (key, value) in dictionary.iteritems():\r\n if element is value:\r\n return key",
"def get_case_insensitive_dict_key(d: Dict, k: str) -> Optional[str]:\n for key in d.keys():\n if k.lower() == key.lower():\n return key\n return None",
"def find_keys(dic, val):\n return [k for k, v in dic.iteritems() if v == val]",
"def _find_dict_key(input_key):\n if \"probability\" in input_key:\n # this avoids duplicate results from key matching below\n return \"probability\"\n\n matching_keys = []\n for key in DEFAULT_UNITS.keys():\n if key in input_key:\n matching_keys.append(key)\n if len(matching_keys) != 1:\n msg = (\"Name '{}' is not uniquely defined in units.py; \"\n \"matching keys: {}\")\n raise KeyError(msg.format(input_key, matching_keys))\n\n return matching_keys[0]",
"def get_pk_from_dict(_dict, key):\n try:\n return int(_dict[key])\n except (TypeError, KeyError, ValueError):\n return None",
"def reverse_lookup(dictionary, value):\n reverse_dictionary = key_mashing(dictionary)\n\n try:\n return reverse_dictionary[value]\n\n except:\n print(\"Value not in dictionary!\")",
"def keywithminval (self, d):\n v=list(d.values())\n print(v)\n k=list(d.keys())\n return k[v.index(min(v))]",
"def getKey(self, key, val=None):\n print('crusherdict.py CrusherDict.getKey()')\n try:\n f=self.safeFetch(indexName(self.name,key))\n dbkey=entryName(self.name,f)\n if(val!=None):\n self.safeStore(dbkey, (key,val))\n return dbkey\n except KeyError:\n try:\n n=self.safeFetch(countName(self.name))\n if isinstance(n, str):\n tempn = int(n)\n if str(tempn) != str(n):\n return self.getKey(key, val)\n n = int(n)\n except KeyError:\n n=0\n dbkey=entryName(self.name,n)\n self.safeStore(dbkey, (key,val))\n self.safeStore(indexName(self.name,key), n)\n self.safeStore(countName(self.name),n+1)\n return dbkey",
"def get_key_from_id(species_id):\n definition = _get_by_id(species_id)\n return definition['key']",
"def get_key(struc):\r\n\r\n if not struc:\r\n return None\r\n\r\n key = struc.string_key()\r\n if not key:\r\n return None\r\n if key is True:\r\n key = struc.label\r\n else:\r\n key = struc.label + '/' + key\r\n return key",
"def get_label(value, dictionary):\n is_in_dictionary = False\n for v in dictionary.values():\n if numpy.array_equal(v, value):\n is_in_dictionary = True\n\n if is_in_dictionary:\n return [\n k for k, v in dictionary.iteritems() if numpy.array_equal(v, value)\n ][0]\n else:\n return 'Not a root: {}'.format(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
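A quick illustration of find_key above with a made-up dictionary; note that only the first matching key is returned, and a value with no matching key raises IndexError.

ages = {"ann": 30, "bob": 25, "cai": 30}
print(find_key(ages, 25))  # 'bob'
print(find_key(ages, 30))  # 'ann' (first key that maps to 30 in insertion order)
# find_key(ages, 99) would raise IndexError because no key maps to 99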
Return a dictionary's values ordered by its sorted keys.
|
def sortedDictValues(adict):
    # return the values ordered by their sorted keys
    keys = sorted(adict.keys())
    return [adict.get(key) for key in keys]
|
[
"def sort_dictionary(dct):\r\n key_list = []\r\n for i in range(0, len(dct)):\r\n dct[dct.keys()[i]].sort()\r\n key_list.append(dct.keys()[i])\r\n key_list.sort()\r\n return key_list",
"def sort_dict_by_keys(adict):\n return collections.OrderedDict(sorted(adict.items(), key=lambda t: t[0]))",
"def dict_items_sorted(d):\n result = []\n sorted_keys = sorted(list(d.keys()))\n for k in sorted_keys:\n result.append((k, d[k]))\n return result",
"def sort_dictionary_by_desc_value(dictionary):\n return {k: v for k, v in sorted(dictionary.items(), key=lambda item: item[1], reverse=True)}",
"def sort_dict(dict_= Dict[str, int]):\n alpha_sorted_dict = {}\n keylist = list(dict_.keys())\n keylist.sort()\n for key in keylist:\n alpha_sorted_dict[key] = dict_[key]\n\n score_sorted_dict = {}\n list_ = [alpha_sorted_dict[key] for key in alpha_sorted_dict]\n list_.sort(reverse=True)\n for score in list_:\n for key in alpha_sorted_dict:\n if score == alpha_sorted_dict[key]:\n score_sorted_dict[key] = score\n\n return score_sorted_dict",
"def sort_by_value(d):\n return sorted(d.iteritems(), key=lambda (k, v): (v, k), reverse=True)",
"def sort_dictionary_vals(predictions_dict):\n # {'n001.jpg': [('dog', 0.5),...], ...}\n val_sorted_dict = {}\n for key, cate_score_pairs in predictions_dict.items():\n val_sorted_dict[key] = sorted(cate_score_pairs, key=lambda x: x[1], reverse=True)\n return val_sorted_dict",
"def sortByValues(self):\n\t\tself._dKeys = sorted(self._dKeys, key=lambda tupl: self._dValues[tupl[1]])\n\t\treturn self",
"def sort_dict_values(family_dict: Dict[str, List[str]]) -> Dict[str, List[str]]:\n\n for last_name in family_dict:\n family_dict[last_name].sort()\n\n return family_dict",
"def sort_by_value(dct, reverse=True):\n return sorted(dct.items(), key=operator.itemgetter(1), reverse=reverse)",
"def _sorted(dictionary):\n d = dictionary.copy()\n case_insensitive = lambda k: str.lower(k) if isinstance(k, str) else k\n nonevalue = d.pop(None) if None in d else None\n values = [d[key] for key in sorted(d.keys(), key=case_insensitive)]\n if nonevalue:\n values.insert(0, nonevalue)\n return values",
"def _ordering(names, dict_):\n if not names:\n return sorted(dict_)\n else:\n out, d = [], dict_.copy()\n for name in names:\n if name in dict_: # add names only if present in dict_\n out.append(name)\n d.pop(name)\n return out + sorted(d) # append the rest of dict_ keys",
"def sortByKeys(self):\n\t\tself._dKeys = sorted(self._dKeys, key=lambda tupl: tupl[0])\n\t\treturn self",
"def sort_dict_by_key(func):\n copy_dict(func)\n\n # On my machine at least, this dict does not look sorted\n original_dict = {'a': 0, 's': 1, 'd': 2, 'f': 3}\n sorted_dict = func(original_dict)\n\n # Iteration is now ordered\n assertEqual(sorted_dict.items(), [('a', 0), ('d', 2), ('f', 3), ('s', 1)])\n\n # Larger test\n sorted_keys = list(itertools.product(string.ascii_lowercase, string.ascii_lowercase))\n shuffled_keys = list(sorted_keys)\n for i in xrange(10):\n random.shuffle(shuffled_keys)\n original_dict = dict(itertools.izip(shuffled_keys, itertools.count()))\n sorted_dict = func(original_dict)\n assertEqualIters(sorted_keys, sorted_dict.iterkeys())",
"def arrange(l: Dict[str, List[str]]) -> None:\n for key in l:\n l[key].sort()",
"def print_sorted_dictionary(my_dict):\n sorted_keys = sorted(my_dict)\n\n for k in sorted_keys:\n print(\"{}: {}\".format(k, my_dict[k]))",
"def canonsort_items(dict1, canonical_order=None):\r\n return [(k, dict1[k]) for \\\r\n k in canonsort_keys(dict1.keys(), canonical_order)]",
"def realign_dict_to_list(dict):\n sorted_list = []\n od_fp = sorted(dict.keys(), cmp=p_cmp)\n for index in od_fp:\n sorted_list.append(dict[index])\n return sorted_list",
"def sort_counts(counts_dict):\n return sorted(counts_dict.items(), key=lambda item: item[1], reverse=True)",
"def sort_by_frequency(frequency_dict):\n frequency_list = []\n for key, value in frequency_dict.items():\n frequency_list.append({\n 'key':key,\n 'count':value\n })\n return sorted(frequency_list,key=lambda r:r['count'],reverse=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
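A one-line example for sortedDictValues above with a made-up dictionary: the values come back as a list ordered by their sorted keys.

scores = {"b": 2, "a": 1, "c": 3}
print(sortedDictValues(scores))  # [1, 2, 3], i.e. the values for keys 'a', 'b', 'c'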
Passes a DNA sequence to the external MELTING Java app and returns the approximate melting temperature, entropy and enthalpy. Default values for Mg++ and DNA concentration can be adjusted, as well as the location of melting5.jar.
|
def TMelt(string, mg="12.5e-3", conc="5e-7", meltdir=testdir+'../MELTING5.0.3/executable', comp='', verbose=False):
thisdir=os.getcwd()
#os.chdir(meltdir)
#if os.path.exists('melttemp.txt'): os.remove('melttemp.txt')
if len(comp) >0: os.system('melting -S%s -C%s -G%s -P%s -Hdnadna -Omelttemp.txt'%(string,comp[::-1],mg,conc))
#os.system('java -jar melting5.jar -S %s -C %s -E Mg=%s -P %s -H dnadna -O melttemp.txt'%(string,comp[::-1],mg,conc))
else:os.system('melting -S%s -G%s -P%s -Hdnadna -Omelttemp.txt'%(string,mg,conc))
#os.system('java -jar melting5.jar -S %s -E Mg=%s -P %s -H dnadna -O melttemp.txt'%(string,mg,conc))
f=open('melttemp.txt')
dat=f.read()
f.close()
    if verbose:
        print(os.getcwd())
        print('melting -S%s -G%s -P%s -Hdnadna -Omelttemp.txt'%(string,mg,conc))
        print(dat)
dat=dat.replace(',','')
if dat.find('SEVERE') > -1: res={'temp':-300,'S':-1,'H':-1}
else:
res={}
se=re.finditer(': .*? J.mol', dat)
        res['H']=float(next(se).group(0)[2:-6])
        res['S']=float(next(se).group(0)[2:-6])
res["temp"]=float(re.search('temperature: .*? deg C', dat).group(0)[13:-6])
os.chdir(thisdir)
if os.path.exists('melttemp.txt'): os.remove('melttemp.txt')
return res
|
[
"def getMRna(self, sequence):\n assert(self.chromosomeInterval.chromosome == sequence.name)\n assert(self.chromosomeInterval.stop <= sequence.getLength())\n s = ''\n # chromosome ttttTTTTTTTTTTTtttt t: thin T: THICK\n # exon eeeeee eeee eee\n # mrna mmmm mmmm m\n for e in self.exons:\n if self.thickStart < e.start and e.stop < self.thickEnd:\n # squarely in the CDS\n s += sequence.sliceSequence(e.start, e.stop)\n elif (e.start <= self.thickStart and e.stop < self.thickEnd\n and self.thickStart < e.stop):\n # thickStart marks the start of the mRNA\n s += sequence.sliceSequence(self.thickStart, e.stop)\n elif e.start <= self.thickStart and self.thickEnd <= e.stop:\n # thickStart and thickEnd mark the whole mRNA\n s += sequence.sliceSequence(self.thickStart, self.thickEnd)\n elif (self.thickStart < e.start and self.thickEnd <= e.stop\n and e.start < self.thickEnd):\n # thickEnd marks the end of the mRNA\n s += sequence.sliceSequence(e.start, self.thickEnd)\n if not self.chromosomeInterval.strand:\n s = reverseComplement(s)\n return s",
"def mlats(self):\n self.mlatA, self.mlongA, self.mltA = self.mlat(self.latA, self.longA, self.altA/1000, self.stamps)\n self.mlatB, self.mlongB, self.mltB = self.mlat(self.latB, self.longB, self.altB/1000, self.stamps)\n self.mlatC, self.mlongC, self.mltC = self.mlat(self.latC, self.longC, self.altC/1000, self.stamps)",
"def main():\n\n # Set up basic logging\n logging.basicConfig(filename=\"EmbedMotif.log\",filemode=\"w\",\n level=logging.INFO,format=\"%(levelname)s: %(asctime)s: %(message)s\")\n logging.info(\"Starting EmbedMotif\")\n\n # Process command line arguments\n argparser = argparse.ArgumentParser(\n description=\"Generate sequences with an embedded motif.\")\n argparser.add_argument(\"FASTAfile\",type=str,help=\"Output FASTA file\")\n argparser.add_argument(\"-k\",dest=\"k\",type=int,default=6,\n help=\"Motif length\")\n argparser.add_argument(\"-d\",dest=\"d\",type=int,default=0,\n help=\"Number of don't cares\")\n argparser.add_argument(\"-n\",dest=\"n\",type=int,default=10,\n help=\"Number of sequences\")\n argparser.add_argument(\"-m\",dest=\"m\",type=int,default=250,\n help=\"Average sequence length\")\n argparser.add_argument(\"--mu\",dest=\"mu\",type=float,default=0.20,\n help=\"Probability of nucleotide mutation\")\n argparser.add_argument(\"-a\",\"-A\",dest=\"A\",type=float,default=0.25,\n help=\"Target probability for amino acid A\")\n argparser.add_argument(\"-c\",\"-C\",dest=\"C\",type=float,default=0.25,\n help=\"Target probability for amino acid C\")\n argparser.add_argument(\"-g\",\"-G\",dest=\"G\",type=float,default=0.25,\n help=\"Target probability for amino acid G\")\n args = argparser.parse_args()\n logging.info(\"Arguments sucessfully parsed\")\n FASTAfile = args.FASTAfile\n out_file = open(FASTAfile,mode=\"w\")\n logging.info(\"Output FASTA file is \" + FASTAfile)\n k = args.k\n logging.info(\"Motif length is \" + str(k))\n if k <= 1:\n raise ValueError(\"k must be at least 2\")\n d = args.d\n logging.info(\"Number of don't cares is \" + str(d))\n if d < 0 or d > k-2:\n raise ValueError(\"d must be between 0 and k-2\")\n n = args.n\n logging.info(\"Number of sequences is \" + str(n))\n if n <= 1:\n raise ValueError(\"n must be at least 2\")\n m = args.m\n logging.info(\"Average sequence length is \" + str(m))\n if math.ceil(0.75 * m) < k:\n raise ValueError(\"k cannot exceed 0.75*m\")\n mu = args.mu\n logging.info(\"Probability of nucleotide mutation is \" + str(mu))\n if mu < 0.0:\n raise ValueError(\"mu must be non-negative\")\n A = args.A\n logging.info(\"Probability of A is \" + str(A))\n if A <= 0.0 or A >= 1.0:\n raise ValueError(\"A must be strictly between 0 and 1\")\n C = args.C\n logging.info(\"Probability of C is \" + str(C))\n if C <= 0.0 or C >= 1.0:\n raise ValueError(\"C must be strictly between 0 and 1\")\n G = args.G\n logging.info(\"Probability of G is \" + str(G))\n if G <= 0.0 or G >= 1.0:\n raise ValueError(\"G must be strictly between 0 and 1\")\n T = 1 - (A+C+G)\n logging.info(\"Probability of T is \" + str(T))\n if T <= 0.0:\n raise ValueError(\"A+C+G must be strictly less than 1\")\n\n # Generate set of sequences with embedded motif\n S = EmbedMotif(k,d,n,m,mu,A,C,G,T)\n\n # Counting nucleotides generates some basic statistics\n count = {nucleotide:0.0 for nucleotide in [\"A\",\"C\",\"G\",\"T\"]}\n # Output FASTA file while accumulating statistics\n for i in range(len(S)):\n sequence = S[i]\n print(\">Sequence{0} length {1}\".format(i+1,len(sequence)),\n file=out_file)\n print(\"\\n\".join(wrap(sequence)),file=out_file)\n for nucleotide in sequence:\n count[nucleotide] += 1.0\n total = 0.0\n for nucleotide in [\"A\",\"C\",\"G\",\"T\"]:\n logging.info(\"Count of {0}'s is \".format(nucleotide)\n + str(int(count[nucleotide])))\n total += count[nucleotide]\n logging.info(\"Total nucleotide count is \" + str(int(total)))\n probability = 
{x:count[x]/total for x in [\"A\",\"C\",\"G\",\"T\"]}\n for nucleotide in [\"A\",\"C\",\"G\",\"T\"]:\n logging.info(\"Probability of {0} is \".format(nucleotide)\n + str(probability[nucleotide]))",
"def _get_article_values(self):\n # https://arxiv.org/pdf/hep-lat/0510074.pdf\n gb1_Mr0 = 6.25\n gb1_Mr0_error = 0.06\n gb1_Mr0_syserror = 0.06\n gb1_M = 2.560\n gb1_M_error = 0.035\n gb1_M_syserror = 0.120\n gb1_label = r\"Chen et al.\"\n gb1_color = \"#ff7f00\"\n gb1_ls = \"--\"\n\n # https://arxiv.org/pdf/1409.6459.pdf\n gb2_M = 2.563\n gb2_M_error = 0.034\n gb2_label = r\"Chowdhury et al.\"\n gb2_color = \"#ffff33\"\n gb2_ls = \"-.\"\n\n # https://arxiv.org/pdf/hep-lat/9901004.pdf\n gb3_M = 2.590\n gb3_M_error = 0.040\n gb3_M_syserror = 0.130\n gb3_Mr0 = 6.33\n gb3_Mr0_error = 0.07\n gb3_Mr0_syserror = 0.06\n gb3_label = r\"Morningstar et al.\"\n gb3_color = \"#a65628\"\n gb3_ls = \":\"\n\n eff_masses = []\n if self.meff_plot_type == \"m\":\n print \"No data for 'm'. Continuing.\"\n\n elif self.meff_plot_type == \"ma\":\n eff_masses.append({\n \"mass\": gb1_M,\n \"mass_error\": gb1_M_error,\n \"label\": gb1_label,\n \"color\": gb1_color,\n \"ls\": gb1_ls,\n })\n eff_masses.append({\n \"mass\": gb2_M,\n \"mass_error\": gb2_M_error,\n \"label\": gb2_label,\n \"color\": gb2_color,\n \"ls\": gb2_ls,\n })\n eff_masses.append({\n \"mass\": gb3_M,\n \"mass_error\": gb3_M_error,\n \"label\": gb3_label,\n \"color\": gb3_color,\n \"ls\": gb3_ls,\n })\n\n elif self.meff_plot_type == \"r0ma\":\n eff_masses.append({\n \"mass\": gb1_Mr0,\n \"mass_error\": gb1_Mr0_error,\n \"label\": gb1_label,\n \"color\": gb1_color,\n \"ls\": gb1_ls,\n })\n eff_masses.append({\n \"mass\": gb3_Mr0,\n \"mass_error\": gb3_Mr0_error,\n \"label\": gb3_label,\n \"color\": gb3_color,\n \"ls\": gb3_ls,\n })\n\n return eff_masses",
"def calibrate(self):\n\t\tself.stats = []\n\t\t#Work around the fact that hmmsim demands an amino alphabet\n\t\th = copy.deepcopy(self)\n\t\tif self.alpha.upper() != 'AMINO':\n\t\t\th.alph = h.alphabet = h.alpha = 'amino'\n\t\t\th.K = len(ALPHABETS['AMINO'])\n\t\t\tl5 = math.log(5)\n\t\t\tdef n(p):\n\t\t\t\ttry:\n\t\t\t\t\treturn p + 5\n\t\t\t\texcept TypeError:\n\t\t\t\t\treturn '*'\n\t\t\tfor state in h.states:\n\t\t\t\t#ATGC expanded to fill 1/4 of the amino space each, normalizing so that\n\t\t\t\t#they still sum to one \n\t\t\t\tif state.me:\n\t\t\t\t\tstate.me = (\n\t\t\t\t\t\t\t[n(state.me[0])] * 5 +\n\t\t\t\t\t\t\t[n(state.me[1])] * 5 +\n\t\t\t\t\t\t\t[n(state.me[2])] * 5 + \n\t\t\t\t\t\t\t[n(state.me[3])] * 5)\n\t\t\t\tstate.ie = (\n\t\t\t\t\t\t[n(state.ie[0])] * 5 +\n\t\t\t\t\t\t[n(state.ie[1])] * 5 +\n\t\t\t\t\t\t[n(state.ie[2])] * 5 + \n\t\t\t\t\t\t[n(state.ie[3])] * 5)\n\n\t\ts = StringIO()\n\t\twrite(h,s)\n\t\ts = s.getvalue()\n\n\t\t#Call hmmsim 3 times\n\t\tfor switch,stat in [('--vit', 'VITERBI'), ('--fwd', 'FORWARD'), \n\t\t\t\t('--msv', 'MSV')]:\n\t\t\tp = Popen(['hmmsim',switch,'-',], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n\t\t\t(stdout, stderr) = p.communicate(s)\n\t\t\tif p.returncode != 0:\n\t\t\t\traise RuntimeError('hmmsim error: ' + stderr)\n\n\t\t\tfor line in stdout.split('\\n'):\n\t\t\t\tif len(line.strip()) <= 0 or line.strip()[0] == '#':\n\t\t\t\t\tcontinue\n\t\t\t\tl = line.split()\n\t\t\t\tself.stats.append(('LOCAL', stat, float(l[2]), float(l[3])))",
"def get_tdwarf_mag():\n unicorn.catalogs.read_catalogs()\n from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit\n \n object = 'AEGIS-3-G141_00195'\n ra = phot.x_world[phot.id == object][0]\n dec = phot.y_world[phot.id == object][0]\n m140 = phot.mag_f1392w[phot.id == object][0]\n \n nmbs_cat, nmbs_zout, nmbs_fout = unicorn.analysis.read_catalogs(root=object)\n dr = np.sqrt((nmbs_cat.ra-ra)**2*np.cos(dec/360*2*np.pi)**2+(nmbs_cat.dec-dec)**2)*3600.\n h1mag = 25-2.5*np.log10((nmbs_cat.H1*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])\n h2mag = 25-2.5*np.log10((nmbs_cat.H2*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])\n hmag = 25-2.5*np.log10(((nmbs_cat.H1+nmbs_cat.H2)/2.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])\n jmag = 25-2.5*np.log10(((nmbs_cat.J2+nmbs_cat.J3)/2.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])\n jmag = 25-2.5*np.log10(((nmbs_cat.J3)/1.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])\n \n wirds = catIO.Readfile('/Users/gbrammer/research/drg/PHOTZ/EAZY/WIRDS/WIRDS_D3-95_Ks_ugrizJHKs_141927+524056_T0002.cat.candels')\n dr = np.sqrt((wirds.ra-ra)**2*np.cos(dec/360.*2*np.pi)**2+(wirds.dec-dec)**2)*3600.\n jwirds = wirds.jtot[dr == dr.min()][0]\n hwirds = wirds.htot[dr == dr.min()][0]\n \n print ' J H J-H H1 H2'\n print 'NMBS %5.2f %5.2f %5.2f %5.2f %5.2f' %(jmag, hmag, jmag-hmag, h1mag, h2mag)\n print 'WIRDS %5.2f %5.2f %5.2f' %(jwirds, hwirds, jwirds-hwirds)\n \n #### Vrba et al. (2004)\n #absH = np.array([14.52,14.78,15.07])\n #d =",
"def calculate_t():\n Data = pd.read_csv(path_csv) # Read Gaia query results\n \n # Focus on the indices that we are interested in:\n Data1 = Data.loc[ : , ['ra', 'dec', 'pmra', 'pmdec', 'pm', 'dist_arcsec', 'dist_to_host', 'z_pc', 'designation', 'ruwe']]\n \n # location and proper motion information of the host star(Gaia DR3 epoch)\n x0 = Data1.iloc[0, 0] # Right Ascension\n y0 = Data1.iloc[0, 1] # Declination\n cos_DEC0 = np.cos( np.radians( y0 ) ) # Cosine value of the declination\n pmra0 = Data1.iloc[0, 2] # Proper Motion in Right Ascension direction times cosine value of the declination\n pmdec0 = Data1.iloc[0, 3] # Proper Motion in Declination diretion\n z0 = Data1.iloc[0, 7] # the parallax of the host star (unit: pc)\n \n # location and proper motion information of all stars\n DEC = np.array( Data1['dec'] ) # Declination\n cos_DEC = np.cos( np.radians( DEC ) ) # Cosine value of the declination\n RA = np.array( Data1['ra'] ) # Right Ascension\n ID = np.array( Data1['designation'] ) # Gaia ID of all stars\n RUWE = np.array( Data1['ruwe'] ) # the renormalized unit weight error of all stars\n\n # relative Right Ascension and Declination between the host star and all neighboring stars(Gaia DR3 epoch)\n unit_c = 3600 * 1000 # Unit conversion:deg. to mas\n RA_r = ( RA - x0 ) * unit_c # unit: mas\n Dec_r = ( DEC - y0 ) * unit_c # unit: mas\n\n # relative proper motion between the host star and all neighboring stars(Gaia DR3 epoch)\n PMRA_r = np.array( Data1['pmra']/cos_DEC - pmra0/cos_DEC0 ) # relative Proper Motion in Right Ascension direction (unit: mas/yr)\n PMDEC_r = np.array( Data1['pmdec'] - pmdec0 ) # relative Proper Motion in Declination direction (unit: mas/yr)\n \n # calculate the closest approach distance\n # refer to Equation (B10) in paper for a, b, and c\n a = PMDEC_r\n b = - PMRA_r\n c = PMRA_r * Dec_r - PMDEC_r * RA_r\n \n f_mas = abs( c )/( ( a ** 2 + b ** 2 ) ** 0.5 ) # The distance of the closest approach (unit: mas)\n f_deg = f_mas/unit_c # Unit conversion:mas to deg\n d_pc = z0 * np.radians( f_deg ) * au.pc # Unit conversion:deg to pc\n d_au = d_pc.to( au.au ) # unit: au\n \n # Right Ascension of neighboring stars at the closest distance\n alpha = ( - a * c )/( a ** 2 + b ** 2 ) # unit: mas\n \n # the time of the closest encounter\n # refer to Equation (B12) in paper for tt\n t = - ( ( RA_r - alpha )/PMRA_r ) # unit: year\n \n # the radius of protoplanetary disk\n r = float( sys.argv[2] ) * au.AU\n \n Data0 = pd.DataFrame( {'designation':ID,'dd/au':d_au,'t/yr':t,'ra/deg':RA, 'dec/deg':DEC,'RUWE':RUWE} )\n \n # limit the time t within past t_traceback years\n # and limit the distance d within 10 times protoplanetary disk radius\n Data00 = Data0[( t <= 0 )&( t > -t_traceback )&( d_au < 10 * r )]\n \n # the number of stars that meet the constraints\n num = Data00.index.tolist()\n \n #return these values\n # num[0]: the row number in Gaia query result .csv file that meets the constraints\n # t[num[0]]: closest encounter time that meets the constraints\n \n return num[0],t[num[0]]",
"def calc_motive(self):\n # For brevity, \"dimensionless\" prefix omitted from \"position\" and \"motive\" variable names.\n \n self[\"motive_data\"] = {}\n self[\"motive_data\"][\"dps\"] = DimensionlessLangmuirPoissonSoln()\n\n self[\"motive_data\"][\"spclmbs_max_dist\"] = self.calc_spclmbs_max_dist()\n self[\"motive_data\"][\"saturation_pt\"] = self.calc_saturation_pt()\n self[\"motive_data\"][\"virt_critical_pt\"] = self.calc_virt_critical_pt()\n\n if self.calc_output_voltage() < self[\"motive_data\"][\"saturation_pt\"][\"output_voltage\"]:\n # Accelerating mode.\n self[\"motive_data\"][\"max_motive_ht\"] = self[\"Emitter\"].calc_barrier_ht()\n elif self.calc_output_voltage() > self[\"motive_data\"][\"virt_critical_pt\"][\"output_voltage\"]:\n # Retarding mode.\n self[\"motive_data\"][\"max_motive_ht\"] = self[\"Collector\"].calc_barrier_ht()\n else:\n # Space charge limited mode.\n output_current_density = optimize.brentq(self.output_voltage_target_function,\\\n self[\"motive_data\"][\"saturation_pt\"][\"output_current_density\"],\\\n self[\"motive_data\"][\"virt_critical_pt\"][\"output_current_density\"])\n \n barrier = physical_constants[\"boltzmann\"] * self[\"Emitter\"][\"temp\"] * \\\n np.log(self[\"Emitter\"].calc_saturation_current_density()/output_current_density)\n self[\"motive_data\"][\"max_motive_ht\"] = barrier + self[\"Emitter\"].calc_barrier_ht()",
"def getJ():\r\n ge=1.9985\r\n muB=cp.e*cp.hbar/(2*cp.m_e) #9.27e-24\r\n J=cp.mu_0*ge**2 * muB**2/(4*np.pi)/cp.hbar #in units of frequency\r\n return J",
"def get_fit_instructions():\n\n dust = {}\n dust[\"type\"] = \"CF00\"\n dust[\"eta\"] = 2.\n dust[\"Av\"] = (0., 8.0)\n dust[\"n\"] = (0.3, 1.5)\n dust[\"n_prior\"] = \"Gaussian\"\n dust[\"n_prior_mu\"] = 0.7\n dust[\"n_prior_sigma\"] = 0.3\n\n nebular = {}\n nebular[\"logU\"] = -3.\n\n dblplaw = {}\n dblplaw[\"massformed\"] = (0., 13.)\n dblplaw[\"metallicity\"] = (0.01, 2.5)\n dblplaw[\"metallicity_prior\"] = \"log_10\"\n dblplaw[\"alpha\"] = (0.01, 1000.)\n dblplaw[\"alpha_prior\"] = \"log_10\"\n dblplaw[\"beta\"] = (0.01, 1000.)\n dblplaw[\"beta_prior\"] = \"log_10\"\n dblplaw[\"tau\"] = (0.1, 15.)\n\n noise = {}\n noise[\"type\"] = \"GP_exp_squared\"\n noise[\"scaling\"] = (0.1, 10.)\n noise[\"scaling_prior\"] = \"log_10\"\n noise[\"norm\"] = (0.0001, 1.)\n noise[\"norm_prior\"] = \"log_10\"\n noise[\"length\"] = (0.01, 1.)\n noise[\"length_prior\"] = \"log_10\"\n\n calib = {}\n calib[\"type\"] = \"polynomial_bayesian\"\n\n calib[\"0\"] = (0.5, 1.5)\n calib[\"0_prior\"] = \"Gaussian\"\n calib[\"0_prior_mu\"] = 1.\n calib[\"0_prior_sigma\"] = 0.25\n\n calib[\"1\"] = (-0.5, 0.5)\n calib[\"1_prior\"] = \"Gaussian\"\n calib[\"1_prior_mu\"] = 0.\n calib[\"1_prior_sigma\"] = 0.25\n\n calib[\"2\"] = (-0.5, 0.5)\n calib[\"2_prior\"] = \"Gaussian\"\n calib[\"2_prior_mu\"] = 0.\n calib[\"2_prior_sigma\"] = 0.25\n\n fit_instructions = {}\n fit_instructions[\"dust\"] = dust\n fit_instructions[\"dblplaw\"] = dblplaw\n fit_instructions[\"noise\"] = noise\n fit_instructions[\"calib\"] = calib\n fit_instructions[\"nebular\"] = nebular\n fit_instructions[\"redshift\"] = (0., 10.)\n fit_instructions[\"t_bc\"] = 0.01\n fit_instructions[\"veldisp\"] = (40., 400.)\n fit_instructions[\"veldisp_prior\"] = \"log_10\"\n\n return fit_instructions",
"def genMET(self):\n metx = 0.\n mety = 0.\n for mp in self.genParts:\n if mp.particle.status()==1:\n pdgId = abs(mp.particle.pdgId())\n if pdgId==1000022 or pdgId==12 or pdgId==14 or pdgId==16:\n pt = mp.particle.pt()\n phi = mp.particle.phi()\n metx += pt*math.cos(phi)\n mety += pt*math.sin(phi)\n return MyMET(metx,mety)",
"def main():\n #\n # initialize variables\n #\n version_num=pmag.get_version()\n orient_file,samp_file = \"orient\",\"er_samples.txt\"\n args=sys.argv\n dir_path,out_path='.','.'\n default_outfile = True\n #\n #\n if '-WD' in args:\n ind=args.index('-WD')\n dir_path=args[ind+1]\n if '-OD' in args:\n ind=args.index('-OD')\n out_path=args[ind+1]\n if \"-h\" in args:\n print(main.__doc__)\n sys.exit()\n if \"-F\" in args:\n ind=args.index(\"-F\")\n orient_file=sys.argv[ind+1]\n default_outfile = False\n if \"-f\" in args:\n ind=args.index(\"-f\")\n samp_file=sys.argv[ind+1]\n orient_file=out_path+'/'+orient_file\n samp_file=dir_path+'/'+samp_file\n #\n # read in file to convert\n #\n ErSamples=[]\n Required=['sample_class','sample_type','sample_lithology','lat','long']\n Samps,file_type=pmag.magic_read(samp_file)\n Locs=[]\n OrKeys=['sample_name','site_name','mag_azimuth','field_dip','sample_class','sample_type','sample_lithology','lat','long','stratigraphic_height','method_codes','site_description']\n print(\"file_type\", file_type) # LJ\n if file_type.lower()=='er_samples':\n SampKeys=['er_sample_name','er_site_name','sample_azimuth','sample_dip','sample_class','sample_type','sample_lithology','sample_lat','sample_lon','sample_height','magic_method_codes','er_sample_description']\n elif file_type.lower()=='magic_measurements':\n SampKeys=['er_sample_name','er_site_name']\n else:\n print('wrong file format; must be er_samples or magic_measurements only')\n for samp in Samps:\n if samp['er_location_name'] not in Locs:Locs.append(samp['er_location_name']) # get all the location names\n for location_name in Locs:\n loc_samps=pmag.get_dictitem(Samps,'er_location_name',location_name,'T')\n OrOut=[]\n for samp in loc_samps:\n if samp['er_sample_name'] not in ErSamples:\n ErSamples.append(samp['er_sample_name'])\n OrRec={}\n if 'sample_date' in list(samp.keys()) and samp['sample_date'].strip()!=\"\":\n date=samp['sample_date'].split(':')\n OrRec['date']=date[1]+'/'+date[2]+'/'+date[0][2:4]\n for i in range(len(SampKeys)): \n if SampKeys[i] in list(samp.keys()):OrRec[OrKeys[i]]=samp[SampKeys[i]]\n for key in Required:\n if key not in list(OrRec.keys()):OrRec[key]=\"\" # fill in blank required keys \n OrOut.append(OrRec)\n loc=location_name.replace(\" \",\"_\") \n if default_outfile:\n outfile=orient_file+'_'+loc+'.txt'\n else:\n outfile=orient_file\n pmag.magic_write(outfile,OrOut,location_name)\n print(\"Data saved in: \", outfile)",
"def __init__( self, sequences, spEnergies, bgEnergy, chemicalPotential, numCells, names=[], unboundEnergy=1.59, controlCellRatio=0.1, secondTFspEnergies=[], secondTFchemicalPotential=0, secondTFintEnergies=[], indirectLocations=[], chromAccessibility=[]):\n\n #Total number of cells used in the ChIP sample.\n self.numCells = np.float64( numCells )\n self.chipCells = np.int64( numCells )\n\n #Binding mismatch energies for the target TF\n self.spEnergies = spEnergies \n\n #Total number of binding locations\n self.N = np.int64( len(spEnergies) ) \n\n #Background binding energy for the control sample\n self.bgEnergy = bgEnergy \n\n #Binding energy E_0 corresponding to the unbound state. By default, this\n #is set to a value where the occupancy probability at the highest\n #affinity site is 0.99\n self.unboundEnergy = unboundEnergy\n\n #Chemical potential of the target TF. Default value : 0\n self.chemicalPotential = chemicalPotential \n\n #Fraction of cells that is used for the control experiment. Default : 0.9\n self.controlCells = np.int64( controlCellRatio * self.numCells ) \n\n #Binding energies of the second TF that may be passed in \n #to the simulation.\n self.secondTFspEnergies = secondTFspEnergies\n\n #Chemical potential of the second TF\n self.secondTFchemicalPotential = secondTFchemicalPotential\n\n #Interaction energy of the target TF (A) with the second TF (B)\n self.secondTFintEnergies = secondTFintEnergies\n\n #Indices of locations that are indirectly bound.\n self.indirectLocations = indirectLocations\n\n #Each location is assigned a name, which is just a number between 1 and N,\n #where N is the number of binding locations. This is the column used\n #to join entries with second tables from the fragment extraction, \n #PCR amplification and sequencing processes.\n self.locations = pd.DataFrame( columns=['name'] )\n if len( names ) == 0:\n self.locations.loc[:,'name'] = ['region_' + str(idx) for idx in range( 1, self.N+1 )]\n else:\n self.locations.loc[:,'name'] = names\n\n #Binding energies of the TF A at each location.\n self.locations.loc[:,'energy_A'] = spEnergies\n\n #By default, all bound locations are assumed to be directly bound, and\n #are assigned the label 'direct'.\n self.locations.loc[:,'binding'] = 'direct'\n\n if len(sequences) > 0:\n self.locations.loc[:,'sequence'] = sequences\n else:\n self.locations.loc[:,'sequence'] = [\"\"]*self.N\n\n #Chromatin accessibility of genomic locations. This is set to 1\n #if no value is passed. \n if len(chromAccessibility) == 0:\n self.chromAccessibility = np.ones( self.N )\n else:\n self.chromAccessibility = chromAccessibility\n\n if len(secondTFintEnergies) > 0:\n locRange = np.arange(self.N)\n #Locations where the interaction energy is negative are\n #cooperatively bound by A and B\n coopLocations = locRange[ secondTFintEnergies < 0 ]\n\n #Locations where the interaction energy is positive are\n #competitively bound by A and B.\n compLocations = locRange[ secondTFintEnergies > 0 ]\n\n #Binding energies of the second TF B. \n self.locations.loc[:,'energy_B'] = secondTFspEnergies\n\n #Interaction energies between A and B at each genomic location.\n self.locations.loc[:,'int_energy'] = secondTFintEnergies\n\n #The binding type at each location is set to \"cooperative\" or \"competitive\"\n #at each location based on the interaction energy assigned to it. 
\n self.locations.loc[coopLocations,'binding'] = 'cooperative'\n self.locations.loc[compLocations,'binding'] = 'competitive'\n\n if len( indirectLocations ) > 0:\n self.locations.loc[indirectLocations,'binding'] = 'indirect'\n self.indirectLocations = indirectLocations\n\n #pTFbound are the probabilities of finding the target TF bound at each\n #genomic location. pBgBound is the probability of each of these\n #locations being bound in the input sample of the ChIP-seq experiment.\n pTFbound, pBgBound = self.computeBindingProbabilities( )\n self.locations.loc[:,'p_occ_bg'] = pBgBound\n\n chipFragments = binom.rvs( self.chipCells, self.locations['p_occ_chip'].values, size=self.N )\n self.locations.loc[:,'chip_fragments'] = chipFragments\n\n controlBound = binom.rvs( self.controlCells, self.locations['p_occ_bg'].values, size=self.N )\n self.locations.loc[:,'control_fragments'] = controlBound",
"def get_alice_tmrna_169():\n seq_ftr = create_1_part_seqfeature(95923, 96358, 1, \"tmRNA\")\n return seq_ftr",
"def thetae(P, T, Eqn):\n #Define the constants \n #Note: We found that Rd was printing out as Rd = 0.287, so we multiplied it by 1000\n Lv, Cp_d, Rd = mpconsts.Lv.m, mpconsts.Cp_d.m, (mpconsts.Rd.m*1000) \n Cw = 4190 #specific heat of water\n Ws = sat_mixing_ratio(P, T, Eqn)\n Cw_d = Cp_d + (Ws * Cw) \n\n #Plug into Eqn 2 from Bolton (1980)\n a = (T * (1000/P)**( Rd/Cw_d ))\n b = np.exp((Lv*Ws) / (Cw_d*T))\n theta_e = a * b\n \n theta_e = a * b\n return theta_e",
"def runCalculation(self): \n \n # Calculate the sequence entropy of each column in a fasta file\n f = open(self.fasta_file,'r')\n self.data = wl.LogoData.from_seqs(wl.read_seq_data(f)) \n f.close()",
"def GET_CYL():\n nmax = 46\n gamval = np.zeros(nmax)\n lamval = np.zeros(nmax)\n bval = np.zeros(nmax)\n\n gamval[0] = 1.00001\n gamval[1] = 1.0001\n gamval[2] = 1.001\n gamval[3] = 1.005\n gamval[4] = 1.01\n gamval[5] = 1.03\n gamval[6] = 1.05\n gamval[7] = 1.07\n gamval[8] = 1.10\n gamval[9] = 1.15\n gamval[10] = 1.2\n gamval[11] = 1.3\n gamval[12] = 1.4\n gamval[13] = 1.5\n gamval[14] = 1.66667\n gamval[15] = 1.7\n gamval[16] = 1.8\n gamval[17] = 1.9\n gamval[18] = 1.92\n gamval[19] = 2.0\n gamval[20] = 2.0863\n gamval[21] = 2.0883\n gamval[22] = 2.125\n gamval[23] = 2.2\n gamval[24] = 2.3676\n gamval[25] = 2.3678\n gamval[26] = 2.4\n gamval[27] = 2.6\n gamval[28] = 2.8\n gamval[29] = 2.83920\n gamval[30] = 2.83929\n gamval[31] = 3.0\n gamval[32] = 3.4\n gamval[33] = 4.0\n gamval[34] = 5.0\n gamval[35] = 6.0\n gamval[36] = 7.0\n gamval[37] = 8.0\n gamval[38] = 10.0\n gamval[39] = 15.0\n gamval[40] = 20.0\n gamval[41] = 30.0\n gamval[42] = 50.0\n gamval[43] = 100.0\n gamval[44] = 1000.0\n gamval[45] = 9999.0\n\n lamval[0] = 1.0022073240\n lamval[1] = 1.0068195769\n lamval[2] = 1.0202846866\n lamval[3] = 1.0414733956\n lamval[4] = 1.0553973808\n lamval[5] = 1.0850737604\n lamval[6] = 1.1023892512\n lamval[7] = 1.1150692073\n lamval[8] = 1.1296268597\n lamval[9] = 1.1475773258\n lamval[10] = 1.1612203175\n lamval[11] = 1.1817213587\n lamval[12] = 1.1971414294\n lamval[13] = 1.2095591324\n lamval[14] = 1.2260537880\n lamval[15] = 1.2288931032\n lamval[16] = 1.2367055181\n lamval[17] = 1.2436278359\n lamval[18] = 1.2449208188\n lamval[19] = 1.2498244759\n lamval[20] = 1.2546830116\n lamval[21] = 1.2547907910\n lamval[22] = 1.2567323668\n lamval[23] = 1.2604989804\n lamval[24] = 1.2680643171\n lamval[25] = 1.2680727188\n lamval[26] = 1.2694076380\n lamval[27] = 1.2769816100\n lamval[28] = 1.2835139723\n lamval[29] = 1.2846912316\n lamval[30] = 1.2846938989\n lamval[31] = 1.2892136582\n lamval[32] = 1.2986950941\n lamval[33] = 1.3095267323\n lamval[34] = 1.3220499813\n lamval[35] = 1.3305627751\n lamval[36] = 1.3367301837\n lamval[37] = 1.3414054776\n lamval[38] = 1.3480251307\n lamval[39] = 1.3569909807\n lamval[40] = 1.3615356210\n lamval[41] = 1.3661223915\n lamval[42] = 1.3698225859\n lamval[43] = 1.3726158889\n lamval[44] = 1.3751432790\n lamval[45] = 1.3753967176\n\n bval[0] = 0.521740\n bval[1] = 0.554609\n bval[2] = 0.625514\n bval[3] = 0.697737\n bval[4] = 0.724429\n bval[5] = 0.731819\n bval[6] = 0.708880\n bval[7] = 0.682234\n bval[8] = 0.644590\n bval[9] = 0.593262\n bval[10] = 0.554542\n bval[11] = 0.502117\n bval[12] = 0.469268\n bval[13] = 0.447230\n bval[14] = 0.423698\n bval[15] = 0.420261\n bval[16] = 0.411663\n bval[17] = 0.405047\n bval[18] = 0.403911\n bval[19] = 0.399877\n bval[20] = 0.396295\n bval[21] = 0.396220\n bval[22] = 0.394904\n bval[23] = 0.392529\n bval[24] = 0.388444\n bval[25] = 0.388440\n bval[26] = 0.387812\n bval[27] = 0.384755\n bval[28] = 0.382794\n bval[29] = 0.382506\n bval[30] = 0.382505\n bval[31] = 0.381580\n bval[32] = 0.380564\n bval[33] = 0.380920\n bval[34] = 0.383355\n bval[35] = 0.386279\n bval[36] = 0.389064\n bval[37] = 0.391561\n bval[38] = 0.395687\n bval[39] = 0.402440\n bval[40] = 0.406405\n bval[41] = 0.410797\n bval[42] = 0.414640\n bval[43] = 0.417726\n bval[44] = 0.420658\n bval[45] = 0.420960\n\n return gamval, lamval, bval",
"def gmx_energy(configuration):\n _check()\n print_gro_file('tmp.gro', configuration,\n structure=GROMACSStructure(f'{mol_name}.gro'))\n\n gmx = Popen(['gmx', 'grompp',\n '-f', 'energy.mdp',\n '-c', 'tmp.gro',\n '-p', f'{mol_name}.top',\n '-o', 'energy.tpr'],\n stdout=DEVNULL, stderr=DEVNULL)\n gmx.wait()\n gmx = Popen(['gmx', 'mdrun', '-nt', '1', '-deffnm', 'energy'],\n stdout=DEVNULL, stderr=DEVNULL)\n gmx.wait()\n\n energy = energy_from_log()\n print(energy)\n return kj_mol_to_ev * energy",
"def sim1D(**kwargs):\n import matplotlib.pyplot as plt\n from matplotlib import rc\n import numpy as np\n import os\n import progressbar as pb\n\n #Settings to make the plots appropriate for inclusion in TeX generated publications\n rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n rc('text',usetex=True)\n FONTSIZE = 10\n FIGSIZE = (3.5,3.5)\n FIGDIM = ([0.15,0.1,0.8,0.85])\n\n#Proportional control coefficient\n if 'Kp' in kwargs:\n Kp = kwargs['Kp']\n else:\n Kp = .04\n\n#number of time samples\n if 'mtas' in kwargs:\n moving_time_average_samples = mtas\n else:\n moving_time_average_samples = 15\n\n#surface flux\n if 'qs_nom' in kwargs:\n qs_nom = kwargs['qs_nom']\n else:\n qs_nom = 600. #500. #585. #W\n\n#material properties\n if 'k_type' in kwargs:\n m=nylon12(kwargs['k_type']) #instantiate m - material\n if 'const' in kwargs['k_type']:\n if 'k' in kwargs:\n m.k_const = kwargs['k']\n print 'k found\\n'\n else:\n m = nylon12('linear')\n print 'using default linear thermal conductivity.\\n'\n \n#specific heat\n if 'c_type' in kwargs:\n m.c_type = kwargs['c_type']\n if 'const' in kwargs['c_type']:\n if 'c' in kwargs:\n m.c_const = kwargs['c']\n print 'constant c found'\n else:\n print 'using default linear specific heat'\n \n#density\n if 'rho' in kwargs:\n m.rho = kwargs['rho']\n\n#spatial domain\n if 'xmax' in kwargs:\n xmax = kwargs['xmax']\n else:\n xmax = 0.02 #[m] depth of powder to consider\n if 'dx' in kwargs:\n dx = kwargs['dx']\n else:\n dx = 1.016e-4\n if 'x' in kwargs:\n x = np.asarray(kwargs['x'])\n else:\n x = np.arange(0,xmax,dx)\n\n#Temperatures\n if 'T_initial' in kwargs:\n T_initial = kwargs['T_initial']\n else:\n T_initial = 300\n \n if 'T_offset' in kwargs:\n T_offset = kwargs['T_offset']\n else:\n T_offset = 3\n \n if 'T_set' in kwargs:\n T_set = kwargs['T_set']\n else:\n T_set = 470\n\n#time domain\n if 'time' in kwargs: #set up time variable\n time = kwargs['time']\n dt = time[1] - time[0]\n if 'data' in kwargs:\n data = kwargs['data']\n Compare = True\n else:\n Compare = False\n else: #use default\n dt = dx**2/(5*m.alpha(T_set)) #stability criterion Fo<=1/2\n if 'tmax' in kwargs:\n tmax = float(kwargs['tmax'])\n else:\n tmax = 100.\n time = np.arange(0.,tmax+dt,dt)\n Compare = False\n tmax = max(time)\n num_time_steps = len(time)\n\n#initialize the working variables\n T = np.ones((num_time_steps,len(x)))*T_initial\n qs = np.zeros(num_time_steps)\n err = np.zeros(num_time_steps)\n u = np.zeros(num_time_steps)\n\n#loop through the time and space domains\n inf = len(x)-1\n print \"Solving ...\\n\"\n pbar=pb.ProgressBar().start()\n for i in range(1,num_time_steps): #time step\n dt = time[i] - time[i-1]\n #constant flux boundary condition\n err[i] = T_set + T_offset - np.mean(T[range(max(0,i-moving_time_average_samples),i),0])\n u[i] = err[i] * Kp\n qs[i] = max(min(1.,u[i]) * qs_nom,-10)\n T[i,0] = 2*Fo_T(m.alpha(T[i-1,0]),dt,dx)*(T[i-1,1] + qs[i]*dx/m.k(T[i-1,1])) + (1 - 2*Fo_T(m.alpha(T[i-1,0]),dt,dx)) * T[i-1,0]\n\n #adiabatic far wall boundary condition\n T[i,inf] = 2*Fo_T(m.alpha(T[i-1,inf-1]),dt,dx) * T[i-1,inf-1] + (1 - 2*Fo_T(m.alpha(T[i-1,inf]),dt,dx)) * T[i-1,inf]\n\n #internal nodes heat equation\n for j in range(1,len(x)-1):\n T[i,j] = Fo_T(m.alpha(T[i-1,j]),dt,dx) * (T[i-1,j-1] + T[i-1,j+1]) + (1 - 2*Fo_T(m.alpha(T[i-1,j]),dt,dx)) * T[i-1,j]\n pbar.update(100.*float(i)/float(num_time_steps))\n pbar.finish()\n\n#plot the results\n print \"Plotting ...\\n\"\n fig = plt.figure(1,figsize=FIGSIZE)\n ax = fig.add_axes(FIGDIM)\n plotlabel = 
'dx=%1.2e, Fo=%1.2e' %(dx,Fo_T(m.alpha(T_set),dt,dx))\n line = ax.plot(time,T[:,0],label=plotlabel)\n if(Compare):\n line2 = ax.plot(time,data,label='Reference')\n xtext = ax.set_xlabel('Time (s)',fontsize=FONTSIZE,family='sans-serif')\n ytext = ax.set_ylabel('Surface Temperature (K)',fontsize=FONTSIZE,family='sans-serif')\n for label in ax.get_xticklabels():\n label.set_family('sans-serif')\n\n if 'filename' in kwargs:\n filename = kwargs['filename']\n else:\n filename = 'last_sim'\n\n np.savez(filename,T=T,time=time,qs=qs)\n\n figfilename = filename+'.pdf'\n plt.savefig(figfilename,format='pdf')\n\n comment_info = \"qs_nom = %.0f\\nT_set = %1.1f\\nKp = %1.3f\\nT_initial = %1.3f\\nT_set = %1.1f\\nT_offset = %1.1f\\ndx = %1.3e\\ndt=%1.3e\" % (qs_nom,\n T_set,\n Kp,\n T_initial,\n T_set,\n T_offset,\n dx,\n dt)\n \n os.system(gen_add_comment_script(figfilename,comment_info))\n try:\n rmse = np.sqrt( np.mean( (T[:,0]-data)**2 ) )\n return rmse\n except:\n return -1."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
identifies SEED tile strings according to the seed ID and writes them into the stringIDs dictionary
|
    def getIDs(self):
        # Fixed central tiles shared by every seed.
        self.stringIDs[12]="tw-ICEN-12"
        self.stringIDs[20]="tw-ICEN-20"
        # Seed-specific tiles: tw-<seedID>-<tile number>.
        for i in [10,14,15,16,17,18]:
            self.stringIDs[i]="tw-%s-%02d"%(self.seedID,i)
        # Tiles 11 and 13 double the first letter of the seed ID and append the
        # tile version number (tvsn is taken from module scope).
        for i in [11,13]:
            self.stringIDs[i]="tw-%s%d-%02d"%(2*self.seedID[0],tvsn,i)
        # Per-seed overrides.
        if self.seedID=="A1":
            self.stringIDs[10]="tw-ICEN-10"
            self.stringIDs[11]="tw-ICN%d-11"%tvsn
            self.stringIDs[13]="tw-ICN%d-13"%tvsn
            self.stringIDs[17]="tw-ICEN-17"
        if self.seedID=="B7":
            self.stringIDs[16]="tw-ICEN-16"
        # The 'xj' variant tags a subset of tiles (typ is taken from module scope).
        if typ=='xj':
            for i in [10,11,12,13,14,17,20]:
                self.stringIDs[i]=self.stringIDs[i]+'xj'
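        # Illustration (hypothetical values, not part of the original routine):
        # with self.seedID == "B7", tvsn == 2 and typ != 'xj', the mapping ends
        # up containing, e.g.,
        #   stringIDs[12] == "tw-ICEN-12"   (fixed central tile)
        #   stringIDs[10] == "tw-B7-10"     (seed-specific tile)
        #   stringIDs[11] == "tw-BB2-11"    (doubled first letter + tvsn)
        #   stringIDs[16] == "tw-ICEN-16"   (B7 override)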
|
[
"def entity_name_id_map_from_dump():\n doc_cnt = 0\n my_ent_name_id_map = dict()\n my_ent_id_name_map = dict()\n duplicate_names = 0 # different articles with identical title\n duplicate_ids = 0 # with the same id\n with open(config.base_folder+\"data/basic_data/tokenizedWiki.txt\") as fin:\n for line in fin:\n line = line[:-1] # omit the '\\n' character\n if line.startswith('<doc\\xa0id=\"'):\n docid = line[9:line.find('\"', 9)]\n doctitle = line[line.rfind('=\"') + 2:-2].replace('\\xa0', ' ')\n\n if doctitle in my_ent_name_id_map:\n duplicate_names += 1\n if docid in my_ent_id_name_map:\n duplicate_ids += 1\n\n my_ent_name_id_map[doctitle] = docid # i can convert it to int as well\n my_ent_id_name_map[docid] = doctitle\n #print(\"docid: \", docid, \"\\t doctitle: \", doctitle)\n doc_cnt += 1\n if doc_cnt % 10000 == 0:\n print(\"doc_cnt = \", doc_cnt)\n print(\"len(ent_name_id_map) = \", len(my_ent_name_id_map))\n print(\"duplicate names: \", duplicate_names)\n print(\"duplicate ids: \", duplicate_ids)\n with open(config.base_folder+\"data/basic_data/my_wiki_name_id_fromdump.txt\", 'w') as fout:\n for doc_title, doc_id in my_ent_name_id_map.items():\n fout.write(doc_title + \"\\t\" + doc_id + \"\\n\")",
"def string_to_ind(setseed = 'blinded', Nsets = 10):\n hashstr = hashlib.md5(setseed).hexdigest()\n ind = int(int(hashstr,16)%Nsets)\n return ind",
"def _get_source_strings(self, ids):\r\n strings = super(_MarkSourceMixin, self)._get_source_strings(ids)\r\n res = []\r\n for s in strings:\r\n res.append(list(s))\r\n res[-1][1] = s[1] + '_txss'\r\n return res",
"def gen_twid_list(dst, srcs):\r\n fdst = open(dst, 'w+')\r\n\r\n cnt = 0\r\n for line in fileinput.input(srcs, openhook = fileinput.hook_compressed):\r\n try:\r\n status = json.loads(line)\r\n if status.has_key('place') and status['place'] != None:\r\n if status['place']['type'] != 'poi':\r\n continue\r\n print >> fdst, status['id']\r\n cnt += 1\r\n except ValueError:\r\n print 'ValueError'\r\n\r\n fdst.flush()\r\n fdst.close()\r\n logging.info('Generate tweet_id ::{0} tweet IDs are generated.'.format(cnt))\r\n logging.info('------------------------------------------')",
"def _build_ID_sets(self):\n # Search the train folder for the samples, create string IDs for them\n self._IDs = []\n for video in os.listdir(self._trn_dir): # video: 'alley_1'\n frames = sorted(os.listdir(self._trn_dir + '/' + video))\n for idx in range(len(frames) - 1):\n frame1_ID = f'{video}/{frames[idx]}'\n frame2_ID = f'{video}/{frames[idx+1]}'\n flow_ID = f'{video}/{frames[idx].replace(\".png\", \".flo\")}'\n self._IDs.append((frame1_ID, frame2_ID, flow_ID))\n\n # Build the train/val datasets\n if self.opts['val_split'] > 0.:\n self._trn_IDs, self._val_IDs = train_test_split(self._IDs, test_size=self.opts['val_split'],\n random_state=self.opts['random_seed'])\n else:\n self._trn_IDs, self._val_IDs = self._IDs, None\n\n # Build the test dataset\n self._tst_IDs = []\n for video in os.listdir(self._tst_dir): # video: 'ambush_1'\n frames = sorted(os.listdir(self._tst_dir + '/' + video))\n for idx in range(len(frames) - 1):\n frame1_ID = f'{video}/{frames[idx]}'\n frame2_ID = f'{video}/{frames[idx+1]}'\n flow_ID = f'{video}/{frames[idx].replace(\".png\", \".flo\")}'\n self._tst_IDs.append((frame1_ID, frame2_ID, flow_ID))\n\n # Build a list of simplified IDs for Tensorboard logging\n self._trn_IDs_simpl = self.simplify_IDs(self._trn_IDs)\n self._val_IDs_simpl = self.simplify_IDs(self._val_IDs)\n self._tst_IDs_simpl = self.simplify_IDs(self._tst_IDs)",
"def _get_id(self, s):\n ### Begin your code\n ans = self.str_to_id.get(s)\n #print(ans)\n if ans == None:\n p = self.__len__()\n self.id_to_str.append(s)\n self.str_to_id.setdefault(s, p)\n return p\n else:\n return ans\n ### End your code",
"def interpret_scells(sky_cells):\n scell_files = {}\n for scell in sky_cells.values():\n for member in scell.members:\n if member not in scell_files:\n scell_files[member] = {}\n scell_files[member][scell.sky_cell_id] = scell\n\n # convert each entry into a ';'-delimited string instead of a list of IDs\n for member in scell_files:\n scell_files[member]['id'] = ';'.join([id for id in scell_files[member]])\n\n return scell_files",
"def get_random_tileid():\r\n return randrange(0, len(ALL_TILES))",
"def get_byid_name_map():\n byid_name_map = {}\n out, err, rc = run_command([LS, '-l', '/dev/disk/by-id'],\n throw=True)\n if rc == 0:\n for each_line in out:\n # Split the line by spaces and '/' chars\n line_fields = each_line.replace('/', ' ').split()\n # Grab every sda type name from the last field in the line and add\n # it as a dictionary key with it's value as the by-id type name so\n # we can index by sda type name and retrieve the by-id. As there\n # are often multiple by-id type names for a given sda type name we\n # gain consistency in mapped by-id value by always preferring the\n # longest by-id for a given sda type name key.\n if len(line_fields) >= 5:\n # Ensure we have at least 5 elements to avoid index out of\n # range and to skip lines such as \"total 0\"\n if line_fields[-1] not in byid_name_map.keys():\n # We don't yet have a record of this device so take one.\n byid_name_map[line_fields[-1]] = line_fields[-5]\n # ie {'sda': 'ata-QEMU_HARDDISK_QM00005'}\n else:\n # We already have a record of this device so check if the\n # current line's by-id name is longer.\n if len(line_fields[-5]) > len(\n byid_name_map[line_fields[-1]]):\n # The current line's by-id name is longer so use it.\n byid_name_map[line_fields[-1]] = line_fields[-5]\n return byid_name_map",
"def set_id(self, inc_string):\n hasher = hashlib.sha1()\n # Remove non-ascii chars for hash\n hasher.update((''.join(i for i in inc_string if ord(i) < 128)).encode('utf-8'))\n self.id = hasher.hexdigest()",
"def create_seed_mature_name_map_file(seed_list, seed_length, pre_mir_name_to_mature_5p_or_3p_map, pre_mir_name_to_seeds_map):\n seed_to_mature_map = {}\n\n if seed_length == 6:\n table_data = table_data_6\n organisms = organisms_6\n else:\n table_data = table_data_7\n organisms = organisms_7\n\n for seed in seed_list:\n seed_dict = map_seed_to_organisms_extended(\n table_data,\n seed,\n organisms,\n pre_mir_name_to_seeds_map,\n pre_mir_name_to_mature_5p_or_3p_map)\n\n # mature_name_appearances_map = defaultdict(int)\n mature_names_list = []\n for organism in seed_dict[seed]:\n for pre_mir_name in seed_dict[seed][organism]:\n mature_name = seed_dict[seed][organism][pre_mir_name]['mature name']\n # reconstruct mature name: remove prefix, remove letters from mid name, etc.\n mature_name_reconstructed = reconstruct_mature_name(mature_name)\n if mature_name_reconstructed is not None and len(mature_name_reconstructed) != 0:\n # collect all reconstructed names to later chose one family name representative from\n mature_names_list.append(mature_name_reconstructed)\n\n # decide on the chosen family name using majority vote selection\n common_prefix = find_common_prefix(mature_names_list)\n if common_prefix is not None and len(str(common_prefix)) != 0:\n seed_to_mature_map[seed] = common_prefix\n print(\"done with seed \" + str(seed) + \" and mapped to \" + str(common_prefix))\n\n # access to database and save file\n with open('static/Model/maps/seed_to_mature_map_' + str(seed_length) + '.txt', \"w\") as f:\n json.dump(seed_to_mature_map, f, indent=4)",
"def test_snippets_to_ids():\n\tsnippets = [['sentence', 'one'], ['sentence'], ['two']]\n\tresult = (([12205, 68, 0], [12205, 0, 0]), (2, 1))\n\tassert lstm.snippets_to_ids(snippets, 3, 2) == result\n\n\tsnippets = [['sentence', 'three']]\n\tresult = (([12205, 98, 0], [0, 0, 0]), (2, 0))\n\tassert lstm.snippets_to_ids(snippets, 3, 2) == result",
"def get_map_seed(instructions):\n match = re.search(rb'\\x00.*? (\\-?[0-9]+)\\x00.*?\\.rms', instructions)\n seed = None\n if match:\n seed = int(match.group(1))\n return seed",
"def generate_hash_map(self):\n\n # clear the hash map\n self._hash_map.clear()\n\n for line in self._document_content:\n\n line = line.encode('utf-8')\n\n line = str(line).translate(PUNCTUATION_TRANS)\n words = line.split()\n\n for word in words:\n\n word = word.decode('utf-8-sig')\n word = PorterStemmer().stem(word)\n word = word.lower()\n\n if word.isalpha():\n if not self._is_stop_word(word):\n\n # if the word is not in hash\n if word not in self._hash_map:\n self._hash_map[word] = 1\n else:\n self._hash_map[word] += 1",
"def map_to_decs(entity_text, name_to_id,synonym_to_id):\n \n global decs_cache\n\n entity_text_spaces = entity_text.replace(\"_\",\" \")\n\n if entity_text_spaces.replace(',','') in map(str.lower,name_to_id): #There is an exact match for this entity in name_to_id\n codes = process.extract(entity_text_spaces.replace(\" \",\"_\"), name_to_id.keys(), limit=4000, scorer=fuzz.token_sort_ratio)\n\n for d in codes:\n term_name = d[0]\n score = d[1]\n if entity_text_spaces.replace(',','').lower() == term_name.lower():\n codes = [(term_name,score)]\n\n decs_cache[entity_text] = codes\n\n elif entity_text_spaces.replace(',','') in map(str.lower,synonym_to_id): #There is an exact match for this entity in synonym_to_id\n codes = process.extract(entity_text_spaces.replace(\" \",\"_\"), synonym_to_id.keys(), limit = 4000, scorer=fuzz.token_sort_ratio)\n for d in codes: \n term_name = d[0]\n score = d[1]\n term_id = synonym_to_id[term_name]\n if entity_text_spaces.replace(',','').lower() == term_name.lower():\n codes = [(term_name,score)]\n decs_cache[entity_text] = codes\n \n \n elif entity_text.endswith(\"s\") and entity_text[:-1] in decs_cache: # Removal of suffix -s \n codes = decs_cache[entity_text[:-1]]\n\n \n elif entity_text in decs_cache: # There is already a candidate list stored in cache file\n codes = decs_cache[entity_text]\n\n\n else:\n # Get first ten candidates according to lexical similarity with entity_text\n \n codes = process.extract(entity_text, name_to_id.keys(), scorer=fuzz.token_sort_ratio, limit=10)\n if codes == []:\n pass\n \n elif codes[0][1] == 100: # There is an exact match for this entity\n codes = [codes[0]]\n \n elif codes[0][1] < 100: # Check for synonyms of this entity\n drug_syns = process.extract(entity_text, synonym_to_id.keys(), limit=10, scorer=fuzz.token_sort_ratio)\n\n for synonym in drug_syns:\n\n if synonym[1] == 100:\n codes = [synonym]\n \n else:\n if synonym[1] > codes[0][1]:\n codes.append(synonym)\n \n decs_cache[entity_text] = codes\n \n # Build the candidates list with each match id, name and matching score with entity_text\n\n matches = []\n for d in codes:\n term_name = d[0]\n score = d[1]\n\n if term_name in name_to_id.keys():\n ls2 = []\n for i in name_to_id.keys():\n if i.lower() == term_name.lower(): \n ls2.append(name_to_id[i])\n term_id = ls2[0] #If there is 2 or more term_ids for the same entity the first id will be linked to the entity\n\n elif term_name in synonym_to_id.keys():\n ls2=[]\n for i in synonym_to_id.keys():\n if i.lower() == term_name.lower(): \n ls2.append(synonym_to_id[i])\n term_id = ls2[0] #If there is 2 or more term_ids for the same entity the first id will be linked to the entity\n\n \n else:\n term_id = \"NIL\"\n\n match = {\"ontology_id\": term_id,\n \"name\": term_name,\n \"match_score\": d[1]/100}\n\n \n \n matches.append(match)\n #print(matches)\n\n return matches",
"def create_state_id(self):\n for key, value in config.fips_dict.iteritems():\n if key == self.state.lower():\n state_num = value\n if state_num <=9:\n state_num = '0' + str(state_num)\n else:\n state_num = str(state_num)\n\n return 'st' + state_num",
"def update_ids(self):\n paths=self.get_surf_paths()\n for path in paths:\n S=self.get_surface(path)\n S.id=path",
"def transSeqNotoGeneID(inputDir,translationTable):\n IDDic = {}\n fileTemp=basic.myFile(inputDir,translationTable)\n lines = fileTemp.readFile()\n for line in lines:\n IDList = line.split(\"\\t\")\n IDDic[IDList[1][:-1]] = IDList[0]\n return IDDic",
"def write_ids_and_times(full_id_strings, storm_times_unix_sec,\n pickle_file_name):\n\n error_checking.assert_is_string_list(full_id_strings)\n error_checking.assert_is_numpy_array(\n numpy.array(full_id_strings), num_dimensions=1)\n num_storm_objects = len(full_id_strings)\n\n error_checking.assert_is_integer_numpy_array(storm_times_unix_sec)\n error_checking.assert_is_numpy_array(\n storm_times_unix_sec,\n exact_dimensions=numpy.array([num_storm_objects], dtype=int)\n )\n\n metadata_dict = {\n FULL_IDS_KEY: full_id_strings,\n STORM_TIMES_KEY: storm_times_unix_sec,\n }\n\n file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)\n\n pickle_file_handle = open(pickle_file_name, 'wb')\n pickle.dump(metadata_dict, pickle_file_handle)\n pickle_file_handle.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
splits all sequences into possible subsequences of length num (for num=5, as in 12345, 23456, 34567 ...). Identifiers AABBB, with AA sequence number, BBB position of first base in string. Negative identifiers denote reversed sequences. If comp is set, the function returns complementary subsequences instead.
|
    def getSubseq(self, num, comp=False):
        self.getStrands()
        for key in self.stringSequences.keys():
            # Strip the 5'-/-3' markers and any whitespace from the stored sequence.
            string=self.stringSequences[key].replace("5'-","")
            string=string.replace("-3'","")
            string=string.replace(" ","")
            # Slide a window of length num along the strand.  The identifier is
            # AABBB = sequence number * 1000 + start position; the reversed
            # window is stored with a negative identifier.
            for i in range(len(string)+1-num):
                if comp:
                    self.subsequences[get_complement(string[i:i+num])]=int(key[-2:])*1000+i
                    self.subsequences[get_complement(string[i:i+num][::-1])]=-int(key[-2:])*1000+i
                else:
                    self.subsequences[string[i:i+num]]=int(key[-2:])*1000+i
                    self.subsequences[string[i:i+num][::-1]]=-int(key[-2:])*1000+i
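        # Illustration (hypothetical data): with self.stringSequences ==
        # {"strand01": "5'-ACGTACG-3'"} and num == 5, the cleaned strand is
        # "ACGTACG" and the windows are "ACGTA" (i=0), "CGTAC" (i=1) and
        # "GTACG" (i=2).  Forward windows get identifiers 1000, 1001, 1002
        # (int("01")*1000 + i); their reversed forms get -1000, -999, -998.
        # With comp=True the complements of those windows are stored instead.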
|
[
"def split_subsequences(iterable, length=2, overlap=0, \r\n join_substr=True):\r\n isstring = isinstance(iterable, str) and join_substr\r\n it = iter(iterable)\r\n results = list(itertools.islice(it, length))\r\n while len(results) == length:\r\n yield ''.join(results) if isstring else results\r\n results = results[length - overlap:]\r\n results.extend(itertools.islice(it, length - overlap))\r\n if results:\r\n yield ''.join(results) if isstring else results",
"def _all_splits(seq):\n for index in range(1, len(seq)):\n yield (seq[0:index], seq[index:])",
"def subseqs(s):\n if len(s)==0:\n return [[]]\n else:\n sub=subseqs(s[1:])\n return insert_into_all(s[0],sub)+sub",
"def split_into_fib_seq_842(num:str) -> List[int]:\n\n #\n # \"1101111\"\n # ^ <- pointer to left-most digit (i)\n # ^ <- pointer to current right-most digit (j)\n # ^ <- pointer to next digit (k)\n #\n # \"1101111\"\n # ^ <- pointer to left-most digit (i)\n # ^ <- pointer to current right-most digit (j)\n # ^ <- pointer to next digit (k)\n #\n #\n # \"1101111\"\n # ^ <- pointer to left-most digit\n # ^ <- pointer to current right-most digit\n # ^ <- pointer to next digit.\n\n # 1, 1, 0 <- fails\n #",
"def letter_combos(string, num_pad, idx=0, result=\"\"):\n\n if idx >= len(string) and len(result) > 0:\n print result\n return\n\n lst = num_pad.get(string[idx])\n\n for l in lst:\n letter_combos(string, num_pad, idx + 1, result + l)",
"def expand_serial_no(serial_strings):\n case_ids = []\n for serial in serial_strings:\n result = re.match('(\\d+)[.]{2}(\\d+)', serial)\n if result:\n start, end = map(int, result.groups())\n case_ids.extend(range(start, end + 1))\n else:\n case_ids.append(int(serial))\n\n return case_ids",
"def get_component_numbers(num, n_splits):\n splits = None\n for n in range(n_splits, 1, -1):\n splits = split_into_n_parts(num - 1, n)\n if splits is not None:\n break\n if splits is None:\n return np.arange(num)[1:]\n component_nums = []\n prev = 0\n for num in splits:\n component_nums += [prev + num]\n prev += num\n return component_nums",
"def intersperse(main, *extras) :\n def isbase(char) :\n return get_ucd(char, 'gc').startswith(\"L\")\n\n res = []\n extras = list(extras)\n #extras.sort(cmp=lambda a,b : cmp(a[1], b[1]))\n for m in main :\n groups = []\n base = \"\"\n for v in groupby(m, lambda x:get_ucd(x, 'gc')[0]) :\n k = v[0]\n d = \"\".join(v[1])\n if k == \"L\" :\n if base : groups.extend((base, \"\"))\n for c in d[:-1] :\n groups.extend((c, \"\"))\n base = d[-1]\n elif k == \"M\" :\n base = base + d\n else :\n groups.extend((base, d))\n base = \"\"\n if base : groups.extend((base, \"\"))\n # groups is now 2n list where list[n] is base+dias, list[n+1] is punc separators\n for i in range(0, len(groups), 2) :\n dias = list(groups[i][1:])\n orders = [get_ucd(c, 'ccc') for c in dias]\n bases = list(zip(dias, orders))\n new = sorted(bases + extras, cmp=lambda a,b: cmp(a[1], b[1]))\n results = list(zip(*new))\n groups[i] = \"\".join([groups[i][0]] + list(results[0]))\n res.append(\"\".join(groups))\n return res",
"def subs(s, count):\r\n return [s[i:(i + count)] for i in range(len(s) - count + 1)]",
"def LCStr(a,b):\n L = np.zeros((len(a), len(b)))\n z = 0 # Use to denote the max element in L\n ret = [] # All the common sub-string with longest length will store in ret\n for i in xrange(0,len(a)):\n for j in xrange(0,len(b)):\n if a[i] == b[j]:\n if i==0 or j==0:\n L[i][j] = 1\n else:\n L[i][j] = L[i-1][j-1] + 1\n if L[i][j] > z:\n z = L[i][j]\n ret = []\n if L[i][j] == z:\n ret.append(a[int(i-z+1):i+1])\n else:\n L[i][j] = 0\n return ret",
"def make_addsub_combination_array(\n composed_numbers: List[Union[int, float]], passage_numbers: List[Union[int, float]],\n compnumber2numcombinations: Dict[Union[int, float], List[Tuple]]\n ):\n max_num_combinations = max(len(combinations) for (_, combinations) in compnumber2numcombinations.items())\n number_combinations_indices = -1 * np.ones(shape=(len(composed_numbers), max_num_combinations, 2),\n dtype=np.int32)\n\n for composed_num, combinations in compnumber2numcombinations.items():\n compnumber_idx = composed_numbers.index(composed_num)\n for combination_num, (num1, num2) in enumerate(combinations):\n (passagenum1idx, passagenum2idx) = (passage_numbers.index(num1), passage_numbers.index(num2))\n number_combinations_indices[compnumber_idx, combination_num, :] = [passagenum1idx, passagenum2idx]\n return number_combinations_indices, max_num_combinations",
"def split_every(seq, n):\n return [seq[i:i+n] for i in range(0, len(seq), n)]",
"def subsets(s):\n # YOUR CODE HERE\n # Got the idea from https://coderbyte.com/algorithm/print-all-subsets-given-set\n # total num of sets\n set_num = int(pow(2, len(s)))\n # convert set to list for easy indexing\n m = list(s)\n\n for i in range(0, set_num):\n # new list to yield\n k = []\n\n # convert to binary so that a 1=add, 0=ignore\n t = \"{0:b}\".format(i)\n\n # pad it according to length of set\n while len(t) < len(s):\n t = '0' + t\n\n # iterate over binary to match 1's\n for j in range(0, len(t)):\n if t[j] == '1':\n k.append(m[j])\n yield k",
"def get_substrings(text):\n return [text[x:y] for x, y in combinations(range(len(text) + 1), r=2)]",
"def component_id_creation(self, comp_no):\n comp_list = []\n for i in range(1, comp_no + 1):\n comp_list.append(component_prefix + `i`)\n return comp_list",
"def substrings(a, b, n):\n\n # TODO\n la = len(a)\n lb = len(b)\n sub_a = []\n sub_b = []\n sub = []\n\n for i in range(la-n+1):\n sub_a.append(a[i:i+n])\n\n for j in range(lb-n+1):\n sub_b.append(b[j:j+n])\n\n for k in sub_a:\n if k in sub_b:\n sub.append(k)\n\n sub = set(sub)\n\n return sub",
"def subsets(arr):\n return itertools.chain(*[itertools.combinations(arr, i + 1) for i, a in enumerate(arr)])",
"def patSeqGen(seqpat):\n N_parts = re.findall(\"(N+)\", seqpat)\n N_gens = [genparts(len(stretch)) for stretch in N_parts]\n # Make as tuple so it can be treated same as an N_generator:\n const_tups = [(part,) for part in seqpat.split('N') if part]\n # If seqpat starts with one or more N, insert empty element in front of const_tups\n # to make zipping consistent. (Alternatively, reverse order of izip...)\n if seqpat[0] == 'N':\n const_tups.insert(0, None)\n #pairs = izip_longest(N_gens, const_tups)\n #else:\n # pairs = izip_longest(const_tups, N_gens)\n gens = (gen for gen in chain(*zip_longest(const_tups, N_gens)) if gen)\n return (\"\".join(comb) for comb in product(*gens))",
"def icombinationSeqs(combinationLists):\n for seqCombs in icombinations(combinationLists):\n result = []\n for seq in seqCombs:\n result.extend(seq)\n yield tuple(result)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load fstype information from the parser instance.
|
def _get_fstype_from_parser(self, fstype=None):
if not fstype:
if self.index in self.disk.parser.fstypes:
fstype = self.disk.parser.fstypes[self.index]
elif '*' in self.disk.parser.fstypes:
fstype = self.disk.parser.fstypes['*']
elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None:
fstype = "?" + self.disk.parser.fstypes['?']
else:
fstype = ""
if not fstype:
self.filesystem = None
elif isinstance(fstype, FileSystem):
fstype.volume = self
self.filesystem = fstype
elif fstype in VOLUME_SYSTEM_TYPES:
self.volumes.vstype = fstype
self.filesystem = FILE_SYSTEM_TYPES["volumesystem"](self)
elif fstype.startswith("?"):
fallback = FILE_SYSTEM_TYPES[fstype[1:]](self)
self.filesystem = filesystems.FallbackFileSystem(self, fallback)
else:
self.filesystem = FILE_SYSTEM_TYPES[fstype](self)
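    # Minimal self-contained sketch (hypothetical helper, not part of the
    # original class): the lookup precedence implemented above, distilled.
    # An fstype registered for this volume's index wins, then the '*'
    # wildcard, then the '?' fallback (returned with a leading '?'),
    # otherwise the empty string.
    @staticmethod
    def _resolve_fstype_sketch(index, fstypes):
        if index in fstypes:
            return fstypes[index]
        if '*' in fstypes:
            return fstypes['*']
        if fstypes.get('?') is not None:
            return '?' + fstypes['?']
        return ""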
|
[
"def family_symbol_and_types_from_family(self, familyLoaded): # this is only used by check_family_dict()\n doc = __revit__.ActiveUIDocument.Document\n try:\n symbolIds = list(familyLoaded.GetFamilySymbolIds())\n familySymbol = doc.GetElement(symbolIds[0])\n familyName = familySymbol.Family.Name\n if not familySymbol.IsActive:\n familySymbol.Activate()\n \n # template dict\n returnDict = {familyName: {\"Symbol\": None, \"Types\": {}}}\n \n # set family symbol\n returnDict[familyName][\"Symbol\"] = familySymbol.Id.IntegerValue\n # load types if they exist into the dict\n if len(symbolIds)>1:\n for symbolId in symbolIds:\n symbol = doc.GetElement(symbolId)\n symbolName = symbol.LookupParameter(\"Type Name\").AsString()\n returnDict[familyName][\"Types\"].update({symbolName: symbolId.IntegerValue})\n else:\n returnDict[familyName][\"Types\"].update({familyName: symbolIds[0].IntegerValue})\n return returnDict\n except Exception as inst:\n OutputException(inst)",
"def loader_for_type(self, ctype):\n for loadee, mimes in Mimer.TYPES.iteritems():\n for mime in mimes:\n if ctype.startswith(mime):\n return loadee",
"def getTypeInfo():",
"def __determine_data_type(self, smrf_config):\n loader_args = dict(start_date=self.start_date, end_date=self.end_date)\n\n if InputCSV.DATA_TYPE in smrf_config:\n self.data_type = InputCSV.DATA_TYPE\n self.load_class = InputCSV(\n **loader_args,\n stations=smrf_config[InputCSV.DATA_TYPE]['stations'],\n config=smrf_config[InputCSV.DATA_TYPE],\n )\n elif GriddedInput.TYPE in smrf_config:\n self.data_type = smrf_config[GriddedInput.TYPE]['data_type']\n data_inputs = dict(\n bbox=self.bbox,\n config=smrf_config,\n topo=self.topo,\n )\n if self.data_type == InputGribHRRR.DATA_TYPE:\n self.load_class = InputGribHRRR(**loader_args, **data_inputs)\n elif self.data_type == InputNetcdf.DATA_TYPE:\n self.load_class = InputNetcdf(**loader_args, **data_inputs)\n elif self.data_type == InputWRF.DATA_TYPE:\n self.load_class = InputWRF(**loader_args, **data_inputs)\n else:\n raise AttributeError(\n 'Unknown gridded data input type in ini-file'\n )\n else:\n raise AttributeError(\n 'Missing required data type attribute in ini-file'\n )",
"def load_type(self, type_path):\n # Open the the file\n with open(type_path, 'r') as type_file:\n # read the file content\n type_content = type_file.read()\n # add the type in database\n type_object = create_type(type_content, 'type_name', type_path)\n return type_object",
"def sc_read_10x_h5_ft_type(fname: str, ft_type: str) -> AnnData:\n assert fname.endswith(\".h5\")\n parsed = sc.read_10x_h5(fname, gex_only=False)\n parsed.var_names_make_unique()\n assert ft_type in set(\n parsed.var[\"feature_types\"]\n ), f\"Given feature type {ft_type} not in included types: {set(parsed.var['feature_types'])}\"\n\n retval = parsed[\n :,\n [n for n in parsed.var_names if parsed.var.loc[n, \"feature_types\"] == ft_type],\n ]\n return retval",
"def get_parser_type(headers):\n\n server = headers[\"Server\"]\n\n if server is not None:\n if server == \"nginx\":\n return NginxParser",
"def inferType(fname):\n with h5py.File(fname, 'r') as f:\n\n ftype = f.attrs.get('Type')\n\n if ftype not in ('linear', 'nonlinear'):\n raise X5Error('Unknown type: {}'.format(ftype))\n\n return ftype",
"def _find_loader(data_type):\n\n data_type = data_type.lower()\n\n if 'bcsd' in data_type:\n loader = load_bcsd\n elif 'gmfd' in data_type:\n loader = load_gmfd\n elif 'best' in data_type:\n loader = load_best\n elif 'era' in data_type:\n loader = load_era5\n else:\n raise TypeError(\"'\" + data_type + \"' not supported. Supported data \"\n \"types are: NASA BCSD, GMFD, BEST, ERA5.\")\n return loader",
"def init(cls):\n mimetypes.init()",
"def parse_type(type_name):\n for name, type_object in _type_definitions:\n if type_name == name:\n return type_object\n raise Exception(\"unknown type '%s'\" % type_name)",
"def _parse_header(self):\n\n if self.ei_magic != '\\x7fELF':\n return\n\n self.seek(16,0)\n reading = {'h': self.le_half, 'w': self.le_word,'a': self.le_addr,\n 'o': self.le_offset, 'x': self.le_xword}\n labels = ('type', 'machine', 'version', 'entry', 'phoff', \\\n 'shoff', 'flags', 'ehsize', 'phentsize', 'phnum',\\\n 'shentsize','shnum','shstrndx')\n htypes = ('h','h','w','a','o','o','w','h','h','h','h','h','h')\n\n # Retrieve ELF header\n self.elfhead = dict(zip(labels,[reading[t]() for t in htypes]))\n\n # Retrieve section header string table.\n # sh: name, type, flags, addr, offset, size, link, info, addralign, entsize\n self.seek((self.elfhead['shentsize'] * self.elfhead['shstrndx'])\\\n + self.elfhead['shoff'], 0)\n\n labels = ('name', 'type', 'flags', 'addr', 'offset', \\\n 'size', 'link', 'info', 'addralign', 'entsize')\n\n shtypes = ('w','w','x','a','o','x','w','w','x','x')\n\n sh_strtableh = dict(zip(labels,[reading[t]() for t in shtypes]))\n self.seek(sh_strtableh['offset'],0)\n self.sh_strtableh = sh_strtableh\n\n # Now the section header is known, can retrieve dynamic string table\n self.dynstrh = self._find_section('.dynstr')",
"def load_user_type():\n\twith open('./seed_data/user_type.tsv', 'r+') as data:\n\t\tfor row in data:\n\t\t\trow = row.rstrip()\n\t\t\tuser_type = row.split(\"\\t\")\n\n\t\t\tuser_type = UserType(user_type=user_type)\n\n\t\t\tdb.session.add(user_type)\n\t\t\tdb.session.commit()",
"def from_yaml_tag(self, ctx, tag, _serialization_context=None):\n tag = self.fix_yaml_tag(ctx, tag)\n asdftype = self._type_by_tag.get(tag)\n if asdftype is not None and _serialization_context is not None:\n _serialization_context._mark_extension_used(self._extension_by_type[asdftype])\n return asdftype",
"def __init__(self, typestr):\n from collections import defaultdict\n self.typestr = typestr\n self.ndims = 0\n self.funcs = []\n self.types = self.__class__.expandFTypeStr(typestr)\n #log(\"For typestr %s, got types of %s\" % (typestr, self.types))\n self.funcs = [self.parse(s) for s in self.types]\n self.times = defaultdict(float)\n self.times.update(typestr=typestr, types=self.types)",
"def read(cls, reader: Reader) -> AssetType:\n class_id = reader.read(SInt32)\n is_stripped = reader.read(Boolean)\n script_type_index = reader.read(SInt16)\n if class_id.value == 114:\n script_id = reader.read_bytes(16)\n else:\n script_id = None\n old_type_hash = reader.read_bytes(16)\n return cls(\n class_id, is_stripped, script_type_index, script_id,\n old_type_hash\n )",
"def read_type(self):\n return type_get_read_type(self)",
"def load(type_definition_path, type_definition_manager, logger):\n logger.info('Loading type definition {0}...'.format(type_definition_path))\n valid_extension = ('.yaml', '.csar')\n if not type_definition_path.endswith(valid_extension):\n raise exceptions.\\\n TypeDefinitionException('Type definition file has invalid extension')\n\n type_definition_file_path = service_template_utils.get(type_definition_path, None)\n type_definition = type_definition_manager.load_type_definition(type_definition_file_path)\n logger.info(\"Type definition loaded. The type definition's name is {0} and version is {1}\".\\\n format(type_definition.name, type_definition.version))",
"def extract(type_decl, arg, filename=None, data=None, handler_collection=None):\n\n if arg is not None:\n if isinstance(arg, str):\n filename = arg\n else:\n data = arg\n\n if data is None:\n if filename is None:\n raise ValueError('no data to load')\n with open(filename) as fp:\n # TODO @nrosenstein Load objects into ordered dicts.\n if filename.endswith('.yml') or filename.endswith('.yaml'):\n import yaml\n data = getattr(yaml, 'safe_load', yaml.load)(fp)\n elif filename.endswith('.cson'):\n import cson\n data = cson.load(fp)\n else:\n import json\n data = json.load(fp, object_pairs_hook=OrderedDict)\n\n if handler_collection is None:\n handler_collection = root_handler_collection\n\n context = Context(None, None, filename=filename,\n handler_collection=handler_collection)\n return handler_collection.load(type_decl, data, context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Obtains a generic description of the volume, containing the file system type, index, label and NTFS version. If with_size is True, the volume size is also included.
|
def get_description(self, with_size=True, with_index=True):
desc = ''
if with_size and self.size:
desc += '{0} '.format(self.get_formatted_size())
s = self.info.get('statfstype') or self.info.get('fsdescription') or '-'
if with_index:
desc += '{1}:{0}'.format(s, self.index)
else:
desc += s
if self.info.get('label'):
desc += ' {0}'.format(self.info.get('label'))
if self.info.get('version'): # NTFS
desc += ' [{0}]'.format(self.info.get('version'))
return desc
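
A minimal usage sketch, assuming get_description is callable as a plain module-level function; the SimpleNamespace stand-in and all field values (index, label, version) are hypothetical:

from types import SimpleNamespace

vol = SimpleNamespace(
    size=4 * 1024 ** 3,
    index=2,
    info={'statfstype': 'ntfs', 'label': 'DataDrive', 'version': '3.1'},
    get_formatted_size=lambda: '4.0 GiB',  # stands in for the real method shown later
)
print(get_description(vol))  # -> "4.0 GiB 2:ntfs DataDrive [3.1]"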
|
[
"def info(self):\n ret = libvirtmod.virStorageVolGetInfo(self._o)\n if ret is None: raise libvirtError ('virStorageVolGetInfo() failed', vol=self)\n return ret",
"def volume_size(self, volume, new_size=None):\n return self.request( \"volume-size\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n }, {\n 'is-fixed-size-flex-volume': [ bool, False ],\n 'is-readonly-flex-volume': [ bool, False ],\n 'is-replica-flex-volume': [ bool, False ],\n 'volume-size': [ basestring, False ],\n } )",
"def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)",
"def volume_get_filer_info(self):\n return self.request( \"volume-get-filer-info\", {\n }, {\n 'disk-types': [ basestring, False ],\n 'default-raidtype': [ basestring, False ],\n 'checksum-types': [ basestring, False ],\n 'root-volume': [ basestring, False ],\n 'raidgroup-size': [ RaidgroupSizeInfo, True ],\n 'allowed-raidtypes': [ RaidtypeInfo, True ],\n 'snapshots-max': [ int, False ],\n } )",
"def volume_wafl_info(self):\n return self.request( \"volume-wafl-info\", {\n }, {\n 'root-volume': [ basestring, False ],\n 'disk-types': [ basestring, False ],\n 'snapshots-max': [ int, False ],\n 'checksum-types': [ basestring, False ],\n } )",
"def volume_info(mnode, volname):\n return RestClient(mnode).handle_request(\"GET\",\n \"/v1/volumes/%s\" % volname,\n httplib.OK, None)",
"def __init__(\n self, name, path, vgname, used=False, discovered=None, attr=None, uuid=None,\n total=None, extent_size=None, stripes=1, stripesize=0, origin=None,\n lvm_command=None, lvm_lockfile=DEFAULT_LVM_LOCKFILE, lvm_timeout=DEFAULT_LVM_TIMEOUT,\n appname=None, verbose=0, version=__version__,\n base_dir=None, use_stderr=False, simulate=False, sudo=False,\n quiet=False,\n ):\n\n # Initialisation of the parent object\n super(LogicalVolume, self).__init__(\n name=name,\n path=path,\n vgname=vgname,\n attr=attr,\n uuid=uuid,\n used=used,\n discovered=discovered,\n lvm_command=lvm_command,\n lvm_lockfile=lvm_lockfile,\n lvm_timeout=lvm_timeout,\n appname=appname,\n verbose=verbose,\n version=version,\n base_dir=base_dir,\n use_stderr=use_stderr,\n simulate=simulate,\n sudo=sudo,\n quiet=quiet,\n )\n\n if self.discovered:\n self.extent_size = int(extent_size)\n extents_total = int(total / self.extent_size)\n self.set_extent_count(extents_total, extents_total)\n\n self._stripes = int(stripes)\n \"\"\"\n @ivar: number of stripes of this LV\n @type: int\n \"\"\"\n\n self._stripesize = long(stripesize)\n \"\"\"\n @ivar: size of a stripe in Bytes\n @type: long\n \"\"\"\n\n self._origin = origin\n \"\"\"\n @ivar: the origin device of a snapshot volume (LV name)\n @type: str\n \"\"\"\n\n self.devices = []\n \"\"\"\n @ivar: list of all PVs, where this LV lies as tuples\n with the PV device name and the number of the start extent\n @type: list of tuples\n \"\"\"\n\n self.initialized = True",
"def info_vd_size(self):\n ret = self._get_attr(\"infoVDSize\")\n return ret",
"def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )",
"def stat_volume(path, **kwargs):\n raw = SMBRawIO(\n path, mode=\"r\", share_access=\"rwd\", desired_access=FilePipePrinterAccessMask.FILE_READ_ATTRIBUTES, **kwargs\n )\n with SMBFileTransaction(raw) as transaction:\n query_info(transaction, FileFsFullSizeInformation)\n full_size = transaction.results[0]\n unit_in_bytes = full_size[\"sectors_per_unit\"].get_value() * full_size[\"bytes_per_sector\"].get_value()\n return SMBStatVolumeResult(\n total_size=full_size[\"total_allocation_units\"].get_value() * unit_in_bytes,\n caller_available_size=full_size[\"caller_available_units\"].get_value() * unit_in_bytes,\n actual_available_size=full_size[\"actual_available_units\"].get_value() * unit_in_bytes,\n )",
"def bld(cls, adapter, name, size=None, mount_type='rw'):\n vom = super(VOptMedia, cls)._bld(adapter)\n vom._media_name(name)\n if size is not None:\n vom._size(size)\n vom._mount_type(mount_type)\n return vom",
"def get_file_server_glusterfs_volume_type(sc):\n # type: (StorageClusterSettings) -> str\n try:\n voltype = sc.file_server.server_options[\n 'glusterfs']['volume_type'].lower()\n except KeyError:\n voltype = 'distributed'\n return voltype",
"def do_create_volume(sess, size, display_name, attach_it, chap_credentials, mode):\n\n try:\n _logger.info(\"Creating a new %d GB volume %s\", size, display_name)\n inst = sess.this_instance()\n if inst is None:\n raise Exception(\"OCI SDK error: couldn't get instance info\")\n _logger.debug('\\n availability_domain %s\\n compartment_id %s',\n inst.get_availability_domain_name(), inst.get_compartment_id())\n #\n # GT\n # vol = sess.create_volume(inst.get_compartment_id(),\n vol = sess.create_volume(sess.this_compartment().get_ocid(),\n inst.get_availability_domain_name(),\n size=size,\n display_name=display_name,\n wait=True)\n except Exception as e:\n _logger.debug(\"Failed to create volume\", exc_info=True)\n raise Exception(\"Failed to create volume\") from e\n\n _logger.info(\"Volume [%s] created\", vol.get_display_name())\n\n if not attach_it:\n return\n\n compat_info_message(gen_msg=\"Attaching the volume to this instance\", mode=mode)\n try:\n if chap_credentials:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=True)\n else:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=False)\n except Exception as e:\n _logger.debug('Cannot attach BV', exc_info=True)\n vol.destroy()\n raise Exception('Cannot attach BV') from e\n #\n # attach using iscsiadm commands\n compat_info_message(gen_msg=\"Attaching iSCSI device.\", mode=mode)\n\n vol_portal_ip = vol.get_portal_ip()\n vol_portal_port = vol.get_portal_port()\n vol_iqn = vol.get_iqn()\n vol_username = vol.get_user()\n vol_password = vol.get_password()\n retval = iscsiadm.attach(ipaddr=vol_portal_ip,\n port=vol_portal_port,\n iqn=vol_iqn,\n username=vol_username,\n password=vol_password,\n auto_startup=True)\n compat_info_message(compat_msg=\"iscsiadm attach Result: %s\" % iscsiadm.error_message_from_code(retval),\n gen_msg=\"Volume [%s] is attached.\" % vol.get_display_name(), mode=mode)\n if retval == 0:\n _logger.debug('Creation successful')\n if chap_credentials:\n _logger.debug('Attachment OK: saving chap credentials.')\n add_chap_secret(vol_iqn, vol_username, vol_password)\n return\n\n # here because of error case\n try:\n _logger.debug('Destroying the volume')\n vol.destroy()\n except Exception as e:\n _logger.debug(\"Failed to destroy volume\", exc_info=True)\n _logger.error(\"Failed to destroy volume: %s\", str(e))\n\n raise Exception('Failed to attach created volume: %s' % iscsiadm.error_message_from_code(retval))",
"def _get_volume_name(self):\n pass",
"def discover(self):\n\n cname = self.vgname + '/' + self.name\n\n if not self.exists():\n if self.verbose > 2:\n LOG.debug(\n _(\"LV %r doesn't exists, discovery not possible.\"), cname)\n return False\n\n if self.verbose > 2:\n LOG.debug(_(\"Discovering logical volume %r ...\"), cname)\n\n self.discovered = False\n self.devices = []\n\n attr_params = \"lv_name,vg_name,stripes,stripesize,lv_attr,lv_uuid,\"\n attr_params += \"devices,lv_path,vg_extent_size,lv_size,origin\"\n\n cmd_params = [\n \"lvs\",\n \"--nosuffix\",\n \"--noheadings\",\n \"--units\",\n \"b\",\n \"--separator\",\n \";\",\n \"-o\",\n attr_params,\n cname\n ]\n\n (ret_code, std_out, std_err) = self.exec_lvm(\n cmd_params, quiet=True, simulate=False, force=True)\n if ret_code:\n if ret_code == 5:\n LOG.debug(_(\"Logical volume %r not found.\"), cname)\n return False\n msg = _(\"Error %(rc)d getting LVM logical volume %(name)s: %(msg)s\") % {\n 'rc': ret_code, 'name': cname, 'msg': std_err}\n raise LvmVolumeError(msg)\n\n lines = std_out.split('\\n')\n\n devices = ''\n\n for line in lines:\n\n line = line.strip()\n if line == '':\n continue\n\n words = line.split(\";\")\n\n # lvname = words[0].strip()\n # vgname = words[1].strip()\n stripes = int(words[2])\n stripesize = int(words[3])\n attr = words[4].strip()\n uuid = words[5].strip()\n devices = words[6].strip()\n path = words[7].strip()\n extent_size = int(words[8])\n total = int(words[9])\n origin = words[10].strip()\n if origin == '':\n origin = None\n\n self._stripes = stripes\n self._stripesize = stripesize\n self._path = path\n self.attr = attr\n self._uuid = uuid\n self._origin = origin\n\n self.used = True\n\n self.discovered = True\n\n self.extent_size = extent_size\n extents_total = total / extent_size\n self.set_extent_count(extents_total, extents_total)\n\n break\n\n if self.discovered:\n match = re.search(r'(.*)\\((\\d+)\\)', devices)\n if match:\n self.add_device(match.group(1), int(match.group(2)))\n\n return self.discovered",
"def _extend_volume(self, name, new_size):\n LOG.debug('_extend__volume name: %s', name)\n params = {}\n params['volsize'] = ix_utils.get_bytes_from_gb(new_size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s/id/%s') % (\n FreeNASServer.REST_API_VOLUME,\n urllib.parse.quote_plus(\n self.configuration.ixsystems_dataset_path + '/' + name))\n ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,\n request_urn, jparams)\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while extending volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)",
"def volume_add(self, volume, disk_size_with_unit=None, mirror_disks=None, disk_size=None, force=None, disks=None, raid_group=None, disk_count=None):\n return self.request( \"volume-add\", {\n 'disk_size_with_unit': [ disk_size_with_unit, 'disk-size-with-unit', [ basestring, 'None' ], False ],\n 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],\n 'disk_size': [ disk_size, 'disk-size', [ int, 'None' ], False ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n 'disks': [ disks, 'disks', [ DiskInfo, 'None' ], True ],\n 'raid_group': [ raid_group, 'raid-group', [ basestring, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'disk_count': [ disk_count, 'disk-count', [ int, 'None' ], False ],\n }, {\n 'bad-disks': [ DiskInfo, True ],\n } )",
"def show_volume(svm_name) -> None:\n print()\n print(\"Getting Volume Details\")\n print(\"===================\")\n try:\n for volume in Volume.get_collection(\n **{\"svm.name\": svm_name}, fields=\"uuid\"):\n print(\n \"Volume name:-%s ; Volume uuid:-%s \" %\n (volume.name, volume.uuid))\n except NetAppRestError as error:\n print(\"Error:- \" % error.http_err_response.http_response.text)\n print(\"Exception caught :\" + str(error))",
"def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )",
"def test_create_volume_no_noncustomized_offering_with_size(self):\n\n location = self.driver.list_locations()[0]\n\n self.assertRaises(\n LibcloudError,\n self.driver.create_volume,\n 'vol-0', location, 11)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Obtains the size of the volume in a human-readable format (e.g. in TiB, GiB or MiB).
|
def get_formatted_size(self):
if self.size is not None:
if self.size < 1024:
return "{0} B".format(self.size)
elif self.size < 1024 ** 2:
return "{0} KiB".format(round(self.size / 1024, 2))
elif self.size < 1024 ** 3:
return "{0} MiB".format(round(self.size / 1024 ** 2, 2))
elif self.size < 1024 ** 4:
return "{0} GiB".format(round(self.size / 1024 ** 3, 2))
else:
return "{0} TiB".format(round(self.size / 1024 ** 4, 2))
else:
return self.size
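
A short sketch of the binary-unit thresholds above, assuming get_formatted_size is available as a plain function; SimpleNamespace objects stand in for volumes and the byte counts are arbitrary:

from types import SimpleNamespace

for n in (512, 2048, 5 * 1024 ** 2, 3 * 1024 ** 3, 2 * 1024 ** 4):
    print(get_formatted_size(SimpleNamespace(size=n)))
# prints: 512 B, 2.0 KiB, 5.0 MiB, 3.0 GiB, 2.0 TiB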
|
[
"def get_size(self):\n units = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\")\n for i, unit in enumerate(units):\n high = 10**(i*3)\n if self.size < high*1000:\n return f\"{round(self.size/high, 3)} {unit}\"",
"def size_unit(self) -> str:\n return pulumi.get(self, \"size_unit\")",
"def size_human(size):\r\n\r\n if size:\r\n _abbrevs = [\r\n (1<<50L, 'P'),\r\n (1<<40L, 'T'),\r\n (1<<30L, 'G'),\r\n (1<<20L, 'M'),\r\n (1<<10L, 'k'),\r\n (1, 'bytes')]\r\n\r\n for factor, suffix in _abbrevs:\r\n if size > factor:\r\n break\r\n if factor == 1:\r\n return \"%d %s\" % (size, suffix)\r\n else:\r\n return \"%.3f%s\" % (float(size)/float(factor), suffix)",
"def human_readable_file_size(size_in_bytes):\n return size(size_in_bytes, system=alternative)",
"def si_size(b):\n UNITS = ('B', 'KB', 'MB', 'GB', 'TB', 'PB')\n index = 0\n while b >= 1024 and index < len(UNITS) - 1:\n b /= 1024.0\n index += 1\n return \"%.1f %s\" % (b, UNITS[index])",
"def human_file_size(size):\n suffixes = ' kMGTPEH'\n if size == 0:\n num_scale = 0\n else:\n num_scale = int(math.floor(math.log(size) / math.log(1000)))\n if num_scale > 7:\n suffix = '?'\n else:\n suffix = suffixes[num_scale]\n num_scale = int(math.pow(1000, num_scale))\n value = size / num_scale\n str_value = str(value)\n if str_value[2] == '.':\n str_value = str_value[:2]\n else:\n str_value = str_value[:3]\n return \"{0:>3s}{1}\".format(str_value, suffix)",
"def size_converter(_bytes: int) -> str:\n KB = _bytes / float(1 << 10)\n MB = _bytes / float(1 << 20)\n GB = _bytes / float(1 << 30)\n\n if GB > 1:\n return f\"{round(GB, 2):,} GB\"\n elif MB > 1:\n return f\"{round(MB, 2):,} MB\"\n\n return f\"{round(KB, 2):,} KB\"",
"def volume_usage_in_bytes(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"volume_usage_in_bytes\")",
"def get_display_size(size):\n return \"{} ({}) ({})\".format(\n size, bytes_to_human(size, binary=True),\n bytes_to_human(size, binary=False))",
"def formatSize(self):\n return format_size(self.getSize())",
"def fileSizeAsMb(cls, size):\n for q in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if size < 1024 or q == \"TB\":\n s = str(round(size, 1))\n if s.endswith(\".0\"):\n s = s[:-2]\n return \"%s %s\" % (s, q)\n size = size / 1024.0\n # not reachable\n return `size`",
"def format_size(size):\n if abs(size) < 1000:\n return str(size) + 'B'\n\n for unit in ('k', 'M', 'G'):\n size /= 1000\n if abs(size) < 1000:\n return SIZE_FORMAT.format(size, unit)\n\n return SIZE_FORMAT.format(size / 1000, 'T')",
"def convert_magnitude(byte_value):\n \n if byte_value < 1024:\n \n # Bytes\n size_as_string = '%dB' % byte_value\n\n elif byte_value < 1048576:\n\n # Kilo.\n size_as_string = '%.2fK' % (1.0 * byte_value / 1024)\n\n elif byte_value < 1073741824:\n\n # Mega\n size_as_string = '%.2fM' % (1.0 * byte_value / 1048576)\n\n else:\n\n # Giga\n size_as_string = '%.2fG' % (1.0 * byte_value / 1073741824)\n \n ######################\n return size_as_string\n ######################",
"def _convert_size(input_size):\n if input_size == 0:\n return '0B'\n\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(input_size, 1024)))\n power = math.pow(1024, i)\n size = round(input_size / power, 2)\n return '%s %s' % (size, size_name[i])",
"def generate_human_readable_size(byte_size: int) -> str:\n size_measurement_units = (('KiB', 1024), ('MiB', 1024**2), ('GiB', 1024**3), ('TiB', 1024**4))\n suffix = None\n divisor = None\n for u, m in size_measurement_units:\n if byte_size >= m:\n suffix = u\n divisor = m\n\n if suffix and divisor:\n return f'{round(byte_size / divisor, 1)} {suffix}'\n return f'{byte_size}B'\n # return f'{round(byte_size/divisor, 1)} {suffix}'",
"def volume_size_total(self, volume, human_readable=True):\r\n volume = self._get_volume(volume)\r\n if volume is not None and self._volume_mounted(volume[\"devicefile\"]):\r\n return_data = int(volume[\"size\"])\r\n if human_readable:\r\n return FormatHelper.bytes_to_readable(\r\n return_data)\r\n else:\r\n return return_data",
"def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"maximum_volume_size\")",
"def fmt_binary_size(size):\n units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB']\n\n unit = 0\n for unit in range(0, len(units)):\n if size < 1024:\n break\n size /= 1024.0\n\n size = int(math.ceil(size))\n\n return f'{size} {units[unit]}'",
"def nice_size(size):\n words = [ 'bytes', 'Kb', 'Mb', 'Gb' ]\n try:\n size = float( size )\n except:\n return '??? bytes'\n for ind, word in enumerate(words):\n step = 1024 ** (ind + 1)\n if step > size:\n size = size / float(1024 ** ind)\n out = \"%.1f %s\" % (size, word)\n return out\n return '??? bytes'",
"def info_vd_size(self):\n ret = self._get_attr(\"infoVDSize\")\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the FS type from the blkid command.
|
def _get_blkid_type(self):
try:
result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()])
if not result:
return None
# noinspection PyTypeChecker
blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result))
self.info['blkid_data'] = blkid_result
if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result:
return blkid_result.get('PTTYPE')
else:
return blkid_result.get('TYPE')
except Exception:
return None # returning None is better here, since we do not care about the exception in determine_fs_type
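
To make the parsing step concrete, here is a hedged sketch of how the regular expression above turns a blkid KEY="value" line into a dict; the sample line is hypothetical and real blkid output varies per system:

import re

sample = '/dev/loop0: VERSION="3.1" TYPE="ntfs" USAGE="filesystem" PTTYPE="dos"'
data = dict(re.findall(r'([A-Z]+)="(.+?)"', sample))
# PTTYPE is only used as a fallback when no TYPE key is present, mirroring the branch above.
fstype = data.get('PTTYPE') if ('PTTYPE' in data and 'TYPE' not in data) else data.get('TYPE')
print(fstype)  # -> ntfs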
|
[
"def GetFilesystem(path):\n cmd = ['lsblk', path, '-f', '-o', 'FSTYPE', '-n']\n log.info('Running {0!s}'.format(cmd))\n fstype = subprocess.check_output(cmd).split()\n if not fstype:\n # Lets wait a bit for any previous blockdevice operation to settle\n time.sleep(2)\n fstype = subprocess.check_output(cmd).split()\n\n if len(fstype) != 1:\n raise TurbiniaException(\n '{0:s} should contain exactly one partition, found {1:d}'.format(\n path, len(fstype)))\n return fstype[0].decode('utf-8').strip()",
"def udev_device_get_format(info):\n return info.get(\"ID_FS_TYPE\")",
"def _get_fstype_from_parser(self, fstype=None):\n if not fstype:\n if self.index in self.disk.parser.fstypes:\n fstype = self.disk.parser.fstypes[self.index]\n elif '*' in self.disk.parser.fstypes:\n fstype = self.disk.parser.fstypes['*']\n elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None:\n fstype = \"?\" + self.disk.parser.fstypes['?']\n else:\n fstype = \"\"\n\n if not fstype:\n self.filesystem = None\n elif isinstance(fstype, FileSystem):\n fstype.volume = self\n self.filesystem = fstype\n elif fstype in VOLUME_SYSTEM_TYPES:\n self.volumes.vstype = fstype\n self.filesystem = FILE_SYSTEM_TYPES[\"volumesystem\"](self)\n elif fstype.startswith(\"?\"):\n fallback = FILE_SYSTEM_TYPES[fstype[1:]](self)\n self.filesystem = filesystems.FallbackFileSystem(self, fallback)\n else:\n self.filesystem = FILE_SYSTEM_TYPES[fstype](self)",
"def get_file_server_glusterfs_volume_type(sc):\n # type: (StorageClusterSettings) -> str\n try:\n voltype = sc.file_server.server_options[\n 'glusterfs']['volume_type'].lower()\n except KeyError:\n voltype = 'distributed'\n return voltype",
"def _get_disk_type(self, pool_name, result):\n pool_info = self.helper._find_pool_info(pool_name, result)\n if not pool_info:\n return None\n\n pool_disk = []\n for i, x in enumerate(['ssd', 'sas', 'nl_sas']):\n if pool_info['TIER%dCAPACITY' % i] != '0':\n pool_disk.append(x)\n\n if len(pool_disk) > 1:\n pool_disk = ['mix']\n\n return pool_disk[0] if pool_disk else None",
"def managed_disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"managed_disk_type\")",
"def _get_magic_type(self):\n\n try:\n with io.open(self.disk.get_fs_path(), \"rb\") as file:\n file.seek(self.offset)\n fheader = file.read(min(self.size, 4096) if self.size else 4096)\n except IOError:\n logger.exception(\"Failed reading first 4K bytes from volume.\")\n return None\n\n # TODO fallback to img-cat image -s blocknum | file -\n # if we were able to load the module magic\n try:\n # noinspection PyUnresolvedReferences\n import magic\n\n if hasattr(magic, 'from_buffer'):\n # using https://github.com/ahupp/python-magic\n logger.debug(\"Using python-magic Python package for file type magic\")\n result = magic.from_buffer(fheader)\n self.info['magic_data'] = result\n return result\n\n elif hasattr(magic, 'open'):\n # using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)\n logger.debug(\"Using python-magic system package for file type magic\")\n ms = magic.open(magic.NONE)\n ms.load()\n result = ms.buffer(fheader)\n ms.close()\n self.info['magic_data'] = result\n return result\n\n else:\n logger.warning(\"The python-magic module is not available, but another module named magic was found.\")\n\n except ImportError:\n logger.warning(\"The python-magic module is not available.\")\n except AttributeError:\n logger.warning(\"The python-magic module is not available, but another module named magic was found.\")\n return None # returning None is better here, since we do not care about the exception in determine_fs_type",
"def get_disk_type(self, vmdk):\n disk_type = None\n\n def extract_disk_type(file):\n for line in file:\n info = line.split('=')\n\n if info == []:\n continue\n if info[0] == 'createType':\n #info format => ['createTyel', disk_type'\\n']\n createType_without_newline = info[1][:-1]\n return createType_without_newline\n\n if vmdk.startswith(('http:')):\n with closing(urlopen(vmdk)) as page:\n disk_type = extract_disk_type(page)\n else:\n with open(vmdk, 'r') as f:\n disk_type = extract_disk_type(f)\n\n if not disk_type:\n excep_msg = _(\"Could not extract disk_type from VMDK file.\")\n LOG.error(excep_msg)\n raise exceptions.VimException(excep_msg)\n\n return disk_type",
"def get_drive_type(self):",
"def get_mount_nfs_info():\n command='mount | grep \"/mnt/sysimg\"'\n out = connections.execute_mml_without_check(command)\n\n if out.count('on /mnt/sysimg type nfs') == 1:\n return out.split(\"on /mnt/sysimg type nfs\")[0].strip()",
"def svn_fs_type(*args) -> \"char const **\":\n return _fs.svn_fs_type(*args)",
"def GetFilesystemBlockSize(options):\n\n config, partitions = LoadPartitionConfig(options)\n print config['metadata']['fs_block_size']",
"def parse_fs_command(self, command, nargs=0, ret=str):\r\n\t\targs = shlex.split(command)\r\n\t\tif len(args) < 1 or (len(args) != nargs+1 and nargs != -1):\r\n\t\t\tself.stdout.write(Text(\"Error: invalid argument count!\\n\", \"red\"))\r\n\t\t\treturn None, None\r\n\t\ti = args[0]\r\n\t\tif i not in self.FSIs:\r\n\t\t\tself.stdout.write(Text(\"Error: Interface not found!\\n\", \"red\"))\r\n\t\t\treturn None, None\r\n\t\tif ret == str:\r\n\t\t\tif len(args) > 1:\r\n\t\t\t\targs = \" \".join(args[1:])\r\n\t\t\telse:\r\n\t\t\t\targs = \"\"\r\n\t\telif ret == tuple:\r\n\t\t\targs = args[1:]\r\n\t\telse:\r\n\t\t\traise ValueError(\"Unknown return type!\")\r\n\t\tfsi = self.FSIs[i]\r\n\t\treturn fsi, args",
"def get_ent_type(self, line):\n\n\t\treturn self.kb_shm.dataType(line)",
"def get_disk_format(file_path):\n file_format = QEMUIMG.get_disk_format(file_path)\n\n if file_format == 'vmdk':\n # Look at the VMDK file header to determine the sub-format\n with open(file_path, 'rb') as f:\n # The header contains a fun mix of binary and ASCII, so ignore\n # any errors in decoding binary data to strings\n header = f.read(1000).decode('ascii', 'ignore')\n # Detect the VMDK format from the output:\n match = re.search('createType=\"(.*)\"', header)\n if not match:\n raise RuntimeError(\"Could not find VMDK 'createType' in the \"\n \"file header:\\n{0}\".format(header))\n vmdk_format = match.group(1)\n logger.info(\"VMDK sub-format is '{0}'\".format(vmdk_format))\n return (file_format, vmdk_format)\n else:\n # No known/applicable sub-format\n return (file_format, None)",
"def get_fsid(self):\n self.require_state(\"connected\")\n buf_len = 37\n fsid = create_string_buffer(buf_len)\n ret = run_in_thread(self.librados.rados_cluster_fsid,\n (self.cluster, byref(fsid), c_size_t(buf_len)))\n if ret < 0:\n raise make_ex(ret, \"error getting cluster fsid\")\n return fsid.value",
"def do_filetype(self, line):\n if len(line) == 0:\n self.print(\"Must provide a filename\")\n return\n filename = resolve_path(line)\n mode = auto(get_mode, filename)\n if mode_exists(mode):\n if mode_isdir(mode):\n self.print('dir')\n elif mode_isfile(mode):\n self.print('file')\n else:\n self.print('unknown')\n else:\n self.print('missing')",
"def _get_disk_type(self, pool_info):\n pool_disks = []\n for i, x in enumerate(constants.TIER_DISK_TYPES):\n if (pool_info.get('TIER%dCAPACITY' % i) and\n pool_info.get('TIER%dCAPACITY' % i) != '0'):\n pool_disks.append(x)\n\n if len(pool_disks) > 1:\n pool_disks = ['mix']\n\n return pool_disks[0] if pool_disks else None",
"def file_format(self):\n return self._get_val_str(_DISK_FILEFORMAT)",
"def shell_type(self):\n return get_kind(type(self))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the volume for its magic bytes and returns the magic.
|
def _get_magic_type(self):
try:
with io.open(self.disk.get_fs_path(), "rb") as file:
file.seek(self.offset)
fheader = file.read(min(self.size, 4096) if self.size else 4096)
except IOError:
logger.exception("Failed reading first 4K bytes from volume.")
return None
# TODO fallback to img-cat image -s blocknum | file -
# if we were able to load the module magic
try:
# noinspection PyUnresolvedReferences
import magic
if hasattr(magic, 'from_buffer'):
# using https://github.com/ahupp/python-magic
logger.debug("Using python-magic Python package for file type magic")
result = magic.from_buffer(fheader)
self.info['magic_data'] = result
return result
elif hasattr(magic, 'open'):
# using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)
logger.debug("Using python-magic system package for file type magic")
ms = magic.open(magic.NONE)
ms.load()
result = ms.buffer(fheader)
ms.close()
self.info['magic_data'] = result
return result
else:
logger.warning("The python-magic module is not available, but another module named magic was found.")
except ImportError:
logger.warning("The python-magic module is not available.")
except AttributeError:
logger.warning("The python-magic module is not available, but another module named magic was found.")
return None # returning None is better here, since we do not care about the exception in determine_fs_type
|
[
"def magic_number(self) -> 'bytes':\n return self._magic",
"def check_magic_no(header):\n try:\n magic_no = ((header[0] << 8) + header[1]).to_bytes(2, 'big')\n if int.from_bytes(magic_no, 'big') != 0x497E:\n sys.exit(1)\n print('Magic number acceptable.\\n')\n\n except:\n print('Error while checking the magic number\\n')\n sys.exit(1)",
"def _get_volume(self, volume_devicefile):\r\n if self._data is not None:\r\n for volume in self._data[\"volumes\"]:\r\n if volume[\"devicefile\"] == volume_devicefile:\r\n return volume",
"def read_raw(self):\n\n return self.read_volume(\"/volumes/raw\")",
"def get_vbmeta_size(vbmeta_bytes):\n\n # Keep in sync with |AvbVBMetaImageHeader|.\n AVB_MAGIC = b'AVB0' # pylint: disable=C0103\n AVB_VBMETA_IMAGE_HEADER_SIZE = 256 # pylint: disable=C0103\n FORMAT_STRING = ( # pylint: disable=C0103\n '!4s2L' # magic, 2 x version.\n '2Q' # 2 x block size: Authentication and Auxiliary blocks.\n )\n\n if len(vbmeta_bytes) < struct.calcsize(FORMAT_STRING):\n return 0\n\n data = vbmeta_bytes[:struct.calcsize(FORMAT_STRING)]\n (magic, _, _,\n authentication_block_size,\n auxiliary_data_block_size) = struct.unpack(FORMAT_STRING, data)\n\n if magic == AVB_MAGIC:\n return (AVB_VBMETA_IMAGE_HEADER_SIZE +\n authentication_block_size +\n auxiliary_data_block_size)\n return 0",
"def read_mbr(self):\n\n\t\tself.volume_object.seek(0)\n\t\tbuf = self.volume_object.read(512)\n\n\t\tif len(buf) != 512:\n\t\t\traise ValueError('Cannot read the MBR code and data')\n\n\t\tif struct.unpack('<H', buf[510 : 512])[0] != 0xAA55:\n\t\t\traise ValueError('Boot signature not found')\n\n\t\tis_boot_code_present = buf[0] != 0 and buf[1] != 0\n\t\tdisk_signature = struct.unpack('<L', buf[440 : 444])[0]\n\n\t\treturn StandardMBR(is_boot_code_present, disk_signature)",
"def magic(self):\n try:\n with magic.Magic() as m:\n return m.id_filename(self.path)\n except Exception:\n return ''",
"def has_raw(self):\n return self.__has_volume(\"/volumes/raw\")",
"def _determine_volume(self):\n vol_id = self.resource.volume_id or self._volume_id_in_node_data() or (self.resource.device and self._currently_attached_volume(self.resource.env.config.aws.instance_id, self.resource.device))\n if not vol_id:\n raise Fail(\"volume_id attribute not set and no volume id is set in the node data for this resource (which is populated by action create)\")\n\n # check that volume exists\n vol = self._volume_by_id(vol_id)\n if not vol:\n raise Fail(\"No volume with id %s exists\" % vol_id)\n\n return vol",
"def _identify(self, blocknum):\n header = struct.Struct(\">LLQ\")\n with open(self._blockpath(blocknum)) as fh:\n (magicnumber, version, sequence) = header.unpack(fh.read(header.size))\n assert magicnumber == self._magicnumber_block\n assert version == self._version\n return sequence",
"def test_manage_volume_volume_type_by_uuid(self):\n body = {'volume': {'host': 'host_ok',\n 'ref': 'fake_ref',\n 'volume_type': fake.VOLUME_TYPE_ID,\n 'bootable': True}}\n res = self._get_resp_post(body)\n self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)",
"def _currently_attached_volume(self, instance_id, device):\n volumes = self.ec2.get_all_volumes()\n for v in volumes:\n if v.attach_data and v.attach_data.instance_id == instance_id and v.attach_data.device == device:\n return v",
"def list_files_on_volume(self, volume):\n try:\n self.get_volume(volume)\n except DeploymentError as exc:\n raise exc\n\n res = self._dispatch(['run', '--rm', '-v', '{}:/data'.format(volume), 'busybox', 'ls', '/data'])\n assert len(res.stderr) == 0\n\n return res",
"def get_volumes_owned_binary(self):\n vol_str = \"\"\n for val in self.vol_arr:\n vol_str += \"{0:032b}\".format(val)[::-1]\n return vol_str",
"def boundingBoxVolume(self):\n return _cpp_methods.boundingBoxVolume(self)",
"def get_volume_info(disk_snapshot_id):\n output = subprocess.check_output([\n 'qemu-img',\n 'info',\n '--output=json',\n disk_snapshot_id,\n ])\n return json.loads(str(output))",
"def mount_check(self, volname):\n if not self.vol_dict[volname][\"mounted\"]:\n log.error(\"Volume {0} is not mounted\".format(volname))\n return None\n return self.vol_dict[volname][\"Local\"]",
"def get_volume(self, name):\n try:\n return self._docker.volumes.get(name)\n except (docker.errors.NotFound, docker.errors.APIError) as exc:\n raise DeploymentError('Could not find volume {}: {}'.format(name, exc if exc else ''))",
"def set_magic(cls, magic: int) -> int:\n cls.MAGIC = magic\n return cls.MAGIC"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a label that is safe to add to a path in the mountpoint for this volume.
|
def get_safe_label(self):
if self.info.get('label') == '/':
return 'root'
suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else ""
if suffix and suffix[0] == '_':
suffix = suffix[1:]
if len(suffix) > 2 and suffix[-1] == '_':
suffix = suffix[:-1]
return suffix
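
A brief sketch of the sanitisation rules above, assuming get_safe_label is callable as a plain function; the labels are hypothetical examples:

from types import SimpleNamespace

for label in ('/', 'System Reserved', ' (EFI) ', 'data/backup'):
    print(get_safe_label(SimpleNamespace(info={'label': label})))
# prints: root, System_Reserved, EFI, data_backup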
|
[
"def udev_device_get_label(info):\n return info.get(\"ID_FS_LABEL\")",
"def format_label(self, label):\n# logging.debug(\"format_label(%s (type %s))\" % (label, type(label)))\n if isinstance(label, basestring):\n if config_pathtools.is_server_path(label):\n return config_pathtools.server_path_to_basename(label)\n else:\n return label\n elif isinstance(label, tuple):\n return \"\\\\n\".join([self.format_label(x) for x in label])\n else:\n raise TypeError(\"Unsupported type: %s\" % type(label))",
"def pathify(label):\n return re.sub(r'-$', '',\n re.sub(r'-+', '-',\n re.sub(r'[^\\w/]', '-', unidecode(label).lower())))",
"def _make_private_label(label, suffix = \"\"):\n package, _, name = label.rpartition(\":\")\n prefix, _, name = name.rpartition(\"/\")\n if prefix:\n prefix = prefix + \"/\"\n return package + \":\" + prefix + _make_private_name(name, suffix)",
"def _make_public_label(label, suffix = \"\"):\n package, _, name = label.rpartition(\":\")\n prefix, _, name = name.rpartition(\"/\")\n if prefix:\n prefix = prefix + \"/\"\n return package + \":\" + prefix + _make_public_name(name, suffix)",
"def getLabelName(self):\n\n if self.type == operation.LABEL:\n return self.labelName",
"def get_label_name(self):\n try:\n return self.label_name\n except:\n return ''",
"def data_label(path):\n if os.path.basename(path) == '':\n path = os.path.dirname(path)\n _, fname = os.path.split(path)\n name, _ = os.path.splitext(fname)\n return name",
"def label(self, labelpos):\n warnings.warn(\"The ldns_rdf.label() method is scheduled\" +\n \" to be deprecated in future releases.\" +\n \" Convert the ldns_rdf to ldsn_dname and the use its\" +\n \" methods.\", PendingDeprecationWarning, stacklevel=2)\n return _ldns.ldns_dname_label(self, labelpos)\n #parameters: const ldns_rdf *, uint8_t,\n #retvals: ldns_rdf *",
"def label():\n return _make_type(_core.LLVMLabelType(), TYPE_LABEL)",
"def create_node_label(mutation: dict) -> str:\n return mutation['path'].name",
"def get_label(path):\n\n\t# see if .switch_label exists\n\n\tif os.path.isfile(os.path.join(path, '.switch_label')):\n\t\ttry:\n\t\t\twith open(os.path.join(path, '.switch_label')) as f:\n\t\t\t\tlabel = f.readline().strip()\n\n\t\t\t\tif len(label) > 0:\n\t\t\t\t\treturn label\n\n\t\texcept OSError:\n\t\t\tpass\n\n\t# check if the directory name has any stuff appended to it\n\n\tdirname = os.path.basename(path)\n\n\tif dirname.startswith('rootfs_'):\n\t\tlabel = dirname[len('rootfs_'):]\n\n\t\tif len(label) > 0:\n\n\t\t\t# save label for next occasion\n\n\t\t\ttry:\n\t\t\t\twith open(os.path.join(path, '.switch_label'), 'w') as f:\n\t\t\t\t\tf.write(label + '\\n')\n\t\t\texcept OSError:\n\t\t\t\tpass\n\n\t\t\treturn label\n\n\t# see if any *release files exist in /etc/\n\n\trlsfiles = glob.glob(os.path.join(path, 'etc', '*release')) + glob.glob(os.path.join(path, 'usr', 'lib', 'os-release*'))\n\n\tif len(rlsfiles) > 0:\n\t\trlslines = []\n\n\t\tfor file in rlsfiles:\n\t\t\ttry:\n\t\t\t\twith open(file) as f:\n\t\t\t\t\trlslines += f.readlines()\n\t\t\texcept OSError:\n\t\t\t\tpass\n\n\t\tname = ['', '', ''] # ID || DISTRIB_ID || NAME\n\t\tvers = ['', '', ''] # DISTRIB_CODENAME || DISTRIB_RELEASE || VERSION_ID\n\n\t\tfor line in rlslines:\n\t\t\tkv = line.split('=', 1)\n\n\t\t\tif len(kv) < 2:\n\t\t\t\tcontinue\n\n\t\t\tkey = kv[0].strip().strip('\"\\'').lower()\n\t\t\tval = kv[1].strip().strip('\"\\'').lower()\n\n\t\t\tif len(val) == 0:\n\t\t\t\tcontinue\n\n\t\t\tif key == 'id':\n\t\t\t\tname[0] = val\n\t\t\telif key == 'distrib_id':\n\t\t\t\tname[1] = val\n\t\t\telif key == 'name':\n\t\t\t\tname[2] = val\n\n\t\t\tif key == 'distrib_codename':\n\t\t\t\tvers[0] = val\n\t\t\telif key == 'distrib_release':\n\t\t\t\tvers[1] = val\n\t\t\telif key == 'version_id':\n\t\t\t\tvers[2] = val\n\n\t\tname = list(filter(None, name))\n\t\tvers = list(filter(None, vers))\n\n\t\tif len(name) > 0:\n\t\t\tlabel = name[0] + ('_' + vers[0] if len(vers) > 0 else '')\n\n\t\t\t# save label for next occasion\n\n\t\t\ttry:\n\t\t\t\twith open(os.path.join(path, '.switch_label'), 'w') as f:\n\t\t\t\t\tf.write(label + '\\n')\n\t\t\texcept OSError:\n\t\t\t\tpass\n\n\t\t\treturn label\n\n\t# oh well\n\n\treturn ''",
"def _get_volume_name(self):\n pass",
"def get_label_db_path(self, stage):\n raise NotImplementedError('Please implement me')",
"def write_label(self, label):\n self.write_vm_cmd('label', label)",
"def get_label(self, linode_id):\n\n data = self.linode_info(linode_id)\n return data.split('\\n')[0].split(':')[-1].strip()",
"def label(self):\n if self.tex is None:\n name_tex = r'{\\rm %s}' % text2tex(self.name)\n else:\n name_tex = self.tex\n\n if self.units == ureg.dimensionless:\n units_tex = ''\n else:\n units_tex = r' \\; \\left( {:~L} \\right)'.format(self.units)\n\n return name_tex + units_tex",
"def id_for_label(value):\n return f\"labels->{value}\"",
"def label(self, graph, node, valid_name):\n return self.depending_library.link_label(graph, node, valid_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method to call vshadowmount and mount NTFS volume shadow copies.
|
def detect_volume_shadow_copies(self):
volume = self.volumes._make_subvolume(flag='alloc', offset=0, fstype='vss-container')
volume.mount()
return volume.volumes
|
[
"def mount(self, volume_id, client_name, mountpath, do_vssprotection=True):\n return self._snap_operation(0, volume_id, client_name, mountpath, do_vssprotection)",
"def mount_ss(self):\n if match_fs(self.mp, ['nilfs', 'nilfs2']):\n self.mount_tmpfs()\n if not self.passive:\n self.thin_out_snapshots()\n self.do_mount_ss(False)",
"def _mount_volumes(volumes):\n\n user_data_script_section = ''\n\n for volume in volumes:\n device = volume.device\n vol_type = volume.vol_type\n directory = volume.mount\n\n user_data_script_section += f\"\"\"\nmkfs -t {vol_type} {device}\nls {directory} || mkdir {directory}\nmount {device} {directory}\n\"\"\"\n\n return user_data_script_section",
"def volume_mount(self, volume_name, junction_path, export_policy_override=None, activate_junction=None):\n return self.request( \"volume-mount\", {\n 'export_policy_override': [ export_policy_override, 'export-policy-override', [ bool, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n 'activate_junction': [ activate_junction, 'activate-junction', [ bool, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n }, {\n } )",
"def mount(device, mountpoint, *args, readonly=False, mkfs=False):\n raise NotImplementedError(\"Contribute on github.com/alej0varas/pybolator\")",
"def _mount_docker_tmpfs(newroot_norm):\n # /etc/docker as temp fs as dockerd create /etc/docker/key.json\n fs_linux.mount_tmpfs(newroot_norm, '/etc/docker')",
"def virtdisk_MirrorVirtualDisk(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"VirtualDiskHandle\", \"Flags\", \"Parameters\", \"Overlapped\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def create_shadow_dirs(self) -> None:\n for shadow_dir in self.shadow_directories:\n # setup shadow and src paths, using node unique paths when configured\n shadow_path = Path(shadow_dir.path)\n if shadow_dir.src is None:\n src_path = shadow_path\n else:\n src_path = Path(shadow_dir.src)\n if shadow_dir.has_node_paths:\n src_path = src_path / self.node.name\n # validate shadow and src paths\n if not shadow_path.is_absolute():\n raise CoreError(f\"shadow dir({shadow_path}) is not absolute\")\n if not src_path.is_absolute():\n raise CoreError(f\"shadow source dir({src_path}) is not absolute\")\n if not src_path.is_dir():\n raise CoreError(f\"shadow source dir({src_path}) does not exist\")\n # create root of the shadow path within node\n logger.info(\n \"node(%s) creating shadow directory(%s) src(%s) node paths(%s) \"\n \"templates(%s)\",\n self.node.name,\n shadow_path,\n src_path,\n shadow_dir.has_node_paths,\n shadow_dir.templates,\n )\n self.node.create_dir(shadow_path)\n # find all directories and files to create\n dir_paths = []\n file_paths = []\n for path in src_path.rglob(\"*\"):\n shadow_src_path = shadow_path / path.relative_to(src_path)\n if path.is_dir():\n dir_paths.append(shadow_src_path)\n else:\n file_paths.append((path, shadow_src_path))\n # create all directories within node\n for path in dir_paths:\n self.node.create_dir(path)\n # create all files within node, from templates when configured\n data = self.data()\n templates = TemplateLookup(directories=src_path)\n for path, dst_path in file_paths:\n if shadow_dir.templates:\n template = templates.get_template(path.name)\n rendered = self._render(template, data)\n self.node.create_file(dst_path, rendered)\n else:\n self.node.copy_file(path, dst_path)",
"def virtdisk_CreateVirtualDisk(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"VirtualStorageType\", \"Path\", \"VirtualDiskAccessMask\", \"SecurityDescriptor\", \"Flags\", \"ProviderSpecificFlags\", \"Parameters\", \"Overlapped\", \"Handle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def createExtendedMarker(self, device):\n mountpoint = tempfile.mkdtemp(\".hc\", \".\", \"/mnt\")\n args = [\"/bin/mount\", \"-t\", \"ext3\", device, mountpoint]\n mount = self.callProcess(args)\n ret = self.waitProcess(mount)\n if ret == 0:\n marker = os.path.join(mountpoint, \".extended_volume\")\n os.mknod(marker)\n hidden = os.path.join(mountpoint, \".hidden\")\n fd = open(hidden, \"w\")\n fd.write(\"lost+found\\nVirtualBox VMs\\n\")\n fd.close()\n os.chown(mountpoint, self.uid, self.gid)\n os.chown(marker, self.uid, self.gid)\n os.chown(hidden, self.uid, self.gid)\n args = [\"/bin/umount\", \"-l\", device]\n umount = self.callProcess(args)\n ret = self.waitProcess(umount)\n if ret != 0:\n self.failed(_(\"Could not unmount device. The error returned was:\\n%i\") % ret)\n else:\n self.failed(\"mount failed with %i\" % ret)\n try:\n os.rmdir(mountpoint)\n except:\n self.failed(_(\"Could not remove temp dir. The error returned was:\\n%s\") % sys.exc_info())",
"def _ScanVolumeSystemRootNode(\n self, scan_context, scan_node, auto_recurse=True):\n if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:\n # For VSS add a scan node for the current volume.\n path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)\n if path_spec:\n scan_context.AddScanNode(path_spec, scan_node.parent_node)\n\n # Determine the path specifications of the sub file entries.\n try:\n file_entry = resolver.Resolver.OpenFileEntry(\n scan_node.path_spec, resolver_context=self._resolver_context)\n except errors.BackEndError:\n # Note that because pytsk returns slots LVM can be prematurely detected\n # and we have to catch the resulting BackEndError exception. Also see:\n # https://github.com/log2timeline/dfvfs/issues/578\n return\n\n for sub_file_entry in file_entry.sub_file_entries:\n sub_scan_node = scan_context.AddScanNode(\n sub_file_entry.path_spec, scan_node)\n\n if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:\n # Since scanning for file systems in VSS snapshot volumes can\n # be expensive we only do this when explicitly asked for.\n continue\n\n if auto_recurse or not scan_context.updated:\n self._ScanNode(scan_context, sub_scan_node, auto_recurse=auto_recurse)",
"def mount(volname):\n mnt = tempfile.mkdtemp(prefix=\"georep_\")\n execute([\"mount\", \"-t\", \"glusterfs\",\n \"localhost:/%s\" % volname, mnt])\n yield mnt\n execute([\"umount\", \"-l\", mnt])",
"def mount():\n\n u_boot_console.log.action('Mounting exported UMS device')\n cmd = ('/bin/mount', host_ums_part_node)\n u_boot_utils.run_and_log(u_boot_console, cmd)",
"def pyre_mountPrivateFilespace(self):\n # get the file server\n vfs = self.vfs\n # get the namespace\n namespace = self.pyre_namespace\n # if i don't have a namespace\n if not namespace:\n # make an empty virtual filesystem and return it\n return vfs.virtual()\n\n # attempt to\n try:\n # get my private filespace\n pfs = vfs[namespace]\n # if not there\n except vfs.NotFoundError:\n # make it\n pfs = vfs.folder()\n # and mount it\n vfs[namespace] = pfs\n\n # check whether\n try:\n # the user directory is already mounted\n pfs[self.USER]\n # if not\n except pfs.NotFoundError:\n # check whether\n try:\n # i have a folder in the user area\n userdir = vfs[vfs.USER_DIR, namespace]\n # if not\n except vfs.NotFoundError:\n # make and mount an empty folder\n pfs[self.USER] = pfs.folder()\n # if it is there\n else:\n # look deeply\n userdir.discover()\n # and mount it\n pfs[self.USER] = userdir\n\n # get my prefix\n prefix = self.pyre_prefix\n # if i don't have one\n if not prefix:\n # attach an empty folder; must use {pfs} to do this to guarantee filesystem consistency\n pfs[self.SYSTEM] = pfs.folder()\n # and return\n return pfs\n # otherwise, get the associated filesystem\n home = vfs.retrieveFilesystem(root=prefix)\n # and mount my folders in my namespace\n self.pyre_mountApplicationFolders(pfs=pfs, prefix=home)\n\n # now, build the protocol resolution folders by assembling the contents of the\n # configuration folders in priority order\n for root in [self.SYSTEM, self.USER]:\n # build the work list: triplets of {name}, {source}, {destination}\n todo = [(root, pfs[root], pfs)]\n # now, for each triplet in the work list\n for path, source, destination in todo:\n # go through all the children of {source}\n for name, node in source.contents.items():\n # if the node is a folder\n if node.isFolder:\n # gingerly attempt to\n try:\n # grab the associated folder in {destination}\n link = destination[name]\n # if not there\n except destination.NotFoundError:\n # no worries, make it\n link = destination.folder()\n # and attach it\n destination[name] = link\n # add it to the work list\n todo.append((name, node, link))\n # otherwise\n else:\n # link the file into the destination folder\n destination[name] = node\n\n # all done\n return pfs",
"def virtdisk_AttachVirtualDisk(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"VirtualDiskHandle\", \"SecurityDescriptor\", \"Flags\", \"ProviderSpecificFlags\", \"Parameters\", \"Overlapped\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def auto_mount(pvc_name='', volume_mount_path=''):\n if pvc_name and volume_mount_path:\n return mount_pvc(volume_name=pvc_name, volume_mount_path=volume_mount_path)\n if 'V3IO_ACCESS_KEY' in os.environ:\n return mount_v3io()\n if 'MLRUN_PVC_MOUNT' in os.environ:\n mount = os.environ.get('MLRUN_PVC_MOUNT')\n items = mount.split(':')\n if len(items) != 2:\n raise ValueError('MLRUN_PVC_MOUNT should include <pvc-name>:<mount-path>')\n return mount_pvc(volume_name=items[0], volume_mount_path=items[1])\n raise ValueError('failed to auto mount, need to set env vars')",
"def mountShares(self, node, sourcedir, sourceip, mountpoint, interval):\r\n log.info(\"Mounting NFS shares on %s\", node.alias)\r\n cmd = \"mount -t nfs \" + sourceip + \":\" + sourcedir + \" \" + mountpoint\r\n log.info(cmd)\r\n\n if not node.ssh.isdir(mountpoint): node.ssh.makedirs(mountpoint)\r\n\n # TRY REPEATEDLY TO MOUNT\r\n file_list = []\r\n while not file_list:\r\n log.debug(\"automount.NfsShares.mountShares cmd: %s\" % cmd)\r\n node.ssh.execute(cmd)\r\n file_list = node.ssh.ls(mountpoint)\r\n if file_list: break\r\n log.debug(\"Sleeping %s seconds\" % interval)\r\n time.sleep(float(interval))",
"def mount_bind(newroot, target, source=None, recursive=True, read_only=True):\n # Ensure root directory exists\n if not os.path.exists(newroot):\n raise Exception('Path %r does not exist' % newroot)\n\n if source is None:\n source = target\n\n target = utils.norm_safe(target)\n source = utils.norm_safe(source)\n\n # Make sure target directory exists.\n if not os.path.exists(source):\n raise Exception('Source path %r does not exist' % source)\n\n mnt_flags = [MS_BIND]\n\n # Use --rbind for directories and --bind for files.\n if recursive and os.path.isdir(source):\n mnt_flags.append(MS_REC)\n\n # Strip leading /, ensure that mount is relative path.\n while target.startswith('/'):\n target = target[1:]\n\n # Create mount directory, make sure it does not exists.\n target_fp = os.path.join(newroot, target)\n if os.path.isdir(source):\n utils.mkdir_safe(target_fp)\n else:\n utils.mkfile_safe(target_fp)\n\n res = mount(source=source, target=target_fp, fs_type=None, mnt_flags=mnt_flags)\n\n if res == 0 and read_only:\n res = mount(\n source=None, target=target_fp,\n fs_type=None, mnt_flags=MS_BIND | MS_RDONLY | MS_REMOUNT\n )\n\n return res",
"def Mount(options):\n\n config, partitions = LoadPartitionConfig(options)\n GetPartitionTableFromImage(options, config, partitions)\n mounts = {}\n\n for part_num, part in partitions.iteritems():\n path = part.get('mount', None)\n if not path or not path.startswith('/'):\n continue\n if not part.get('image_exists', False):\n continue\n\n mounts[path] = part\n\n if '/' not in mounts:\n raise InvalidLayout('No partition defined to mount on /')\n\n def DoMount(mount):\n full_path = os.path.realpath(options.mount_dir + mount['mount'])\n mount_opts = ['loop',\n 'offset=%d' % mount['image_first_byte'],\n 'sizelimit=%d' % mount['image_bytes']]\n if options.read_only:\n mount_opts.append('ro')\n elif (mount.get('fs_type', None) in ('ext2', 'ext4') and\n not IsE2fsReadWrite(options, mount)):\n mount_opts.append('ro')\n\n if mount.get('fs_subvolume', None):\n mount_opts.append('subvol=%s' % mount['fs_subvolume'])\n\n Sudo(['mkdir', '-p', full_path])\n # This tends to fail, retry if it does\n err = None\n for i in range(0,5):\n try:\n Sudo(['mount', '-t', mount.get('fs_type', 'auto'),\n '-o', ','.join(mount_opts),\n options.disk_image, full_path])\n err = None\n break\n except subprocess.CalledProcessError as e:\n print(\"Error mounting %s, attempt %d\" % (full_path, i))\n err = e\n time.sleep(5)\n\n if err is not None:\n raise err\n\n for src, dst in mount.get('binds', {}).iteritems():\n # src may be relative or absolute, os.path.join handles this.\n full_src = os.path.realpath(\n options.mount_dir + os.path.join(mount['mount'], src))\n full_dst = os.path.realpath(options.mount_dir + dst)\n Sudo(['mkdir', '-p', full_src, full_dst])\n Sudo(['mount', '--bind', full_src, full_dst])\n\n for mount in sorted(mounts, key=len):\n DoMount(mounts[mount])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generator that mounts this volume and either yields itself or recursively generates its subvolumes.
|
def init(self, only_mount=None, skip_mount=None, swallow_exceptions=True):
if swallow_exceptions:
self.exception = None
try:
if not self._should_mount(only_mount, skip_mount):
yield self
return
if not self.init_volume():
yield self
return
except ImageMounterError as e:
if swallow_exceptions:
self.exception = e
else:
raise
if not self.volumes:
yield self
else:
for v in self.volumes:
yield from v.init(only_mount, skip_mount, swallow_exceptions)
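
The traversal behaviour can be illustrated with a minimal, hypothetical stub that exposes only the attributes init() touches; it is not the real imagemounter Volume class:

class _StubVolume:
    def __init__(self, name, children=()):
        self.name = name
        self.volumes = list(children)  # empty for leaf volumes
        self.exception = None

    def _should_mount(self, only_mount, skip_mount):
        return True

    def init_volume(self):
        return True

_StubVolume.init = init  # reuse the generator defined above as a method

root = _StubVolume('root', [_StubVolume('p1'), _StubVolume('p2')])
print([v.name for v in root.init()])  # -> ['p1', 'p2']; leaves are yielded, the parent is not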
|
[
"def _iter_volumes(self):\n if self.volumes:\n for volume_name, container_path in self.volumes.iteritems():\n if \"/\" in volume_name:\n # if a / is found in the name, assume it's a full path specified on the host\n host_path = volume_name\n else:\n host_path = \"%s/volumes/%s/%s\" % (self.project.home_path, self.name, volume_name)\n yield (host_path, container_path)",
"def walk(self, file_filter=None):\n for dirpath, _, filenames in os.walk(self.mountpoint):\n for f in filenames:\n full_path = os.path.join(dirpath, f)\n if file_filter is None or file_filter(full_path):\n # remove mounted prefix; eg '/tmp/thumbtack/im_x30_s3s'\n path_within_volume = os.path.relpath(full_path, start=self.mountpoint)\n yield full_path, path_within_volume",
"def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device",
"def mount(volname):\n mnt = tempfile.mkdtemp(prefix=\"georep_\")\n execute([\"mount\", \"-t\", \"glusterfs\",\n \"localhost:/%s\" % volname, mnt])\n yield mnt\n execute([\"umount\", \"-l\", mnt])",
"def iterate_containers(self):\r\n\r\n for container_name in os.listdir(self.base_path):\r\n full_path = os.path.join(self.base_path, container_name)\r\n if not os.path.isdir(full_path):\r\n continue\r\n yield self._make_container(container_name)",
"def _mount_volumes(volumes):\n\n user_data_script_section = ''\n\n for volume in volumes:\n device = volume.device\n vol_type = volume.vol_type\n directory = volume.mount\n\n user_data_script_section += f\"\"\"\nmkfs -t {vol_type} {device}\nls {directory} || mkdir {directory}\nmount {device} {directory}\n\"\"\"\n\n return user_data_script_section",
"def iterate_volumes(self, node=None, ex_datacenter=None):\n if node is not None:\n if ex_datacenter:\n raise ValueError(\n \"Cannot list the volumes for the datacenter and the \"\n \"virtual machine at the same time\")\n virtual_machine = self.ex_get_vm(node)\n else:\n virtual_machine = None\n\n if ex_datacenter is not None:\n ex_datacenter = self._get_datacenter_by_id(ex_datacenter)\n\n # querying the creation timestamps of node(s) and volumes\n node_creation_times = self._query_node_creation_times(\n virtual_machine=virtual_machine)\n volume_creation_times = self._query_volume_creation_times(\n virtual_machine=virtual_machine)\n\n shared_files = collections.defaultdict(list)\n\n def result_to_volumes(files_info, allow_shared=False):\n \"\"\"\n :type disks_page: tp.Union[tp.List[_FileInfo], tp.List[_VMDiskInfo]]\n :rtype: tp.List[StorageVolume]\n \"\"\"\n if files_info and isinstance(files_info[0], _VMDiskInfo):\n files_info = (disk.file_info for disk in files_info)\n\n volumes = []\n for file_info in files_info:\n\n if not allow_shared and any(\n d.sharing\n for d in file_info.devices):\n shared_files[file_info.path].append(file_info)\n continue\n\n try:\n volume = self._to_volume(file_info)\n except LibcloudError as err:\n # one broken volume should not break the whole iteration\n LOG.warning(str(err))\n continue\n\n created_at = volume_creation_times.get(volume.id)\n for device in file_info.devices:\n if created_at:\n break\n if device.is_root:\n created_at = node_creation_times.get(device.owner_id)\n volume.extra['created_at'] = created_at\n\n volumes.append(volume)\n return volumes\n\n for item in self._query_vm_virtual_disks(\n virtual_machine=virtual_machine,\n datacenter=ex_datacenter,\n process_fn=result_to_volumes):\n yield item\n\n # collect and yield the shared volumes at the end of iteration\n merged_shared_files = []\n for files_info in shared_files.values():\n files_info[0].devices = list({\n device for file_info in files_info\n for device in file_info.devices})\n merged_shared_files.append(files_info[0])\n for item in result_to_volumes(merged_shared_files, allow_shared=True):\n yield item",
"def walk(self):\n dirs = []\n files = []\n for o in self.iterator():\n if o.type != 5: continue\n if o.IsDir():\n dirs += [o.Name()]\n else:\n files += [o.Name()]\n yield self.path, dirs, files\n for subdir in dirs:\n for a,b,c in self.opendir(subdir).walk():\n yield a, b, c",
"def walk(self, path):\n from gcsfs.core import norm_path\n path = norm_path(_stringify_path(path))\n directories = set()\n files = set()\n\n for key in self.fs.ls(path, detail=True):\n # each info name must be at least [path]/part , but here\n # we check also for names like [path]/part/\n path = key['name']\n if key['storageClass'] == 'DIRECTORY':\n directories.add(path)\n elif key['storageClass'] == 'BUCKET':\n pass\n else:\n files.add(path)\n\n files = sorted([posixpath.split(f)[1] for f in files\n if f not in directories])\n directories = sorted([posixpath.split(x)[1]\n for x in directories])\n\n yield path, directories, files\n\n for directory in directories:\n for tup in self.walk(directory):\n yield tup",
"def walk(self, path=None, onerror=None):\n root = os.path.normpath(self.base)\n # when dirpath == root, dirpath[prefixlen:] becomes empty\n # because len(dirpath) < prefixlen.\n prefixlen = len(pathutil.normasprefix(root))\n oids = []\n\n for dirpath, dirs, files in os.walk(\n self.reljoin(self.base, path or b''), onerror=onerror\n ):\n dirpath = dirpath[prefixlen:]\n\n # Silently skip unexpected files and directories\n if len(dirpath) == 2:\n oids.extend(\n [dirpath + f for f in files if _lfsre.match(dirpath + f)]\n )\n\n yield (b'', [], oids)",
"def detect_volume_shadow_copies(self):\n\n volume = self.volumes._make_subvolume(flag='alloc', offset=0, fstype='vss-container')\n volume.mount()\n return volume.volumes",
"def walk(self):\n yield self\n for child in self.children.values():\n for page in child.walk():\n yield page",
"def mountsources(self):\n # walk each asset source, choose a real mountpoint for it, then mount it there\n for (id, assetsource) in self.asset_sources.iteritems():\n mountpoint = self.calc_mountpoint_for_assetsource(assetsource)\n mountpoint.mount_source(assetsource, self)",
"def walk(self, structure):\n try:\n files = structure.get(\"__/files\", {})\n except AttributeError: # root element is list\n files = {}\n folders = list(structure.keys())\n if \"__/files\" in folders:\n folders.remove(\"__/files\")\n yield folders, files\n if folders is None:\n folders = []\n for folder in folders:\n yield from self.walk(structure[folder])",
"def ephemeral_files(self):\n for name in self.ephemeral_file_names:\n yield self.get_ephemeral_file(name)",
"def _subfolders(self):\n if not os.path.exists(self.path):\n return\n for dirname in os.listdir(self.path):\n try:\n self._parse_folder_name(dirname)\n except InvalidFolderNameFormat:\n continue\n yield osp.join(self.path, dirname)",
"def walk(self, topdown=True):\n\n if topdown:\n yield (self, self.subcollections, self.data_objects)\n for subcollection in self.subcollections:\n new_root = subcollection\n for x in new_root.walk(topdown):\n yield x\n if not topdown:\n yield (self, self.subcollections, self.data_objects)",
"def __iter__(self):\n for path in self.Paths:\n yield path",
"def volume_mount(self, volume_name, junction_path, export_policy_override=None, activate_junction=None):\n return self.request( \"volume-mount\", {\n 'export_policy_override': [ export_policy_override, 'export-policy-override', [ bool, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n 'activate_junction': [ activate_junction, 'activate-junction', [ bool, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n }, {\n } )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Bind mounts the volume to another mountpoint. Only works if the volume is already mounted.
|
def bindmount(self, mountpoint):
if not self.mountpoint:
raise NotMountedError(self)
try:
_util.check_call_(['mount', '--bind', self.mountpoint, mountpoint], stdout=subprocess.PIPE)
self.bindmounts.append(mountpoint)
return True
except Exception as e:
logger.exception("Error bind mounting {0}.".format(self))
raise SubsystemError(e)
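The call above ultimately shells out to mount --bind. A standalone sketch of that same operation follows, with placeholder paths and no claim about the real class's wider API; it needs root, an already mounted source and an existing target directory:

import subprocess

def bind_mount(source_mountpoint: str, target: str) -> None:
    # Same underlying call as bindmount() above: bind an already mounted
    # path onto a second mountpoint. Requires root privileges.
    subprocess.check_call(['mount', '--bind', source_mountpoint, target])

# Placeholder paths; both directories must already exist:
# bind_mount('/mnt/volume0', '/srv/volume0-view')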
|
[
"def volumeBind(influence=\"string\", name=\"string\"):\n pass",
"def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n instance_name = instance['name']\n if instance_name not in self.__mounts:\n self.__mounts[instance_name] = {}\n self.__mounts[instance_name][mountpoint] = connection_info",
"def mount(self, path, mount):\n self._mountpoints[self._join_chunks(self._normalize_path(path))] = mount",
"def attach_to_instance(self, volume, instance, mountpoint):\r\n return volume.attach_to_instance(instance, mountpoint)",
"def mount_bind(newroot, target, source=None, recursive=True, read_only=True):\n # Ensure root directory exists\n if not os.path.exists(newroot):\n raise Exception('Path %r does not exist' % newroot)\n\n if source is None:\n source = target\n\n target = utils.norm_safe(target)\n source = utils.norm_safe(source)\n\n # Make sure target directory exists.\n if not os.path.exists(source):\n raise Exception('Source path %r does not exist' % source)\n\n mnt_flags = [MS_BIND]\n\n # Use --rbind for directories and --bind for files.\n if recursive and os.path.isdir(source):\n mnt_flags.append(MS_REC)\n\n # Strip leading /, ensure that mount is relative path.\n while target.startswith('/'):\n target = target[1:]\n\n # Create mount directory, make sure it does not exists.\n target_fp = os.path.join(newroot, target)\n if os.path.isdir(source):\n utils.mkdir_safe(target_fp)\n else:\n utils.mkfile_safe(target_fp)\n\n res = mount(source=source, target=target_fp, fs_type=None, mnt_flags=mnt_flags)\n\n if res == 0 and read_only:\n res = mount(\n source=None, target=target_fp,\n fs_type=None, mnt_flags=MS_BIND | MS_RDONLY | MS_REMOUNT\n )\n\n return res",
"def attach_volume(self, instance_name, device_path, mountpoint):\n\n # Find the actual instance ref so we can see if it has a Reddwarf\n # friendly volume. i.e. a formatted filesystem with UUID attribute\n # set.\n meta = self._find_by_name(instance_name)\n instance = db.instance_get(context.get_admin_context(), meta['id'])\n if instance['volumes']:\n for vol in instance['volumes']:\n if vol['mountpoint'] == mountpoint and vol.has_key('uuid'):\n # Volume has a UUID so do all the mount magic using the\n # UUID instead of the device name.\n self._container_script_modify(instance, None, vol['uuid'],\n mountpoint, 'add')\n else:\n self._container_script_modify(instance, device_path, None,\n mountpoint, 'add')\n else:\n LOG.error('No volume in the db for this instance')\n LOG.error('Instance: %s' % (instance_name,))\n LOG.error('Device: %s' % (device_path,))\n LOG.error('Mount: %s' % (mountpoint,))\n raise exception.Error('No volume in the db for this instance')",
"def mount(device, mountpoint, *args, readonly=False, mkfs=False):\n raise NotImplementedError(\"Contribute on github.com/alej0varas/pybolator\")",
"def mount(self, volname):\n check = self.mount_check(volname)\n if check:\n return check\n cmdline = self.vol_dict[volname][\"cmdline\"]\n mount_cmd = docker[cmdline]\n mount_cmd()\n self.vol_dict[volname][\"mounted\"] = True\n return self.vol_dict[volname][\"Local\"]",
"def _bindProcSysDev(self, mountPoint):\n if mountPoint != \"/\":\n self.__debug(\"mount point ≠ / so mount /dev, /proc and /sys in \" + mountPoint)\n self._procInBootMounted = True\n slt.execCall('mount -o bind /dev {mp}/dev'.format(mp=mountPoint))\n slt.execCall('mount -o bind /proc {mp}/proc'.format(mp=mountPoint))\n slt.execCall('mount -o bind /sys {mp}/sys'.format(mp=mountPoint))",
"def attach_volume(self, context, **kwargs):\n # TODO(lyarwood): Remove this encryptor and refactor the LUKS based\n # encryptors in the U release.\n versionutils.report_deprecated_feature(\n LOG,\n \"The plain CryptsetupEncryptor is deprecated and will be removed \"\n \"in a future release. Existing users are encouraged to retype \"\n \"any existing volumes using this encryptor to the 'luks' \"\n \"LuksEncryptor or 'luks2' Luks2Encryptor encryptors as soon as \"\n \"possible.\")\n key = self._get_key(context).get_encoded()\n passphrase = self._get_passphrase(key)\n\n self._open_volume(passphrase, **kwargs)\n\n # modify the original symbolic link to refer to the decrypted device\n self._execute('ln', '--symbolic', '--force',\n '/dev/mapper/%s' % self.dev_name, self.symlink_path,\n root_helper=self._root_helper,\n run_as_root=True, check_exit_code=True)",
"def attach_to_instance(self, instance, mountpoint):\r\n instance_id = _resolve_id(instance)\r\n try:\r\n resp = self._nova_volumes.create_server_volume(instance_id,\r\n self.id, mountpoint)\r\n except Exception as e:\r\n raise exc.VolumeAttachmentFailed(\"%s\" % e)",
"def attach_volume(self, node, volume, device=None):\r\n raise NotImplementedError('attach not implemented for this driver')",
"def mount(self, volume_id, client_name, mountpath, do_vssprotection=True):\n return self._snap_operation(0, volume_id, client_name, mountpath, do_vssprotection)",
"def _bind_overlay_docker(container_dir, root_dir):\n # XXX: This path is mounted as RW\n # because ro volume in treadmill container can not be mounted in docker\n # 'Error response from daemon: chown /etc/hosts: read-only file system.'\n overlay_dir = os.path.join(container_dir, 'overlay')\n\n fs_linux.mount_bind(\n root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ETC_DIR, 'hosts'),\n source=os.path.join(overlay_dir, 'etc/hosts'),\n recursive=False, read_only=False\n )",
"def bind(config, distro):\n distro_data = validate_distro(config.distro_maps, distro)\n #print(distro_data, config.btrfs_uuid)\n mount_distro_helper(config.btrfs_uuid, distro_data)",
"def volume_mount(self, volume_name, junction_path, export_policy_override=None, activate_junction=None):\n return self.request( \"volume-mount\", {\n 'export_policy_override': [ export_policy_override, 'export-policy-override', [ bool, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n 'activate_junction': [ activate_junction, 'activate-junction', [ bool, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n }, {\n } )",
"def if_mounted(self, mountpoint=None):\n self.writeCommand('if_mounted', mountpoint)\n return self",
"def _mnt_loop_dev(self):\n cmd = ['mount', '-t', 'ext2', self._loop_dev, '-o', 'ro', self._mnt_path()]\n output = self._run_cmd(cmd, sudo=True)\n if output['success']:\n self._mounted = True\n return True\n else:\n return False",
"def _AddSecretVolumeMount(mounts, secret_name):\n _AddVolumeMount(\n mounts,\n mount_name=secret_name,\n mount_path='/etc/' + secret_name.replace('-', '_'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recursively gets a list of all subvolumes and the current volume.
|
def get_volumes(self):
if self.volumes:
volumes = []
for v in self.volumes:
volumes.extend(v.get_volumes())
volumes.append(self)
return volumes
else:
return [self]
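A self-contained sketch of how this recursion flattens a nested volume tree; the FakeVolume class below is an assumption that stands in for the real volume objects and only mirrors the method above:

class FakeVolume:
    # Minimal stand-in with the same `volumes` attribute and method.
    def __init__(self, name, volumes=None):
        self.name = name
        self.volumes = volumes or []

    def get_volumes(self):
        if self.volumes:
            volumes = []
            for v in self.volumes:
                volumes.extend(v.get_volumes())
            volumes.append(self)
            return volumes
        else:
            return [self]

leaf_a = FakeVolume('a')
leaf_b = FakeVolume('b')
root = FakeVolume('root', [leaf_a, FakeVolume('mid', [leaf_b])])
print([v.name for v in root.get_volumes()])   # ['a', 'b', 'mid', 'root']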
|
[
"def all_volumes(self):\n _logger.debug('%s', where_am_i())\n volumes = []\n for compartment in self.all_compartments():\n comp_volumes = compartment.all_volumes()\n if comp_volumes is not None:\n volumes += comp_volumes\n return volumes",
"def get_all_volume_ids(client):\n if supports_volumes_api(client):\n # volumes upgraded from 1.5.0 to 1.9 will also be returned here and\n # they include bind volumes (at least ~1.5.0)\n # (this is not the case for fresh >=1.8 docker containers)\n return {v['Name'] for v in (client.volumes()['Volumes'] or [])}\n else:\n return {os.path.basename(path)\n for path in get_immediate_subdirectories(DOCKER_VOLUMES_DIR)}",
"def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list",
"def list_volumes( fields ):\n global conf\n\n volume_names = VOLUME_NAMES( conf )\n ret = []\n \n for name in volume_names:\n vol_conf = read_volume( name, fields )\n vol_conf['NAME'] = name\n ret.append( vol_conf )\n\n return ret",
"def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)",
"def volume_list(mnode):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes\", httplib.OK, None)",
"def volumes(self) -> List:\n if self.node is None:\n return []\n # Removing boot volume from the list\n volume_attachments = []\n for i in self.node[\"volume_attachments\"]:\n volume_detail = self.service.get_volume(i[\"volume\"][\"id\"])\n for vol in volume_detail.get_result()[\"volume_attachments\"]:\n if vol[\"type\"] == \"data\":\n volume_attachments.append(vol)\n return volume_attachments",
"def get_volumes(instance):\n volumes = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n volume_tag_reference = \"%s_data\" % tag['Value']\n\n for volume in instance['volumes']:\n if volume['volume_tags'] != None:\n for volume_tag in volume['volume_tags']:\n if volume_tag['Key'] == 'Name' and volume_tag['Value'] == volume_tag_reference:\n volumes.append(volume['VolumeId'])\n snapshot_volumes = []\n for volume in instance['volumes']:\n if volume['VolumeId'] in volumes:\n vol = {\"VolumeId\": volume['VolumeId'], \"volume_tags\": volume['volume_tags']}\n snapshot_volumes.append(vol)\n return snapshot_volumes",
"def _populate_subvolume_info(self):\n try:\n info = btrfs(\n \"subvolume\", \"list\", under_docker(\"btrfs\", \"subvolumes\")\n )\n except subprocess.CalledProcessError:\n fail(\"Unable to retrieve btrfs subvolume info.\")\n for line in info.splitlines():\n _, subvol_id, _, _, _, _, _, _, path = line.split()\n container = self._containers.get(os.path.split(path)[-1])\n if container is not None:\n container.subvol_id = subvol_id\n self._containers_by_subvol[subvol_id] = container",
"def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device",
"def volumes(self) -> list[EyeVolume]:\n volumes = []\n for s in self.series:\n try:\n volumes.append(s.get_volume())\n except Exception as e:\n logger.debug(''.join(traceback.format_exception(e)))\n return volumes",
"def get_watched_volumes(connection):\n return connection.get_all_volumes(\n filters={'tag-key': 'AutomatedEBSSnapshots'})",
"def _iter_volumes(self):\n if self.volumes:\n for volume_name, container_path in self.volumes.iteritems():\n if \"/\" in volume_name:\n # if a / is found in the name, assume it's a full path specified on the host\n host_path = volume_name\n else:\n host_path = \"%s/volumes/%s/%s\" % (self.project.home_path, self.name, volume_name)\n yield (host_path, container_path)",
"def list(connection):\n volumes = get_watched_volumes(connection)\n\n if not volumes:\n logger.info('No watched volumes found')\n return\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n logger.info(\n '| {volume:<21} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume='Volume ID',\n volume_name='Volume name',\n interval='Interval',\n retention='Retention'))\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n\n for volume in volumes:\n if 'AutomatedEBSSnapshots' not in volume.tags:\n interval = 'Interval tag not found'\n elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS:\n interval = 'Invalid interval'\n else:\n interval = volume.tags['AutomatedEBSSnapshots']\n\n if 'AutomatedEBSSnapshotsRetention' not in volume.tags:\n retention = 0\n else:\n retention = volume.tags['AutomatedEBSSnapshotsRetention']\n\n # Get the volume name\n try:\n volume_name = volume.tags['Name']\n except KeyError:\n volume_name = ''\n\n logger.info(\n '| {volume_id:<14} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume_id=volume.id,\n volume_name=volume_name,\n interval=interval,\n retention=retention))\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')",
"def test_01_list_volumes(self):\n list_volume_response = Volume.list(\n self.apiclient,\n ids=[self.vm1_root_volume.id, self.vm2_root_volume.id, self.vm3_root_volume.id],\n type='ROOT',\n listAll=True\n )\n self.assertEqual(\n isinstance(list_volume_response, list),\n True,\n \"List Volume response was not a valid list\"\n )\n self.assertEqual(\n len(list_volume_response),\n 3,\n \"ListVolumes response expected 3 Volumes, received %s\" % len(list_volume_response)\n )",
"def find_volumes():\n global email_message\n email_message += 'Finding volumes that match the requested filter: %(filter)s\\n\\n' % {\n 'filter': config.volumes['filter']\n }\n return conn.get_all_volumes(filters=config.volumes['filter'])",
"def volumes(self):\n volumes = self.properties[self.VOLUMES]\n\n return ((vol[self.VOLUME_ID],\n vol[self.VOLUME_DEVICE]) for vol in volumes)",
"def volumes(self):\r\n volumes = self.properties[self.VOLUMES]\r\n\r\n return ((vol[self.VOLUME_ID],\r\n vol[self.VOLUME_DEVICE]) for vol in volumes)",
"def list_volumes(self, datacenter_id, depth=1):\n response = self._perform_request(\n '/datacenters/%s/volumes?depth=%s' % (datacenter_id, str(depth)))\n\n return response",
"def list_volume_snapshots(self, volume):\r\n raise NotImplementedError(\r\n 'list_volume_snapshots not implemented for this driver')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Makes the window's border constant.
|
def set_constant_bordered(self):
self._borders_state.change_to_constant()
|
[
"def make_border(self):\n self.is_border = True",
"def showBorders(self,window):\n mx,my,Mx,My=self.getCorners(window)\n p=[(mx,my),(mx,My),(Mx,My),(Mx,my)]\n for i in range(len(p)):\n j=(i+1)%len(p)\n start=p[i]\n end=p[j]\n start=self.getToScreen(start,window)\n end= self.getToScreen(end,window)\n window.draw.line(window.screen,self.theme[\"borders_color\"],start,end,1)",
"def set_border(client, size):\n i3.border('pixel'+ str(size))",
"def dontshowwindow(self):\n if not self._outline_color is None:\n self._outline_color = None\n self._invalrectandborder()",
"def Style(window):\n ##Set the background color.\n window.SetBackgroundColour(BACKGROUND_COLOR)\n return",
"def draw_borders(self) -> None:\n self.draw_rectangle(-1, -1, self.h - 2, self.w - 2)\n text = self.title[:self.w - 4]\n\n if self.is_active:\n if len(text) > 0:\n self.draw(-1, 0, \" \")\n self.draw(-1, 1, text, TextStyles.BOLD | TextStyles.CYAN)\n self.draw(-1, len(text) + 1, \" \")\n else:\n if len(text) > 0:\n self.draw(-1, 0, \" \" + text + \" \")",
"def addBorders( img, win, color ): \n #set color\n color = color\n\n #add top border\n for x in range(WIDTH):\n win.update()\n for y in range(6):\n img.setPixel(x, y, color)\n\n #add bottom border\n for x in range(WIDTH):\n win.update()\n for y in range(HEIGHT-5, HEIGHT):\n img.setPixel(x, y, color)\n\n #add left border\n for y in range(HEIGHT):\n win.update()\n for x in range(5):\n img.setPixel(x, y, color)\n\n #add right border \n for y in range(HEIGHT):\n win.update()\n for x in range(WIDTH-5, WIDTH):\n img.setPixel(x, y, color)",
"def paint_borders(self, color: ColorsType, width: int) -> None:",
"def border(self, border):\n self._border = border",
"def on(self):\n dacq_fixwin(self.fwnum, self.x, self.y, self.size, self.vbias)",
"def transparent_window(window):\n change_transparency(window)",
"def __init__(self, window):\n self.window = window\n self.window.title(\"Sorting Algorithm Visualizer\")\n self.window.geometry(\"800x450\")\n self.window.minsize(800, 450)\n self.window.maxsize(800, 450)\n self.window.config(bg = \"#152e57\")",
"def create_game_borders(screen, border_corner, border_dim, game_corner, game_dim):\r\n border = pygame.Rect(border_corner, border_dim)\r\n pygame.draw.rect(screen, WHITE, border)\r\n board = pygame.Rect(game_corner, game_dim)\r\n pygame.draw.rect(screen, BLACK, board)",
"def setBorder(self, *args) -> \"void\":\n return _coin.SoVectorizeAction_setBorder(self, *args)",
"def add_border(self):\n new_surf = Surface((THUMB_SIZE + (BORDER_WIDTH * 2),\n THUMB_SIZE + (BORDER_WIDTH * 2)))\n new_surf.blit(self.image, (BORDER_WIDTH, BORDER_WIDTH))\n border_rect = Rect(get_line_center(BORDER_WIDTH),\n get_line_center(BORDER_WIDTH),\n THUMB_SIZE + BORDER_WIDTH + 1,\n THUMB_SIZE + BORDER_WIDTH + 1)\n pygame.draw.rect(new_surf, THUMB_BORDER_COLOR, border_rect,\n BORDER_WIDTH)\n self.image = new_surf",
"def color_border(window, start_y, start_x, stop_y, stop_x, color):\n try:\n for i in range(start_y, stop_y):\n window.addstr(i, start_x, ' ', curses.color_pair(color))\n window.addstr(i, stop_x, ' ', curses.color_pair(color))\n for i in range(start_x, stop_x):\n window.addstr(start_y, i, ' ', curses.color_pair(color))\n window.addstr(stop_y, i, ' ', curses.color_pair(color))\n # for loops fail to add last element.\n window.addstr(stop_y, stop_x, ' ', curses.color_pair(color))\n except curses.error:\n # curses.error is raised at end of line and can safely be ignored.\n pass",
"def _update_border_color(widget, condition):\n # TODO Use better way of changing border color\n if condition:\n widget.setStyleSheet(\"border:1px solid rgb(0, 255, 0);\")\n else:\n widget.setStyleSheet(\"border:1px solid rgb(255, 0, 0);\")",
"def draw_background(self):\r\n\t\tself.app.background(0,0,0)",
"def add_borders(self):\n new_surf = Surface((PREVIEW_WIDTH + (BORDER_WIDTH * 4),\n PREVIEW_HEIGHT + (BORDER_WIDTH * 4)))\n new_surf.blit(self.image, (BORDER_WIDTH * 2, BORDER_WIDTH * 2))\n\n inner_rect = Rect(BORDER_WIDTH + get_line_center(BORDER_WIDTH),\n BORDER_WIDTH + get_line_center(BORDER_WIDTH),\n PREVIEW_WIDTH + BORDER_WIDTH + 1,\n PREVIEW_HEIGHT + BORDER_WIDTH + 1)\n outer_rect = Rect(get_line_center(BORDER_WIDTH),\n get_line_center(BORDER_WIDTH),\n PREVIEW_WIDTH + (BORDER_WIDTH * 3) + 1,\n PREVIEW_HEIGHT + (BORDER_WIDTH * 3) + 1)\n pygame.draw.rect(new_surf, PREVIEW_INNER_BORDER_COLOR, inner_rect,\n BORDER_WIDTH)\n pygame.draw.rect(new_surf, PREVIEW_OUTER_BORDER_COLOR, outer_rect,\n BORDER_WIDTH)\n\n self.image = new_surf"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Changes the image to `new_image` without copying it. Use this instead of `=` or `blit`.
|
def reset_image(self, new_image: pygame.Surface):
# We use `=` instead of `blit` because `=` does not save alpha.
self.image = new_image
self._borders_state.fix_borders()
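A short, self-contained pygame sketch of why rebinding (as reset_image does) is preferred over blit when per-pixel alpha matters; the surfaces and colours are arbitrary examples, not part of the original class:

import pygame

pygame.init()
opaque = pygame.Surface((4, 4))                       # no per-pixel alpha
translucent = pygame.Surface((4, 4), pygame.SRCALPHA)
translucent.fill((255, 0, 0, 128))                    # half-transparent red

# blit() draws onto the existing surface, so the alpha channel is flattened:
opaque.blit(translucent, (0, 0))
print(opaque.get_at((0, 0)))        # alpha is 255 here

# Rebinding keeps the original surface object and its alpha intact,
# which is what reset_image() above relies on:
image = translucent
print(image.get_at((0, 0)))         # (255, 0, 0, 128)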
|
[
"def new_image(image):\n os.replace(image,PICTURES_IN + image)\n return",
"def updateImage(self):\n self.image = self.getImage(self.location, self.name, self.imageType)",
"def copy_image(self): \r\n\r\n for i in range(0, self.width):\r\n for j in range(0, self.height): \r\n self.image_old[i, j] = self.image[i, j]\r\n \r\n return self.image_old",
"def update_image(self):\n self.image = self.capture_image()\n self.update_background()",
"def change_img(obj: pygame.sprite.Sprite, img):\r\n obj.image = img\r\n obj.image.set_colorkey(service.colors[\"BLACK\"])",
"def updatePixels():\n new = createImage(width,height,'RGBA')\n color = _getColor((200))\n glClearColor (*color)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n if npy:\n new.pixels = numpy.array(screen.pixels)\n new.updatePixels()\n else: \n for i in range(width*height): new.pixels[i] = screen.pixels[i]\n image(new,0,0)",
"def img_update(self, image=None):\n self.camera_started = True\n try:\n if not self.request_new_image:\n self.sig_msg.emit(self.__class__.__name__ + \": no new image needed, frame dropped.\")\n else:\n self.img = image.copy()\n # convert to grayscale\n self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n self.request_new_image = False\n except Exception as err:\n self.sig_msg.emit(self.__class__.__name__, \": exception in img_update \" + str(err))",
"def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pg.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + (0,), None, pg.BLEND_RGBA_ADD)\n\n return image",
"def resize_image(img, new_width, new_height):\n img_new = img.resize((new_width, new_height))\n return img_new",
"def change_size(self, new_width, new_height, new_state = None):\n copy_width = min(self.width, new_width)\n copy_height = min(self.height, new_height)\n \n old_height = self.height\n old_width = self.width\n old_image = copy.deepcopy(self.ansi_image)\n \n self.clear_image(new_width, new_height)\n if new_state == None:\n for y in range(copy_height):\n for x in range(copy_width):\n self.ansi_image[y][x] = old_image[y][x]\n self.have_cache = False\n else:\n self.ansi_image = copy.deepcopy(new_state)\n \n self.have_cache = False\n self.is_dirty = True\n return (old_width, old_height, old_image)",
"def copy(self):\n newimg = self.getEmpty() \n cv.Copy(self.getBitmap(), newimg)\n return Image(newimg, colorSpace=self._colorSpace)",
"def rescaled_image():",
"def edit_image(self):\n self.update()",
"def update_image(window: tk.Tk, img: Image):\r\n\r\n window.display_image(img)",
"def setImage(self, image: 'SbImage') -> \"void\":\n return _coin.SoVRMLImageTexture_setImage(self, image)",
"def change_imagesize(self,new_width, new_height):\n #TODO catch out of bounds\n new_ratio = float(new_height)/float(new_width)\n surface = (self.xabsoluteend-self.xabsolutestart)*(self.yabsoluteend-self.yabsolutestart)\n new_xabsolutewidth = math.sqrt(surface/new_ratio)\n new_yabsoluteheight = math.sqrt(surface*new_ratio)\n width_difference = new_xabsolutewidth-(self.xabsoluteend-self.xabsolutestart)\n height_difference = new_yabsoluteheight-(self.yabsoluteend-self.yabsolutestart)\n self.xabsolutestart -= 0.5*width_difference\n self.xabsoluteend += 0.5*width_difference\n self.yabsolutestart -= 0.5*height_difference\n self.yabsoluteend += 0.5*height_difference\n self.height = new_height\n self.width = new_width",
"def img_cb(self, image):\n \n self.last_img = image\n self.is_new_img = True",
"def _new_image(self, msg):\n filepath = msg.data\n self.set_image(filepath)\n self.set_state(BallotScreen.STATE_IDLE)\n self.Refresh()",
"def replace(self, img, dst_clr):\n for i in range(80, 340): #x1 x2\n for j in range(500, 800): #y1 y2\n img[j][i] = dst_clr\n return img"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Switches the window to the `hidden` state.
|
def hide(self):
self._state = window_states.HiddenWindowState(self)
self.action_after_hide()
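hide(), is_hidden() and the action_after_* hooks in these records follow the State pattern. A minimal, self-contained sketch under the assumption that window_states simply provides one small class per state (the real module is not shown in this record):

class _WindowState:
    # Assumed minimal shape of the real window_states classes.
    def __init__(self, window):
        self.window = window

class ActiveWindowState(_WindowState):
    pass

class HiddenWindowState(_WindowState):
    pass

class Window:
    def __init__(self):
        self._state = ActiveWindowState(self)

    def hide(self):
        self._state = HiddenWindowState(self)
        self.action_after_hide()

    def action_after_hide(self):
        pass  # hook for subclasses, mirroring the record above

    def is_hidden(self):
        return isinstance(self._state, HiddenWindowState)

w = Window()
assert not w.is_hidden()
w.hide()
assert w.is_hidden()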
|
[
"def __toggleWindow(self, w):\n if w.isHidden():\n w.show()\n else:\n w.hide()",
"def became_invisible(window):\n log.debug(\"Window is invisible\")\n\n # FIXME - notify the manager that operation has completed.\n # For now, terminate.\n self.handle_terminate()",
"def hide(self):\n self.withdraw()",
"def become_invisible(self, window, callback):\n window.close()\n callback(window)",
"def setHidden( self, state ):\r\n\t\tself._nativePointer.ishidden = state\r\n\t\treturn True",
"def hide(self):\n self.is_visible = False",
"def hide(self):\n self.root.withdraw()",
"def hidden(self, hidden):\n \n self._hidden = hidden",
"def become_invisible(self, window, callback):\n if not self._visible:\n log.debug(\"Window already invisible. Bug in caller?\")\n callback(window)\n\n self._visible = False\n self._become_invisible(window, callback)",
"def is_hidden(self):\n return isinstance(self._state, window_states.HiddenWindowState)",
"def closeSession(self):\n self.hide()",
"def directory_view_window_visibility_callback(self, visible):\n self.hidden = not visible",
"def toggleWindowVisibility(string):\n pass",
"def _become_invisible(self, window, callback):\n self.perform_animation(\n window, AppKit.NSViewAnimationFadeOutEffect, callback\n )",
"def hide(self):\n super().hide()\n\n self._window_root_container.hide()",
"def hide(self):\n self.topFrame.forget()",
"def reset_hidden_states(self, device):\n\n pass",
"def notebook_visible_toggle_action(self):\n\n self.notebook.Show(not self.notebook.IsShown())\n self.viewmenu.Check(406, self.notebook.IsShown())\n self.SendSizeEvent()",
"def hideVideoWindow(self, force=False):\n if not self.fsActive() or force:\n # Hide the video window\n self.videoWindow.set_size_request(1, 1)\n # Make the hight of the window as small as possible\n w = self.window().get_size()[0]\n self.window.resize(w, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if the window is in the hidden state.
|
def is_hidden(self):
return isinstance(self._state, window_states.HiddenWindowState)
|
[
"def is_hidden(self):\n if self.cellStatus == 'H':\n return True\n else:\n return False\n pass",
"def is_visible(self):\n return not self._panel.hidden()",
"def is_visible() -> bool:\n return win.winfo_ismapped()",
"def has_hidden(self, ):\n return self.AttributeNames.HIDDEN in self.attrs",
"def is_visible(self) -> bool:\n return False",
"def is_visible(self):\n return self.visible",
"def became_invisible(window):\n log.debug(\"Window is invisible\")\n\n # FIXME - notify the manager that operation has completed.\n # For now, terminate.\n self.handle_terminate()",
"def is_hidden(self, row, column):\n\n return self.board[row][column].cellStatus\n pass",
"def hide(self):\n self._state = window_states.HiddenWindowState(self)\n self.action_after_hide()",
"def is_formula_hidden(self):\n return self.container['is_formula_hidden']",
"def _is_win_visible(self, i3_win) -> bool:\n try:\n xprop = check_output(['xprop', '-id', str(i3_win.window)]).decode()\n return '_NET_WM_STATE_HIDDEN' not in xprop\n except FileNotFoundError:\n # if xprop not found, fall back to just checking if tmux win is on our current worksapce:\n self.logger.debug('xprop utility is not found - please install it.')\n self.logger.debug('will decide visibility simply by checking if tmux is on our current workspace')\n return self._is_tmux_win_on_current_ws(i3_win)",
"def isHidden(self, target, context):\n if self.Hidewhen:\n try:\n result = self.runFormulaScript(\n SCRIPT_ID_DELIMITER.join(['action', context.id, self.id, 'hidewhen']),\n target,\n self.Hidewhen,\n True,\n context.id)\n except PlominoScriptException, e:\n e.reportError(\n '\"%s\" self hide-when failed' % self.Title())\n # if error, we hide anyway\n result = True\n return result\n else:\n return False",
"def visible(self) -> bool:\n own_value = self.styles.get_rule(\"visibility\")\n if own_value is not None:\n return own_value != \"hidden\"\n return self.parent.visible if self.parent else True",
"def is_hidden(self):\n return bool(type_get_record_field_hidden_status(self.recordtype, self.index))",
"def visible(self) -> bool:\n return self.raw_data[\"visible\"]",
"def get_visibility(self):\n return bool(self.actor.GetVisibility())",
"def directory_view_window_visibility_callback(self, visible):\n self.hidden = not visible",
"def is_visible(self):\n return self.node.is_visible()",
"def visibilityState(self):\n return 'visible'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Action the window performs when it switches to the `hidden` state.
|
def action_after_hide(self):
        pass
|
[
"def hide(self):\n self._state = window_states.HiddenWindowState(self)\n self.action_after_hide()",
"def became_invisible(window):\n log.debug(\"Window is invisible\")\n\n # FIXME - notify the manager that operation has completed.\n # For now, terminate.\n self.handle_terminate()",
"def __toggleWindow(self, w):\n if w.isHidden():\n w.show()\n else:\n w.hide()",
"def notebook_visible_toggle_action(self):\n\n self.notebook.Show(not self.notebook.IsShown())\n self.viewmenu.Check(406, self.notebook.IsShown())\n self.SendSizeEvent()",
"def hide(self):\n self.withdraw()",
"def hide(self):\n self.is_visible = False",
"def hidden(self, hidden):\n \n self._hidden = hidden",
"def become_invisible(self, window, callback):\n window.close()\n callback(window)",
"def directory_view_window_visibility_callback(self, visible):\n self.hidden = not visible",
"def is_hidden(self):\n return isinstance(self._state, window_states.HiddenWindowState)",
"def setHidden( self, state ):\r\n\t\tself._nativePointer.ishidden = state\r\n\t\treturn True",
"def notebook_visible_toggle_event(self, event):\n\n if event.GetId() == 406:\n self.notebook_visible_toggle_action()\n else:\n event.Skip()",
"def _become_invisible(self, window, callback):\n self.perform_animation(\n window, AppKit.NSViewAnimationFadeOutEffect, callback\n )",
"def become_invisible(self, window, callback):\n if not self._visible:\n log.debug(\"Window already invisible. Bug in caller?\")\n callback(window)\n\n self._visible = False\n self._become_invisible(window, callback)",
"def cancel_hide(self):\n if self.pending_hide is not None:\n self.pending_hide.cancel()",
"def isHidden(self, target, context):\n if self.Hidewhen:\n try:\n result = self.runFormulaScript(\n SCRIPT_ID_DELIMITER.join(['action', context.id, self.id, 'hidewhen']),\n target,\n self.Hidewhen,\n True,\n context.id)\n except PlominoScriptException, e:\n e.reportError(\n '\"%s\" self hide-when failed' % self.Title())\n # if error, we hide anyway\n result = True\n return result\n else:\n return False",
"def hide(self):\n self.root.withdraw()",
"def handle_become_invisible(self):\n log.debug(\"Handling a become_invisible request\")\n\n def became_invisible(window):\n \"\"\"Callback when window becomes invisible.\"\"\"\n log.debug(\"Window is invisible\")\n\n # FIXME - notify the manager that operation has completed.\n # For now, terminate.\n self.handle_terminate()\n\n self.get_window().become_invisible(became_invisible)",
"def toggle_visibility(self):\n\n if self.actor.GetVisibility():\n self.actor.VisibilityOff()\n\n else:\n self.actor.VisibilityOn()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Switches the window to the `passive` state.
|
def passive(self):
self._state = window_states.PassiveWindowState(self)
self.action_after_passive()
|
[
"def is_passive(self):\n return isinstance(self._state, window_states.PassiveWindowState)",
"def _update_is_passive(self):\n passive_setting = self._view.settings().get('wrap_as_you_type_passive')\n if passive_setting in (None, False, True):\n self.is_passive = bool(passive_setting)\n else:\n self.is_passive = False\n raise UserFacingError('The value must be a boolean')",
"def _set_passive(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=YANGBool,\n is_leaf=True,\n yang_name=\"passive\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"boolean\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"passive must be of a type compatible with boolean\"\"\",\n \"defined-type\": \"boolean\",\n \"generated-type\": \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"passive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)\"\"\",\n }\n )\n\n self.__passive = t\n if hasattr(self, \"_set\"):\n self._set()",
"def action_after_passive(self):",
"def _set_passive(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"passive\", rest_name=\"passive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='empty', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"passive must be of a type compatible with empty\"\"\",\n 'defined-type': \"empty\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"passive\", rest_name=\"passive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='empty', is_config=True)\"\"\",\n })\n\n self.__passive = t\n if hasattr(self, '_set'):\n self._set()",
"def ChangeGestureMode(self,parameter):\r\n if (self.gestureButton.GetActive() == True):\r\n self.ada.Notify('gestureModeStarted',True)\r\n self.gestureButton.SetLabel('Deactivate')\r\n else:\r\n self.ada.Notify('gestureModeStarted',False)\r\n self.gestureButton.SetLabel('Activate')",
"def toSafeMode(self):\r\n self.start()\r\n time.sleep(0.03)\r\n # now we're in PASSIVE_MODE, so we repeat the above code...\r\n self.send( SAFE )\r\n # they recommend 20 ms between mode-changing commands\r\n time.sleep(0.03)\r\n # change the mode we think we're in...\r\n self.sciMode = SAFE_MODE\r\n # no response here, so we don't get any...\r\n return",
"def is_passive(self, node):\n return bool(self.network_graph.nodes[node]['node_passive'])",
"def activateFullscreen(self):\n if not self.videoWindowShown():\n return\n\n self.window.fullscreen()",
"def proxy_passive_to_active():\n logger.info('Change all proxys to active')\n proxys = zapi.proxy.get(output=[ 'shorten', 'host' ],\n filter={ 'status': 6 })\n if ( proxys.__len__() == 0 ):\n logger.info('Done')\n return\n bar = ProgressBar(maxval=proxys.__len__(),widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()\n i = 0\n for x in proxys:\n i += 1\n proxyid = x['proxyid']\n result = zapi.proxy.update(proxyid=proxyid, status=5)\n logger.echo = False\n logger.debug('Changed from passive to active proxy: %s' % (x['host']))\n bar.update(i)\n bar.finish()\n logger.echo = True\n logger.info('Done')\n return",
"def is_passive_standby_enabled():\n ui_config_value = UserConfigMgmt()[\"coc_v8_enabled\"]\n if platform == 'v6':\n # get the current user choice through the system property\n # deep standby mode is S5 power state mode, normal is S2\n SystemProperties = autoclass('android.os.SystemProperties')\n standby_mode_system_property = SystemProperties.get(\n 'persist.sys.power.offstate', 'S2')\n android_config_value = standby_mode_system_property == 'S5'\n if android_config_value != ui_config_value:\n # The value of the android property may have been updated by\n # the standby popup for example, in java side\n log.error(\"Bad passive standby conf: configstore value is %s \"\n \"whereas android value is %s\",\n ui_config_value, android_config_value)\n # TODO: update the value stored in configstore ?\n # let's take the value of the android property anyway\n return android_config_value\n else:\n return ui_config_value",
"def mode_toggle(self, mode: str) -> bool:\n if mode.lower() not in self.modes:\n logger.debug('Invalid purifier mode used - %s',\n mode)\n return False\n head, body = self.build_api_dict('setPurifierMode')\n if not head and not body:\n return False\n\n body['payload']['data'] = {\n 'mode': mode.lower()\n }\n if mode == 'manual':\n body['payload'] = {\n 'data': {\n 'id': 0,\n 'level': 1,\n 'type': 'wind'\n },\n 'method': 'setLevel',\n 'type': 'APP'\n }\n\n r, _ = Helpers.call_api(\n '/cloud/v2/deviceManaged/bypassV2',\n method='post',\n headers=head,\n json_object=body,\n )\n\n if Helpers.code_check(r):\n if mode.lower() == 'manual':\n self.speed = 1\n self.mode = 'manual'\n else:\n self.mode = mode\n self.speed = 0\n return True\n logger.debug('Error setting purifier mode')\n return False",
"def option_activated(self):\n self._start_new_game()",
"def is_passive(self, item):\n return item in self._passive.get((item.next, item.dot), set())",
"def toggle_anontunnel(self):\n if not self.is_running:\n self.start_anontunnel()\n else:\n self.stop_anontunnel()",
"def reset_standby (self):\n if self.__standby:\n log.debug(\"Reset request to active mode\")\n self.__standby = False",
"def activate_window_desktop(self, window: wrappers.Window) -> Optional[bool]:\n pass",
"def toggle_activate(self):\n self.set_active(status = not self._is_active)",
"def set_password_mode(self, mode):\n if mode:\n self.send_raw(bytes([IAC, WILL, ECHO]))\n else:\n self.send_raw(bytes([IAC, WONT, ECHO]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if the window is in the passive state.
|
def is_passive(self):
return isinstance(self._state, window_states.PassiveWindowState)
|
[
"def passive(self):\n self._state = window_states.PassiveWindowState(self)\n self.action_after_passive()",
"def is_passive(self, node):\n return bool(self.network_graph.nodes[node]['node_passive'])",
"def is_passive(self, item):\n return item in self._passive.get((item.next, item.dot), set())",
"def _update_is_passive(self):\n passive_setting = self._view.settings().get('wrap_as_you_type_passive')\n if passive_setting in (None, False, True):\n self.is_passive = bool(passive_setting)\n else:\n self.is_passive = False\n raise UserFacingError('The value must be a boolean')",
"def is_active(self):\n return isinstance(self._state, window_states.ActiveWindowState)",
"def is_vpn_active(self):\n is_active = False\n if self.NI_TUN in netifaces.interfaces():\n ni_addresses = netifaces.ifaddresses(self.NI_TUN)[netifaces.AF_INET]\n if ni_addresses:\n # we suppose that we have only one address for this network interface\n # and we take the address of the first and only one in the list\n self.pni[self.PNI_ADDRESS] = ni_addresses[0][self.NI_ADDR_KEY]\n if self.pni[self.PNI_ADDRESS]:\n is_active = True\n return is_active",
"def getEntertainmentFastPassAvailable(self):\n bool = self.__data['fastPass']\n if bool == 'true':\n return True\n else:\n return False",
"def is_server_active(self):\n return self._process and self._process.is_alive()",
"def getEntertainmentFastPassPlusAvailable(self):\n bool = self.__data['fastPassPlus']\n if bool == 'true':\n return True\n else:\n return False",
"def is_live(self):\n return self.live_state is not None and self.live_state != ENDED",
"def is_monitor_active(self):\n return self._monitor and self._monitor.is_alive()",
"def is_alive(self):\n return hasattr(self, 'alive') and self.alive",
"def is_active(self):\n return self.element_info.active",
"def isWin(self):\n return self.pos in self.data['win_states']",
"def has_active_flows(self) -> bool:\n return bool(self.active_flows)",
"def is_visible() -> bool:\n return win.winfo_ismapped()",
"def is_in_observingstate(self):\n if not self.checkobservingallowed():\n return False\n swlevel = self._lcu_interface.get_swlevel()\n if swlevel == 3:\n return True\n if swlevel == 2:\n # If in tile-off mode, count it as observing state\n if self.septonconf:\n return True\n else:\n return False\n else:\n return False",
"def is_live_stream(self):\n return self.show.upper() == 'LIVESTREAM'",
"def in_game(self):\n try:\n if self.p.poll() is None:\n return True\n else:\n return False\n except:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Action the window performs when it switches to the `passive` state.
|
def action_after_passive(self):
        pass
|
[
"def passive(self):\n self._state = window_states.PassiveWindowState(self)\n self.action_after_passive()",
"def is_passive(self):\n return isinstance(self._state, window_states.PassiveWindowState)",
"def _update_is_passive(self):\n passive_setting = self._view.settings().get('wrap_as_you_type_passive')\n if passive_setting in (None, False, True):\n self.is_passive = bool(passive_setting)\n else:\n self.is_passive = False\n raise UserFacingError('The value must be a boolean')",
"def option_activated(self):\n self._start_new_game()",
"def action_after_active(self):",
"def ChangeGestureMode(self,parameter):\r\n if (self.gestureButton.GetActive() == True):\r\n self.ada.Notify('gestureModeStarted',True)\r\n self.gestureButton.SetLabel('Deactivate')\r\n else:\r\n self.ada.Notify('gestureModeStarted',False)\r\n self.gestureButton.SetLabel('Activate')",
"def is_passive(self, node):\n return bool(self.network_graph.nodes[node]['node_passive'])",
"def __activated(self, reason):\n if (\n reason == QSystemTrayIcon.Context or\n reason == QSystemTrayIcon.MiddleClick\n ):\n self.__showContextMenu()\n elif reason == QSystemTrayIcon.DoubleClick:\n self.__startEric()",
"def update(self):\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n\n if self.active_flag:\n self.sense_and_act()",
"def _set_passive(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=YANGBool,\n is_leaf=True,\n yang_name=\"passive\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"boolean\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"passive must be of a type compatible with boolean\"\"\",\n \"defined-type\": \"boolean\",\n \"generated-type\": \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"passive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)\"\"\",\n }\n )\n\n self.__passive = t\n if hasattr(self, \"_set\"):\n self._set()",
"def activeAllOffCallback( self ) :\n\n self.activeCallback( False )",
"def active(self):\n self._state = window_states.ActiveWindowState(self)\n self.action_after_active()",
"def is_passive(self, item):\n return item in self._passive.get((item.next, item.dot), set())",
"def take_action(plug, action):\n if action == \"on\":\n plug.power_on()\n else:\n plug.power_off()",
"def proxy_passive_to_active():\n logger.info('Change all proxys to active')\n proxys = zapi.proxy.get(output=[ 'shorten', 'host' ],\n filter={ 'status': 6 })\n if ( proxys.__len__() == 0 ):\n logger.info('Done')\n return\n bar = ProgressBar(maxval=proxys.__len__(),widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()\n i = 0\n for x in proxys:\n i += 1\n proxyid = x['proxyid']\n result = zapi.proxy.update(proxyid=proxyid, status=5)\n logger.echo = False\n logger.debug('Changed from passive to active proxy: %s' % (x['host']))\n bar.update(i)\n bar.finish()\n logger.echo = True\n logger.info('Done')\n return",
"def _set_passive(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"passive\", rest_name=\"passive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='empty', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"passive must be of a type compatible with empty\"\"\",\n 'defined-type': \"empty\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"passive\", rest_name=\"passive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='empty', is_config=True)\"\"\",\n })\n\n self.__passive = t\n if hasattr(self, '_set'):\n self._set()",
"def to_idle(self):\r\n\r\n\t\tself.__send_extended_byte_array(self.MODE_IDLE, [])",
"def _toggle_fscreen(self, event):\n self.fs = not self.fs\n self.window.attributes(\"-fullscreen\", self.fs)",
"def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Switches the window to the `active` state.
|
def active(self):
self._state = window_states.ActiveWindowState(self)
self.action_after_active()
|
[
"def change_focus(window):\n xdotool('windowactivate', window)",
"def active(self, active):\n \n self._active = active",
"def set_active(self):\n bytes_to_write = self._to_byte_array((Commands.TOGGLE_STATE_COMMAND, Commands.ACTIVE))\n Controller._write_bytes(bytes_to_write)",
"def _set_active(self, active):\n self._active = active\n\n if active:\n # We must redraw the clock...\n self._update_cb()\n\n # And update again the clock every seconds.\n gobject.timeout_add(1000, self._update_cb)",
"def activate():\n \n if Robo.state != \"active\":\n Robo.state = \"active\"\n print \"Robot is now active\"\n else:\n Robo.state = \"not active\"\n print \"Robot is no longer active\"",
"def setActiveWin(self, window):\n self.activeWindow = window\n self.controlActivated.emit(self)\n self.updateCommandsAvail()\n filterTextDialog = globalref.mainControl.filterTextDialog\n if filterTextDialog and filterTextDialog.isVisible():\n filterTextDialog.updateAvail('', True)\n filterConditionDialog = globalref.mainControl.filterConditionDialog\n if filterConditionDialog and filterConditionDialog.isVisible():\n filterConditionDialog.updateFilterControls()",
"def op_set_window(self, window_num):\n self._ui.screen.select_window(window_num)",
"def switch(self, to: str, whisper=None) -> None:\n\t\ts = self._scenes.get(to.lower())\n\t\tif s is None:\n\t\t\traise Exception(f\"No Scene named: '{to}'\")\n\t\tself._active.deactivate()\n\t\tself._active = s\n\t\tself._active.activate(whisper)\n\t\tself._active.focus_set()",
"def activate(self):\n\n # check log_win to determine, if windows are already created\n if self.wins.log_win is not None:\n self.wins.input_win.state.active = True\n self.wins.input_win.redraw()\n self.wins.log_win.state.active = False\n self.wins.log_win.redraw()\n self.clear_notifications()\n return",
"def toggleActivation(self):\n\n # Switch the activation colors and reverse activation variable\n temp = self.color\n self.setColor(self.deactiveColor)\n self.color = self.deactiveColor\n self.deactiveColor = temp\n\n self.active = not self.active\n if not self.isActive():\n self.setCenter(Point(self.centerX + 3, self.centerY + 5))\n else:\n self.setCenter(Point(self.centerX-3, self.centerY-5))",
"def toggle_activate(self):\n self.set_active(status = not self._is_active)",
"def user32_SwitchToThisWindow(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"fAltTab\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def setActive( self, state ):\r\n\t\tif ( state ):\r\n\t\t\tself._nativePointer.current = True\r\n\t\telse:\r\n\t\t\tmxs.layerManager.getLayer(0).current = True\r\n\r\n\t\treturn self._nativePointer.current",
"def set_active(self):\n if self.tabindex is not None:\n for i in range(self.view.tabber.count()):\n self.view.tabber.setTabEnabled(i, False)\n self.view.tabber.setTabEnabled(self.tabindex, True)\n self.view.tabber.setCurrentIndex(self.tabindex)",
"def activate(self):\n self.vpi.get_viewport_window().set_active_camera(str(self.prim.GetPath()))",
"def activate_window_desktop(self, window: wrappers.Window) -> Optional[bool]:\n pass",
"def user32_SetActiveWindow(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def switch_to_default_tab(self):\n if self.default_tab:\n self.driver.switch_to.window(self.default_tab)",
"def switch_screen(self, screen):\n\t\tself.screen_manager.switch_current_screen(screen)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if the window is in the active state.
|
def is_active(self):
return isinstance(self._state, window_states.ActiveWindowState)
|
[
"def is_active(self):\n return self.element_info.active",
"def is_monitor_active(self):\n return self._monitor and self._monitor.is_alive()",
"def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False",
"def is_active(self):\n if self.state != 'checkout':\n return True",
"def _get_isActive(self) -> \"bool\" :\n return _core.Workspace__get_isActive(self)",
"def active(self):\n self._state = window_states.ActiveWindowState(self)\n self.action_after_active()",
"def is_active(self):\n # print(self.get_full_name() + \" is active? \" + str(self.is_current_member()) + \" \" +\n # str(self.is_current_collaborator()))\n return self.is_current_member() or self.is_current_collaborator()",
"def isWin(self):\n return self.pos in self.data['win_states']",
"def activate(self) -> \"bool\" :\n return _core.Workspace_activate(self)",
"def is_server_active(self):\n return self._process and self._process.is_alive()",
"def is_active(self):\n return self.season.is_active",
"def _is_win_visible(self, i3_win) -> bool:\n try:\n xprop = check_output(['xprop', '-id', str(i3_win.window)]).decode()\n return '_NET_WM_STATE_HIDDEN' not in xprop\n except FileNotFoundError:\n # if xprop not found, fall back to just checking if tmux win is on our current worksapce:\n self.logger.debug('xprop utility is not found - please install it.')\n self.logger.debug('will decide visibility simply by checking if tmux is on our current workspace')\n return self._is_tmux_win_on_current_ws(i3_win)",
"def is_display_active(self):\n return self.op is not None",
"def is_idle(self):\n pass",
"def has_windows(self):\n\n if self.wins.log_win:\n return True\n\n return False",
"def isActive(self) -> \"SbBool\":\n return _coin.ScXMLStateMachine_isActive(self)",
"def is_visible() -> bool:\n return win.winfo_ismapped()",
"def _get_isActive(self) -> \"bool\" :\n return _core.ToolbarTab__get_isActive(self)",
"def AcceptsFocus(self):\n return self.IsShown() and self.IsEnabled()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Action the window performs when it switches to the `active` state.
|
def action_after_active(self):
        pass
|
[
"def active(self):\n self._state = window_states.ActiveWindowState(self)\n self.action_after_active()",
"def change_click(self):\r\n self.switch_window.emit()",
"def action(self, state):\n pass",
"def active(self, active):\n \n self._active = active",
"def activate():\n \n if Robo.state != \"active\":\n Robo.state = \"active\"\n print \"Robot is now active\"\n else:\n Robo.state = \"not active\"\n print \"Robot is no longer active\"",
"def option_activated(self):\n self._start_new_game()",
"def switch_active_callback():\n global switch_active\n switch_active = True\n print 'Switch state was changed!'\n if GPIO.input(switch_input):\n print 'Switch was closed!'\n switch_state = True\n else:\n print 'Switch was opened!'\n switch_state = False",
"def _set_active(self, active):\n self._active = active\n\n if active:\n # We must redraw the clock...\n self._update_cb()\n\n # And update again the clock every seconds.\n gobject.timeout_add(1000, self._update_cb)",
"def set_active(self):\n bytes_to_write = self._to_byte_array((Commands.TOGGLE_STATE_COMMAND, Commands.ACTIVE))\n Controller._write_bytes(bytes_to_write)",
"def activer(self):\n self.est_activee = True",
"def toggleActivation(self):\n\n # Switch the activation colors and reverse activation variable\n temp = self.color\n self.setColor(self.deactiveColor)\n self.color = self.deactiveColor\n self.deactiveColor = temp\n\n self.active = not self.active\n if not self.isActive():\n self.setCenter(Point(self.centerX + 3, self.centerY + 5))\n else:\n self.setCenter(Point(self.centerX-3, self.centerY-5))",
"def setActiveWin(self, window):\n self.activeWindow = window\n self.controlActivated.emit(self)\n self.updateCommandsAvail()\n filterTextDialog = globalref.mainControl.filterTextDialog\n if filterTextDialog and filterTextDialog.isVisible():\n filterTextDialog.updateAvail('', True)\n filterConditionDialog = globalref.mainControl.filterConditionDialog\n if filterConditionDialog and filterConditionDialog.isVisible():\n filterConditionDialog.updateFilterControls()",
"def change_focus(window):\n xdotool('windowactivate', window)",
"def _notify_active_cb(self, widget, event):\n self._clock.active = self.props.active\n if self.props.active:\n self._inhibit_suspend()\n else:\n self._allow_suspend()",
"def activate(self):\n\n # check log_win to determine, if windows are already created\n if self.wins.log_win is not None:\n self.wins.input_win.state.active = True\n self.wins.input_win.redraw()\n self.wins.log_win.state.active = False\n self.wins.log_win.redraw()\n self.clear_notifications()\n return",
"def activate():\n #Log.info(\"Event-System activated\")\n Event.activated = True",
"def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False",
"def select_action(self, observation):",
"def activeAllOnCallback( self ) :\n\n self.activeCallback( True )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if window can handle mouse click.
|
def can_handle_click(self, mouse_pos: Tuple[int, int]) -> bool:
return self._state.can_handle(mouse_pos)
|
[
"def _should_handle_mouse_press(self, buttons: int) -> bool:\n return (\n self.definition.on_press is not None\n # Also handle if on_release is defined so we can record which mouse button was used.\n or self.definition.on_release is not None\n or self.definition.depressed_color is not None\n )",
"def is_mouse_in_window(x, y):\n\tif (x <= right_bound and x >= left_bound) and (y <= lower_bound and y >= upper_bound):\n\t\treturn True\n\telse:\n\t\treturn False",
"def _should_handle_mouse_drag(self) -> bool:\n return self._currently_dragging",
"def zoom_mouse_active():\n try:\n return eye_zoom_mouse.zoom_mouse.enabled\n except AttributeError:\n return False",
"def check_clicked_inside_or_blocking(self, event: pygame.event.Event) -> bool:\n consumed_event = False\n if self.is_blocking and event.type == pygame.MOUSEBUTTONDOWN:\n consumed_event = True\n\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n scaled_mouse_pos = self.ui_manager.calculate_scaled_mouse_position(event.pos)\n if self.hover_point(scaled_mouse_pos[0],\n scaled_mouse_pos[1]) or (self.edge_hovering[0] or\n self.edge_hovering[1] or\n self.edge_hovering[2] or\n self.edge_hovering[3]):\n if self.is_enabled and self.bring_to_front_on_focused:\n self.window_stack.move_window_to_front(self)\n consumed_event = True\n\n return consumed_event",
"def check_mouse():\n events = pygame.event.get()\n for event in events:\n # if x clicked\n if event.type == pygame.QUIT:\n sys.exit()\n # if mousebutton pressed, return mouse position\n if event.type == pygame.MOUSEBUTTONDOWN:\n return pygame.mouse.get_pos()",
"def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False",
"def mouse_on_button(self):\n\n # Get mouse position\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n # Evaluate if mouse x & button x and mouse y & button y overlap\n conditions = [\n self.button_x < mouse_x < (self.button_x + self.button_width),\n self.button_y < mouse_y < (self.button_y + self.button_height)\n ]\n\n # Return bool of whether or not all conditions are met\n return all(conditions)",
"def mouseIsLocked(self):\n return False",
"def is_hovered(self):\n return self.collidepoint(pygame.mouse.get_pos()) and not self.is_clicked()",
"def wait_for_mouse_click(self):\n def on_click(x, y, button, pressed):\n if pressed:\n return False\n\n with mouse.Listener(on_click=on_click) as listener:\n listener.join()",
"def _should_handle_mouse_hover(self) -> bool:\n return (\n self.definition.on_hover is not None\n or self.definition.on_unhover is not None\n or self.definition.hover_color is not None\n )",
"def mouse_entered(self):\n return False",
"def user32_IsTouchWindow(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"pulFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_IsWindowEnabled(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def canHaveGui(self):\n\n # Determine if a display is available. Note that\n # calling the IsDisplayAvailable function will\n # cause the application to steal focus under OSX!\n if self.__canHaveGui is None:\n try:\n import wx # pylint: disable=import-outside-toplevel\n self.__canHaveGui = wx.App.IsDisplayAvailable()\n except ImportError:\n self.__canHaveGui = False\n\n return self.__canHaveGui",
"def mouse_buttons(self):\n\n return pygame.mouse.get_pressed()",
"def mouse_jiggle():\n\tif _check_display():\n\t\t_log_output( 'Mouse button jiggle' )\n\t\tlibcvautomation.xte_mouseJiggle(_get_display() )\n\t\treturn True\n\telse:\n\t\t#Display not open\n\t\traise LibcvDisplayNotOpen( _get_name() )",
"def mouse_released(self, x, y, modifiers):\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the request cycle with a single request and no composite GET condition: POST, GET, PUT.
|
def testRequestSimpleCycle(self):
# test post method
requestName = self.insertRequest(self.rerecoCreateArgs)
## test get method
# get by name
response = self.getRequestWithNoStale('name=%s' % requestName)
self.assertEqual(response[1], 200, "get by name")
self.assertEqual(self.resultLength(response), 1)
# get by status
response = self.getRequestWithNoStale('status=new')
self.assertEqual(response[1], 200, "get by status")
self.assertEqual(self.resultLength(response), 1)
        # this creates a cache
        # need to find a way to reset the Etag or avoid getting it from the cache
# response = self.getRequestWithNoStale('status=assigned')
# self.assertEqual(response[1], 200, "get by status")
# self.assertEqual(self.resultLength(response), 0)
# get by prepID
response = self.getRequestWithNoStale('prep_id=%s' % self.rerecoCreateArgs["PrepID"])
self.assertEqual(response[1], 200)
self.assertEqual(self.resultLength(response), 1)
#import pdb
#pdb.set_trace()
response = self.getRequestWithNoStale('campaign=%s' % self.rerecoCreateArgs["Campaign"])
self.assertEqual(response[1], 200)
self.assertEqual(self.resultLength(response), 1)
response = self.getRequestWithNoStale('inputdataset=%s' % self.rerecoCreateArgs["InputDataset"])
self.assertEqual(response[1], 200)
self.assertEqual(self.resultLength(response), 1)
response = self.getRequestWithNoStale('mc_pileup=%s' % self.rerecoCreateArgs["MCPileup"])
self.assertEqual(response[1], 200)
self.assertEqual(self.resultLength(response), 1)
response = self.getRequestWithNoStale('data_pileup=%s' % self.rerecoCreateArgs["DataPileup"])
self.assertEqual(response[1], 200)
self.assertEqual(self.resultLength(response), 1)
# test put request with just status change
data = {'RequestStatus': 'assignment-approved'}
self.putRequestWithAuth(requestName, data)
response = self.getRequestWithNoStale('status=assignment-approved')
self.assertEqual(response[1], 200, "put request status change")
self.assertEqual(self.resultLength(response), 1)
# assign with team
# test put request with just status change
data = {'RequestStatus': 'assigned'}
data.update(self.rerecoAssignArgs)
self.putRequestWithAuth(requestName, data)
response = self.getRequestWithNoStale('status=assigned')
self.assertEqual(response[1], 200, "put request status change")
self.assertEqual(self.resultLength(response), 1)
response = self.getRequestWithNoStale('status=assigned&team=%s' %
self.rerecoAssignArgs['Team'])
self.assertEqual(response[1], 200, "put request status change")
self.assertEqual(self.resultLength(response), 1)
response = self.getMultiRequestsWithAuth([requestName])
self.assertEqual(self.resultLength(response), 1)
self.assertEqual(list(response[0]['result'][0])[0], requestName)
#response = self.cloneRequestWithAuth(requestName)
#self.assertEqual(response[1], 200, "put request clone")
#response = self.getRequestWithNoStale('status=new')
#self.assertEqual(self.resultLength(response), 1)
|
[
"def test_returns_true_if_request_method_is_get(self):\n self.request_mock.method = 'GET'\n self.assertTrue(is_get(self.get_response_mock, self.request_mock))",
"def test_returns_false_if_request_method_is_not_post(self):\n self.request_mock.method = 'FOOBAR'\n self.assertFalse(is_put(self.get_response_mock, self.request_mock))",
"def test_returns_false_if_request_hasnt_get_parameter(self):\n self.request_mock.GET = dict()\n self.assertFalse(self.has_parameter(self.get_response_mock, self.request_mock))",
"def test_returns_true_if_request_has_get_parameter(self):\n self.request_mock.GET = {self.parameter_name: 'foobar'}\n self.assertTrue(self.has_parameter(self.get_response_mock, self.request_mock))",
"def test_request_methods(self):\n gets = list_routes('GET')\n posts = list_routes('POST')\n no_posts = '/', 'about', 'faq', '/canaries'\n\n for route in gets:\n r = self.app.get(route, follow_redirects=True)\n self.assertEqual(r.status_code, 200)\n\n for route in posts:\n r = self.app.post(route, follow_redirects=True)\n \"\"\"Response can be 400 Bad Request since we don't send any\n data in the request.\"\"\"\n self.assertTrue(r.status_code == 200 or r.status_code == 400)\n\n for route in no_posts:\n r = self.app.post(route, follow_redirects=True)\n self.assertEqual(r.status_code, 405)\n\n # Test that PUT, DELETE are forbidden\n for route in list_routes():\n r = self.app.delete(route)\n self.assertEqual(r.status_code, 405)\n r = self.app.put(route)\n self.assertEqual(r.status_code, 405)",
"def test_returns_true_if_request_method_is_post(self):\n self.request_mock.method = 'PUT'\n self.assertTrue(is_put(self.get_response_mock, self.request_mock))",
"def test_req_is_read(self):\n self.test_req_create()\n url = reverse_lazy('hello:requests')\n resp = self.get_ajax(url)\n json_resp = self.json_response(resp)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(len(json_resp), 5)\n for num, resp in enumerate(json_resp, start=1):\n header = ast.literal_eval(resp['fields']['header'])\n self.assertEqual(resp['pk'], num)\n self.assertEqual(resp['fields']['is_read'], False)\n self.assertEqual(resp['fields']['ip'], '127.0.0.1')\n self.assertEqual(resp['fields']['page'], 'http://testserver/')\n self.assertEqual(header['SERVER_NAME'], 'testserver')\n self.assertEqual(header['REQUEST_METHOD'], 'GET')\n self.assertEqual(header['SERVER_PORT'], '80')\n\n resp = self.post_ajax(url, {'request_pk': num})\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(self.json_response(resp), {'success': True})\n self.assertEqual(HttpRequest.objects.filter(is_read=False).count(), 0)",
"def test_ajax_fall_through_method(self):\n req = self.req(\n \"POST\", \"/the/url?filter=value\", data={\"action-doit\": \"3\"},\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\")\n res = self.on_listobject_response(\n response(self.builder.one(resourceIdentity=make_identity(id=3))),\n request=req)\n\n self.assertEqual(res.request.method, \"GET\")\n self.assertEqual(res.request.POST, {})",
"def test_get_data(self):\n\n\t\t# Test to go here when best approach is decided for making requests.",
"def test_request_init(self):\n\n\t\tself.assertEqual(self.request.path, '/index')\n\t\tself.assertEqual(self.request.method, 'GET')\n\t\tself.assertEqual(self.request._get_data, None)\n\t\tself.assertEqual(self.request._post_data, None)",
"def test_call_makes_request_with_required_parameters(self):\n base.call(\"GET\", self.url, self.req_ctx)\n self.session.request.assert_called_once_with(\n \"GET\", self.url, auth=None, **self.OPTIONAL_REQUEST_ARGS)",
"def checkGet(inp):\n get = \"GET\"\n out = True\n if len(inp) != 3:\n out = False\n if inp != get:\n out = False\n return out",
"def test_admin_get_requests(self):\n headers = self.get_token_admin() \n\n # try fetch while no requests exist\n response1 = self.app_client.get('/api/v2/requests', headers=headers)\n self.assertEqual(response1.status_code, 404)\n\n #try fetch by id no requests\n response1 = self.app_client.get('/api/v2/requests/1', headers=headers)\n self.assertEqual(response1.status_code, 404)\n self.insert_requests()\n\n #test admin get by ID\n response = self.app_client.get('/api/v2/requests/1', headers=headers)\n self.assertEqual(response.status_code, 200)\n\n #test admin get all\n response = self.app_client.get('/api/v2/requests', headers=headers)\n self.assertEqual(response.status_code, 200)",
"def test_request_get(self):\n r = self.base._request('/get', 'GET', {\n 'foo': 'bar'\n })\n\n self.assertEqual(r['url'], 'https://httpbin.org/get?foo=bar')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')",
"def test_returns_false_if_request_hasnt_post_parameter(self):\n self.request_mock.POST = dict()\n self.assertFalse(self.has_parameter(self.get_response_mock, self.request_mock))",
"def test_action(self):\n\n for endpoint in ['api-stock-count', 'api-stock-add', 'api-stock-remove']:\n\n url = reverse(endpoint)\n\n data = {}\n\n # POST with a valid action\n response = self.doPost(url, data)\n self.assertContains(response, \"must contain list\", status_code=status.HTTP_400_BAD_REQUEST)\n\n data['items'] = [{\n 'no': 'aa'\n }]\n\n # POST without a PK\n response = self.doPost(url, data)\n self.assertContains(response, 'must contain a valid pk', status_code=status.HTTP_400_BAD_REQUEST)\n\n # POST with a PK but no quantity\n data['items'] = [{\n 'pk': 10\n }]\n \n response = self.doPost(url, data)\n self.assertContains(response, 'must contain a valid pk', status_code=status.HTTP_400_BAD_REQUEST)\n\n data['items'] = [{\n 'pk': 1234\n }]\n\n response = self.doPost(url, data)\n self.assertContains(response, 'must contain a valid quantity', status_code=status.HTTP_400_BAD_REQUEST)\n\n data['items'] = [{\n 'pk': 1234,\n 'quantity': '10x0d'\n }]\n\n response = self.doPost(url, data)\n self.assertContains(response, 'must contain a valid quantity', status_code=status.HTTP_400_BAD_REQUEST)\n \n data['items'] = [{\n 'pk': 1234,\n 'quantity': \"-1.234\"\n }]\n \n response = self.doPost(url, data)\n self.assertContains(response, 'must not be less than zero', status_code=status.HTTP_400_BAD_REQUEST)\n\n # Test with a single item\n data = {\n 'item': {\n 'pk': 1234,\n 'quantity': '10',\n }\n }\n\n response = self.doPost(url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_priority_requests(self):\n self.client.login(username='admin', password='admin')\n\n self.test_req_create() # first main page\n self.test_req_create('edit') # two edit page (priority)\n\n url = reverse_lazy('hello:requests')\n resp = self.get_ajax(url)\n json_resp = self.json_response(resp)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(len(json_resp), 10)\n count = 1\n for resp in json_resp:\n page = ''\n if count > 5:\n page = 'edit/'\n self.assertEqual(resp['fields']['page'],\n 'http://testserver/{}'.format(page))\n count += 1",
"def test_returns_true_if_request_has_post_parameter(self):\n self.request_mock.POST = {self.parameter_name: 'foobar'}\n self.assertTrue(self.has_parameter(self.get_response_mock, self.request_mock))",
"def testF_view_request(self):\n _, _, requestIds = self._inject(15) # creates x docs/requests\n requestView = self._getViewResults(\"request\")\n self.assertEqual(len(requestView), 15)\n for reqView in requestView:\n self.failUnless(reqView[u\"key\"] in requestIds)\n self.failUnless(reqView[u\"value\"][u\"state\"] == u\"NewlyHeld\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an LRP module from a Keras model.
|
def read_kerasmodel(model):
lrpmodules = []
for layer in model.layers:
module, activation_module = get_lrpmodule(layer)
if module is not None:
lrpmodules.append(module)
if activation_module is not None:
lrpmodules.append(activation_module)
return LRPModel(lrpmodules)
|
[
"def _get_lr():\n raise NotImplementedError",
"def load_module(prefix, epoch, data_names, data_shapes):\n sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n\n # We don't need CTC loss for prediction, just a simple softmax will suffice.\n # We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top\n pred_fc = sym.get_internals()['pred_fc_output']\n sym = mx.sym.softmax(data=pred_fc)\n\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)\n mod.bind(for_training=False, data_shapes=data_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=False)\n return mod",
"def import_keras_module(self):\n try:\n keras_module = importlib.import_module(\"tensorflow.keras.applications.\" + self.cnn_base)\n except ModuleNotFoundError as err:\n print(\"ERROR: Model not found in Keras application\")\n sys.exit(1)\n return keras_module",
"def load_pretrained_model(pretrained_model_name, pooling, model_weights_path = None):\n \n # initialize output\n model = None\n \n pretrained_model_name = pretrained_model_name.lower()\n \n ###########################\n ### import pretrained model\n ###########################\n if pretrained_model_name == \"xception\": \n from keras.applications.xception import Xception\n model = Xception(include_top=False, weights='imagenet', pooling=pooling)\n elif pretrained_model_name == \"vgg16\": \n from keras.applications.vgg16 import VGG16\n model = VGG16(include_top=False, weights='imagenet', pooling=pooling)\n elif pretrained_model_name == \"resnet50\": \n from keras.applications.resnet50 import ResNet50\n model = ResNet50(include_top=False, weights='imagenet', pooling=pooling)\n elif pretrained_model_name == \"inception_v3\": \n from keras.applications.inception_v3 import InceptionV3\n model = InceptionV3(include_top=False, weights='imagenet', pooling=pooling)\n elif pretrained_model_name == \"inception_resnet_v2\": \n from keras.applications.inception_resnet_v2 import InceptionResNetV2\n model = InceptionResNetV2(include_top=False, weights='imagenet', pooling=pooling)\n elif pretrained_model_name == \"mobilenetv2_1.00_224\": \n from keras.applications.mobilenet_v2 import MobileNetV2\n model = MobileNetV2(include_top=False, weights='imagenet', pooling=pooling)\n else:\n raise NameError('Invalid pretrained model name - must be one of [\"Xception\", \"VGG16\", \"ResNet50\", \"InceptionV3\", \"InceptionResNetV2\", \"MobileNetV2\"]')\n \n if model_weights_path is not None:\n if os.path.exists(model_weights_path):\n model.load_weights(model_weights_path)\n else:\n raise NameError('pretrained model weights not found')\n \n return model",
"def get_language_model(n_tok, em_sz, nhid, nlayers, pad_token, decode_train=True, dropouts=None):\n if dropouts is None: dropouts = [0.5,0.4,0.5,0.05,0.3]\n rnn_enc = RNN_Encoder(n_tok, em_sz, n_hid=nhid, n_layers=nlayers, pad_token=pad_token,\n dropouti=dropouts[0], wdrop=dropouts[2], dropoute=dropouts[3], dropouth=dropouts[4])\n rnn_dec = LinearDecoder(n_tok, em_sz, dropouts[1], decode_train=decode_train, tie_encoder=rnn_enc.encoder)\n return SequentialRNN(rnn_enc, rnn_dec)",
"def base_lm(hparams):\n\n # Language model.\n hparams.add_hparam(\"lm_type\", \"left2right\")\n hparams.add_hparam(\"lm_num_layers\", 2)\n hparams.add_hparam(\"lm_num_residual_layers\", 1)\n\n # Language model training and loss.\n hparams.add_hparam(\"lm_do_train\", False)\n hparams.add_hparam(\"lm_loss_coeff\", 0.001)\n hparams.add_hparam(\"lm_loss_enable_step\", 400000)\n\n return hparams",
"def load_model():\n # TODO: INSERT CODE\n # return model",
"def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])",
"def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([2] * 2, activate_final=True),\n snt.LayerNorm()\n ])",
"def rl_modelrl_l1_medium():\n hparams = rl_modelrl_medium()\n hparams.generative_model_params = \"next_frame_l1\"\n return hparams",
"def rl_modelrl_l2_medium():\n hparams = rl_modelrl_medium()\n hparams.generative_model_params = \"next_frame_l2\"\n return hparams",
"def _build_rnn_model(self):\n if self.rnn_layer.lower() == 'lstm':\n self.model = self._build_lstm_model()\n elif self.rnn_layer.lower() == 'gru':\n self.model = self._build_gru_model()\n else:\n self.model = self._build_simple_rnn_model()\n\n self.model.compile(optimizer=self.optimizer,\n loss=\"mse\",\n metrics=[\"mse\"])\n\n return self.model",
"def ImputedRNNModel():\n name = 'ImputedRNNModel'\n parameters = {\n 'rnn_type': 'gru' #lstm alternative\n }",
"def get_model(self,\n stage_id: int,\n old_model: tf.keras.Model = None) -> tf.keras.Model:\n pass",
"def open_model():\n return tfjs.converters.load_keras_model(\"Saved_Model\\\\model.json\")",
"def load_model():\n print('Importing TensorFlow...')\n from tensorflow.keras.models import load_model as tf_load_model\n\n print('Loading the Neural Network model...')\n path = os.path.join(CURR_DIR, 'self_drive_model_01.hdf5')\n model = tf_load_model(path)\n\n return model",
"def load_pretrained_lm(vocab) :\n lm = get_language_model(AWD_LSTM, len(vocab))\n model_path = untar_data('https://s3.amazonaws.com/fast-ai-modelzoo/wt103-1', data=False)\n fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]\n old_itos = pickle.load(open(fnames[1], 'rb'))\n old_stoi = {v:k for k,v in enumerate(old_itos)}\n wgts = torch.load(fnames[0], map_location=lambda storage, loc: storage)\n wgts = convert_weights(wgts, old_stoi, vocab)\n lm.load_state_dict(wgts)\n return lm",
"def get_language_model(attention_mask_func, num_tokentypes,\n init_method=None, scaled_init_method=None, get_key_value=False):\n args = get_args()\n\n if init_method is None:\n init_method = init_method_normal(args.init_method_std)\n\n if scaled_init_method is None:\n scaled_init_method = scaled_init_method_normal(args.init_method_std, args.num_layers)\n\n # Language model.\n language_model = TransformerLanguageModel(\n attention_mask_func=attention_mask_func,\n init_method=init_method,\n output_layer_init_method=scaled_init_method,\n num_tokentypes=num_tokentypes,\n get_key_value=get_key_value)\n # key used for checkpoints.\n language_model_key = 'language_model'\n\n return language_model, language_model_key",
"def convert_model(self, L):\n \n layers = []\n image_size = self.imagesize\n\n for i in xrange(37): # 37 layers in Matlab model\n l = L[0][i][0][0]\n tp = l[\"type\"][0]\n name = l[\"name\"][0]\n\n if tp == \"conv\":\n wt = l[\"weights\"][0,0]\n bias = l[\"weights\"][0,1]\n pad = l[\"pad\"][0]\n stride = l[\"stride\"][0]\n\n # WORK-AROUND to get to 7x7 output after last convolution\n if name == 'conv5_3':\n pad = [0, 1, 0, 1]\n\n if sum(pad) > 0:\n pad = [int(d) for d in pad]\n layer = Padding(pad)\n layer.image_size = image_size\n image_size = layer.get_dim(\"output\")[1:3]\n layers.append(layer)\n\n layer, outdim = self.conv_layer(name, wt, bias, image_size)\n layers.append(layer)\n image_size = outdim\n\n elif tp == \"pool\":\n method = l[\"method\"][0]\n pool = l[\"pool\"][0]\n stride = l[\"stride\"][0]\n pad = l[\"pad\"][0]\n\n stride = [int(d) for d in stride]\n pool = [int(d) for d in pool]\n pad = [int(d) for d in pad]\n\n layer, outdim = self.pool_layer(name, method, pool, pad, stride, image_size)\n layers.append(layer)\n image_size = outdim\n\n elif tp == \"relu\":\n layers.append(self.relu_layer(name))\n\n elif tp == \"softmax\":\n layers.append(Flattener('flatten'))\n layers.append(self.softmax_layer(name))\n\n print len(layers), 'layers created'\n return layers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This is a utility function used to calculate the average of the last three datapoints in the series as a measure, instead of just the last datapoint. It reduces noise, but it also reduces sensitivity and increases the delay to detection.
|
def tail_avg(timeseries, end_timestamp, full_duration):
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
|
[
"def get_average(self, last_n=None):\n if not self.has_samples():\n msg = \"get_average() cannot be called when no samples exist\"\n raise IllegalStateError(msg)\n\n samples = self.get_samples(last_n)\n return reduce(add, samples) / float(len(samples))",
"def measure_average():\r\n \r\n distance1=measure()\r\n time.sleep(0.001)\r\n distance2=measure()\r\n time.sleep(0.001)\r\n distance3=measure()\r\n distance = distance1 + distance2 + distance3\r\n distance = distance / 3\r\n return distance",
"def get_avg(self):\n\t\treturn self.sum / max(len(self.window), 1)",
"def avg_3wk_spend(spend: pd.Series) -> pd.Series:\n return spend.rolling(3).mean()",
"def average(data):\n return 1.0*sum(data)/len(data)",
"def qd_mean(self):\n for i in range(10):\n accum = []\n self.acquisition()\n accum.append(max(self.line[self._from:self._to]))\n mean = np.mean(accum)\n return mean",
"def average(data):\n counts = len(data)\n total = sum(data)\n return total / counts",
"def compute_mean_wave_period(wave_periods):\n if len(wave_periods) < 5:\n return np.nan\n\n return np.mean(wave_periods)",
"def avg_price_last_five_events_observer():\n avg_price = last_len(5)['price'].mean()\n i1.critical(all_dfs['last_len', 5].dataframe)\n i1.critical('The average price of the last five events is: ' + str(avg_price))",
"def get_three_day_case_average(self):\n positives = list(map(lambda x: 0 if x['positive'] is None else x['positive'], ))\n return three_day_average(positives)",
"def calculate_average(self, n: int) -> int:\n total = 0\n counter = 0\n i = 0\n while counter != n:\n total += self.history[i]\n i += 1\n counter += 1\n return counter / n",
"def avData(self):\n\n return self.averageData(nsamples=10)",
"def moving_average(training_data: List[int], testing_data: List[int]):\n\n moving_average_training_data = []\n\n for i in range(len(training_data)):\n if i == 0:\n # First record is special because there is no record before it\n average_value = float(training_data[0] + training_data[1]) / 2\n elif i == len(training_data) - 1:\n # Last value is also special because there is no value behind it\n average_value = float(training_data[i-1] + training_data[i]) / 2\n else:\n # Take average value of item i and the values before and after\n average_value = float(training_data[i-1] + training_data[i] + training_data[i+1]) / 3\n moving_average_training_data.append(average_value)\n\n return moving_average_training_data, testing_data",
"def average_absolute(data):\n return average(absolute(data))",
"def calculate_new_average(previous_avg, count, value):\n total = previous_avg * (count - 1) + value\n return total / count",
"def get_average_segments(self):\r\n mean = self.data[0, ...]\r\n std_dev = self.data[1, ...]\r\n return self.__calculate_shifted_average(mean, std_dev)",
"def weighted_avg_price_last_five_events_observer():\n avg = weighted_avg(last_len(5), field='price', weight='index')\n i12.critical(all_dfs['last_len', 5].dataframe)\n i12.critical('The average price of the last five events is: ' + str(avg))",
"def get_three_day_death_average(self):\n positives = list(map(lambda x: 0 if x['positive'] is None else x['positive'], ))\n return three_day_average(positives)",
"def _avg3(image):\n\n # Cast to appropriate type for safe averaging\n if image.dtype == np.uint8:\n dtype = np.uint16\n elif image.dtype == np.uint16:\n dtype = np.uint32\n elif image.dtype == np.uint32:\n dtype = np.uint64\n elif image.dtype == np.int8:\n dtype = np.int16\n elif image.dtype == np.int16:\n dtype = np.int32\n elif image.dtype == np.int32:\n dtype = np.int64\n else:\n dtype = image.dtype\n\n # Store original data type, and cast to safe data type for averaging\n odtype = image.dtype\n image = image.astype(dtype)\n imgshape = image.shape\n\n # Account for dimensions with odd dimensions to prevent data loss\n ypos = imgshape[0]\n xpos = imgshape[1]\n zpos = imgshape[2]\n z_max = zpos - zpos % 2 # if even then subtracting 0. \n y_max = ypos - ypos % 2 # if odd then subtracting 1\n x_max = xpos - xpos % 2\n yxz_max = [y_max, x_max, z_max]\n\n # Initialize the output\n avg_imgshape = np.ceil([d/2 for d in imgshape]).astype(int)\n avg_img = np.zeros(avg_imgshape,dtype=dtype)\n\n # Do the work\n avg_img[0:int(y_max/2),0:int(x_max/2),0:int(z_max/2)]= (\n image[0:y_max-1:2,0:x_max-1:2,0:z_max-1:2] + \n image[1:y_max:2 ,0:x_max-1:2,0:z_max-1:2] + \n image[0:y_max-1:2,1:x_max:2 ,0:z_max-1:2] + \n image[1:y_max:2 ,1:x_max:2 ,0:z_max-1:2] + \n image[0:y_max-1:2,0:x_max-1:2,1:z_max:2 ] + \n image[1:y_max:2 ,0:x_max-1:2,1:z_max:2 ] + \n image[0:y_max-1:2,1:x_max:2 ,1:z_max:2 ] + \n image[1:y_max:2 ,1:x_max:2 ,1:z_max:2 ]\n )/8\n\n # Account for odd shaped dimensions to prevent data loss\n if z_max != image.shape[2]:\n avg_img[:int(y_max/2),:int(x_max/2),-1] = (image[0:y_max-1:2,0:x_max-1:2,-1] + \n image[1:y_max:2 ,0:x_max-1:2,-1] + \n image[0:y_max-1:2,1:x_max:2 ,-1] + \n image[1:y_max:2 ,1:x_max:2 ,-1])/4\n if y_max != image.shape[0]:\n avg_img[-1,:int(x_max/2),:int(z_max/2)] = (image[-1,0:x_max-1:2,0:z_max-1:2] + \\\n image[-1,0:x_max-1:2,1:z_max:2 ] + \\\n image[-1,1:x_max:2 ,0:z_max-1:2] + \\\n image[-1,1:x_max:2 ,1:z_max:2 ])/4\n if x_max != image.shape[1]:\n avg_img[:int(y_max/2),-1,:int(z_max/2)] = (image[0:y_max-1:2,-1,0:z_max-1:2] + \\\n image[0:y_max-1:2,-1,1:z_max:2 ] + \\\n image[1:y_max:2 ,-1,0:z_max-1:2] + \\\n image[1:y_max:2 ,-1,1:z_max:2 ])/4\n if (y_max != image.shape[0] and x_max != image.shape[1]) and (z_max != image.shape[2]):\n avg_img[-1,-1,-1] = image[-1,-1,-1]\n\n return avg_img.astype(odtype)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
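A minimal, self-contained sketch of the tail averaging used in the tail_avg document above, assuming timeseries is a list of (timestamp, value) tuples; the function and variable names here are illustrative only.

def tail_avg_sketch(timeseries):
    # Average the last three values; fall back to the last value alone
    # when fewer than three datapoints are available.
    try:
        return (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
    except IndexError:
        return timeseries[-1][1]

if __name__ == '__main__':
    series = [(1, 10.0), (2, 12.0), (3, 50.0)]   # noisy spike at the end
    print(tail_avg_sketch(series))               # 24.0 rather than the raw 50.0
    print(tail_avg_sketch([(1, 10.0)]))          # 10.0 via the IndexError fallback

Averaging the tail trades some sensitivity and detection delay for lower noise, which is the trade-off described in the query above.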
A timeseries is anomalous if the deviation of its latest datapoint with respect to the median is X times larger than the median of deviations.
|
def median_absolute_deviation(timeseries, end_timestamp, full_duration):
try:
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
except:
return None
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
if PANDAS_VERSION < '0.17.0':
try:
test_statistic = demedianed.iget(-1) / median_deviation
except:
return None
else:
try:
test_statistic = demedianed.iat[-1] / median_deviation
except:
return None
    # Completely arbitrary...triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
# As per https://github.com/etsy/skyline/pull/104 by @rugger74
    # Although never seen, this should return False if not > arbitrary_value
# 20160523 @earthgecko
return False
|
[
"def errmedian(x):\n\t \n\ty = []\n\tfor i in range(1000):\n\t\txr = resample(x)\n\t\ty.append(Median(xr))\n\ty = np.array(y)\n\treturn np.std(y)",
"def nanmedian(x):\n try:\n return np.nanmedian(x)\n except:\n return np.median(x[np.isfinite(x)])",
"def std_from_mad(self, x):\n return 1.4826 * (np.median(np.abs(x - np.median(x))))",
"def medianAbsDev(dataVector):\n median = np.median(dataVector)\n absDevs = [np.abs(x-median) for x in dataVector]\n MAD = np.median(absDevs)\n \n return (MAD, median)",
"def anomaly_filter(stds, data, key):\n\n func = lambda x: abs(x - get(np.mean, data, key)[0]) > stds * get(np.std, data, key)[0]\n return data_filter(func, data, key)",
"def stddev_from_moving_average(timeseries, end_timestamp, full_duration):\n try:\n series = pandas.Series([x[1] for x in timeseries])\n if PANDAS_VERSION < '0.18.0':\n expAverage = pandas.stats.moments.ewma(series, com=50)\n stdDev = pandas.stats.moments.ewmstd(series, com=50)\n else:\n expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()\n stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)\n\n if PANDAS_VERSION < '0.17.0':\n return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)\n else:\n return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]\n# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat\n except:\n return None\n\n return False",
"def _moving_median(x, y, y_err, window_size):\n\n moving_med_x = np.array([]) # x in middle of bins\n moving_med_y = np.array([])\n i = 0\n while x[i] + window_size <= x[-1]:\n in_window = (x >= x[i]) & (x < x[i] + window_size)\n moving_med_x = np.append(moving_med_x, np.nanmedian(x[in_window]))\n moving_med_y = np.append(moving_med_y, np.nanmedian(y[in_window]))\n i += 1\n\n moving_med_func = interp1d(moving_med_x, moving_med_y)\n\n trim = (x >= moving_med_x[0]) & (x <= moving_med_x[-1])\n return x[trim], y[trim], y_err[trim], moving_med_func",
"def median(self,*,axis=1):\n try:\n medians = np.nanmedian(self.data, axis=axis).squeeze()\n if medians.size == 1:\n return np.asscalar(medians)\n return medians\n except IndexError:\n raise IndexError(\"Empty RegularlySampledAnalogSignalArray cannot calculate median\")",
"def median(self):\r\n\t\treturn np.median(self.dataset)",
"def high_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks > \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def get_median(data):\n return statistics.median(data) if data else 0",
"def calc_median_absolute_deviation(x, mask=None, scale=1.4826):\n x_ma = np.ma.array(x, mask=mask, copy=True)\n return mad(x_ma.compressed(), scale=scale)",
"def median(signals, win_length):\r\n return nanfilter(signals, win_length, nanmedian)",
"def mad(data, sigma=True, axis=None):\n if axis>0:\n med = np.median(data.swapaxes(0,axis),axis=0)\n mad = np.median(np.abs(data.swapaxes(0,axis) - med),axis=0)\n else:\n med = np.median(data,axis=axis)\n mad = np.median(np.abs(data - med),axis=axis)\n if sigma==False:\n return mad\n else:\n return mad*1.4826",
"def test_scenario_a(self):\n median_filter = MedianFilter(3, 5)\n\n for scan, expected_res in zip(self.scans_a, self.res_a):\n median_filter.add_measurement(scan)\n median_filter.update()\n\n assert np.allclose(expected_res, median_filter.get_measurement()), \"Error, incorrect median found\"",
"def remove_outliers(self):\n Z = abs((self.series - self.series.median()) /\n (self.series.quantile(0.75) - self.series.quantile(0.25))) > 10\n for col, _ in self.series.iteritems():\n self.series[col][Z[col]] = np.nan",
"def median(data_set):\n data_set_length = len(data_set)\n sorted_data_set = sorted(data_set)\n midpoint = data_set_length // 2\n if data_set_length % 2:\n return sorted_data_set[midpoint]\n else:\n hi = sorted_data_set[midpoint]\n lo = sorted_data_set[midpoint - 1]\n return (hi + lo) / 2",
"def remove_outliers(series, stddev):\n return series[(series - series.mean()).abs() < stddev * series.std()]",
"def median_despike(y, siz, tol):\n assert siz % 2 == 1, \"Median filter length must be odd.\"\n y = np.asanyarray(y)\n k2 = (siz - 1) // 2\n io = 0\n ie = k2+1\n med = np.zeros_like(np.asarray(y))\n med[:, io] = np.median(y[:, io:ie], axis = 1) \n for n in range(1, ie):\n io = io + 1\n ie = ie + 1\n med[:, io] = np.median(y[:, (io-n):ie], axis = 1)\n \n for n in range(1, y.shape[1]-siz + 1):\n io = io + 1\n ie = ie + 1\n med[:, io] = np.median(y[:, (io-k2):ie], axis = 1)\n \n for n in range(1, k2+1):\n io = io + 1\n ie = ie + 1\n med[:, io] = np.median(y[:, (io-k2):(ie-n)], axis = 1)\n \n yn = np.ma.masked_where( np.abs(y - med) >= tol, y )\n return yn"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
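A standalone sketch of the median-absolute-deviation check above, written against the current pandas Series API only (the .iget branch in the document is a legacy fallback); the threshold of 6 mirrors the arbitrary trigger used in the document.

import numpy as np
import pandas

def mad_is_anomalous(values, threshold=6):
    # Deviation of the latest value from the median, measured in units of
    # the median absolute deviation of the whole series.
    series = pandas.Series(values, dtype=float)
    demedianed = np.abs(series - series.median())
    median_deviation = demedianed.median()
    if median_deviation == 0:
        # The test statistic would be infinite, so skip, as the document does.
        return False
    return demedianed.iat[-1] / median_deviation > threshold

if __name__ == '__main__':
    print(mad_is_anomalous([10, 11, 9, 10, 12, 10, 11, 10, 95]))  # True
    print(mad_is_anomalous([10, 11, 9, 10, 12, 10, 11, 10, 11]))  # False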
Calculate the simple average over 60 datapoints (roughly one hour), FULL_DURATION seconds ago. A timeseries is anomalous if the average of the last three datapoints is outside of three standard deviations of this value.
|
def first_hour_average(timeseries, end_timestamp, full_duration):
try:
int_end_timestamp = int(timeseries[-1][0])
int_start_timestamp = int(timeseries[0][0])
int_full_duration = int_end_timestamp - int_start_timestamp
# Determine data resolution
# last_hour_threshold = int_end_timestamp - (int_full_duration - 3600)
int_second_last_end_timestamp = int(timeseries[-2][0])
resolution = int_end_timestamp - int_second_last_end_timestamp
# @modified 20160814 - pyflaked
# ten_data_point_seconds = resolution * 10
sixty_data_point_seconds = resolution * 60
sixty_datapoints_ago = int_end_timestamp - sixty_data_point_seconds
# @modified 20221127 - Task #4738: Allow first_hour_average to handle different resolution
# last_hour_threshold = int_end_timestamp - (int_full_duration - sixty_datapoints_ago)
last_hour_threshold = int_end_timestamp - int_full_duration
# series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
last_hour_threshold_end = last_hour_threshold + 3600
series = pandas.Series([x[1] for x in timeseries if x[0] > last_hour_threshold and x[0] < last_hour_threshold_end])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries, end_timestamp, full_duration)
return abs(t - mean) > 3 * stdDev
except:
return None
return False
|
[
"def tail_avg(timeseries, end_timestamp, full_duration):\n try:\n t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3\n return t\n except IndexError:\n return timeseries[-1][1]",
"def _get_duration_average(test_list):\n return arrow.get(sum([test._duration for test in test_list])/len(test_list)).format('HH:mm:ss') if test_list else NA",
"def mean_subtraction_cumulation(timeseries, end_timestamp, full_duration):\n\n try:\n series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])\n series = series - series[0:len(series) - 1].mean()\n stdDev = series[0:len(series) - 1].std()\n # @modified 20160814 - pyflaked\n # if PANDAS_VERSION < '0.18.0':\n # expAverage = pandas.stats.moments.ewma(series, com=15)\n # else:\n # expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()\n\n if PANDAS_VERSION < '0.17.0':\n return abs(series.iget(-1)) > 3 * stdDev\n else:\n return abs(series.iat[-1]) > 3 * stdDev\n except:\n return None\n\n return False",
"def average_duration(self):\n duration_sum = 0\n for timer in self.timers:\n duration_sum = duration_sum + timer.duration\n return duration_sum / len(self.timers)",
"def compute_mean_wave_period(wave_periods):\n if len(wave_periods) < 5:\n return np.nan\n\n return np.mean(wave_periods)",
"def get_mean_ms(self, tag):\n return numpy.average(self._tags[tag][_DURATION])",
"def avg_price_time_length_batch_15_1_second():\n avg_price = time_length_batch(15, seconds=1)['price'].mean()\n i8.critical(all_dfs['time_length_batch', 15, 'seconds', 1].dataframe)\n i8.critical('The average price of all events in time_length_batch(15, seconds=1): ' + str(avg_price))",
"def stddev_from_moving_average(timeseries, end_timestamp, full_duration):\n try:\n series = pandas.Series([x[1] for x in timeseries])\n if PANDAS_VERSION < '0.18.0':\n expAverage = pandas.stats.moments.ewma(series, com=50)\n stdDev = pandas.stats.moments.ewmstd(series, com=50)\n else:\n expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()\n stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)\n\n if PANDAS_VERSION < '0.17.0':\n return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)\n else:\n return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]\n# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat\n except:\n return None\n\n return False",
"def success_rate(self):\n return mean_with_default(self._past_window, 0.)",
"def average_time_per_setpoint(self):\n all_setpoint_times = []\n current_setpoint_time = 0\n for i, row in enumerate(self.log_rows[:-1]): # ignore the last entry, since we can't know how long that one took (there is no next row to check the timestamp of)\n current_timestamp = row[self.std_col_map[Headers.TIMESTAMP]]\n next_timestamp = self.log_rows[i+1][self.std_col_map[Headers.TIMESTAMP]]\n current_setpoint_time += (next_timestamp - current_timestamp).total_seconds()\n if row[self.std_col_map[Headers.RESULT]]:\n all_setpoint_times.append(current_setpoint_time)\n current_setpoint_time = 0\n return np.average(all_setpoint_times)",
"def get_avg(self):\n\t\treturn self.sum / max(len(self.window), 1)",
"def average_time_in_system(self):\n raise NotImplementedError()",
"def measure_average():\r\n \r\n distance1=measure()\r\n time.sleep(0.001)\r\n distance2=measure()\r\n time.sleep(0.001)\r\n distance3=measure()\r\n distance = distance1 + distance2 + distance3\r\n distance = distance / 3\r\n return distance",
"def get_mean_ms_per_ts(self, tag):\n time_step_ms = FecDataView.get_simulation_time_step_ms()\n n_points = math.ceil(\n self._max_time / time_step_ms)\n endpoint = n_points * time_step_ms\n bins = numpy.linspace(0, endpoint, n_points + 1)\n mean_per_ts = scipy.stats.binned_statistic(\n self._tags[tag][_START_TIME], self._tags[tag][_DURATION],\n \"mean\", bins).statistic\n mean_per_ts[numpy.isnan(mean_per_ts)] = 0\n return numpy.average(\n mean_per_ts[numpy.logical_not(numpy.isnan(mean_per_ts))])",
"def act_time_average(self):\n return self.time_average(self.elapsed_data['elapsed_time'], self.elapsed_data['servers'])",
"def simple_average(self, lo_hr=50, hi_hr=200):\n ms = np.mean(self.signals, axis=1)\n\n if len(ms) < 30*20:\n warnings.warn(\"Your video is shorter than 30 seconds. Resolution of HR detection may be too low\")\n\n freq, power = scipy.signal.welch(ms - ms[0], fs=30, nperseg=len(ms))\n\n # poor man's filter:\n mask = ((freq * 60 > lo_hr) & (freq * 60 < hi_hr))\n power[~mask] = 0 \n hr = freq[np.argmax(power)]*60\n\n return hr",
"def average_duration(self):\r\n sum = 0\r\n for melody in self.__melodies:\r\n sum += melody.get_duration()\r\n return sum / len(self.__melodies)",
"def test_avg_second(self) -> None:\n for pause_second in (0.1, 0.15):\n timer = Timer()\n for t in (pause_second,) * 10:\n if timer.is_paused():\n timer.resume()\n time.sleep(t)\n timer.pause()\n self.assertTrue(\n math.isclose(pause_second, timer.avg_seconds(), rel_tol=1e-1),\n msg=\"{}: {}\".format(pause_second, timer.avg_seconds()),\n )",
"def avg_price_first_two_seconds_observer():\n df = first_time(seconds=2)\n avg_price = df['price'].mean()\n i4.critical(df)\n i4.critical('The average price of all events in the first two seconds after statement_start is: ' + str(avg_price))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
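A rough standalone sketch of the first_hour_average idea above: take the datapoints in the first hour of the window as a baseline and flag the series if the tail average falls more than three standard deviations from that baseline. It assumes (unix_timestamp, value) pairs and at least three datapoints, and simplifies away the resolution handling in the document.

import pandas

def first_hour_average_sketch(timeseries):
    start = int(timeseries[0][0])
    # Baseline statistics from the first hour of the window.
    first_hour = [value for ts, value in timeseries if ts < start + 3600]
    series = pandas.Series(first_hour, dtype=float)
    mean, std_dev = series.mean(), series.std()
    # Average of the last three datapoints, as in tail_avg.
    tail = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
    return abs(tail - mean) > 3 * std_dev

if __name__ == '__main__':
    baseline = [(t, 10.0 + (i % 3)) for i, t in enumerate(range(0, 3600, 60))]
    later = [(t, 60.0) for t in range(7200, 7380, 60)]
    print(first_hour_average_sketch(baseline + later))   # True: tail far above the first hour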
A timeseries is anomalous if the absolute value of the average of the latest three datapoints minus the moving average is greater than three standard deviations of the moving average. This is better for finding anomalies with respect to short-term trends.
|
def stddev_from_moving_average(timeseries, end_timestamp, full_duration):
try:
series = pandas.Series([x[1] for x in timeseries])
if PANDAS_VERSION < '0.18.0':
expAverage = pandas.stats.moments.ewma(series, com=50)
stdDev = pandas.stats.moments.ewmstd(series, com=50)
else:
expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()
stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)
if PANDAS_VERSION < '0.17.0':
return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)
else:
return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]
# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat
except:
return None
return False
|
[
"def anomaly_filter(self, df):\n # calculate forward means\n df['30_PERIOD_FWD_MEAN'] = df[self.endog].rolling(30, min_periods=0).mean().tolist()\n df['30_PERIOD_FWD_MEAN'].fillna(inplace=True, method='bfill')\n df['30_PERIOD_FWD_MEAN'][1:] = df['30_PERIOD_FWD_MEAN'][:-1]\n\n # calculate reverse means\n reverse_mean = df[self.endog].sort_index(ascending=False).rolling(30, min_periods=0).mean().tolist()\n reverse_mean.reverse()\n df['30_PERIOD_BWD_MEAN'] = reverse_mean\n df['30_PERIOD_BWD_MEAN'].fillna(inplace=True, method='ffill')\n df['30_PERIOD_BWD_MEAN'][:-1] = df['30_PERIOD_BWD_MEAN'][1:]\n\n\n df['FWD_STD'] = (df[self.endog] - df['30_PERIOD_FWD_MEAN'])**2\n df['FWD_STD'] = np.sqrt(df['FWD_STD'].rolling(30, min_periods=0).mean())\n df['FWD_STD'].fillna(inplace=True, method='bfill')\n df['FWD_STD'][1:] = df['FWD_STD'][:-1]\n\n df['BWD_STD'] = (df[self.endog] - df['30_PERIOD_BWD_MEAN'])**2\n bkwd_std = np.sqrt(df['BWD_STD'].sort_index(ascending=False).rolling(30, min_periods=0).mean()).tolist()\n bkwd_std.reverse()\n df['BWD_STD'] = bkwd_std\n df['BWD_STD'].fillna(inplace=True, method='bfill')\n df['BWD_STD'][1:] = df['BWD_STD'][:-1]\n\n df['FILTER_VARIANCE'] = np.where(df['FWD_STD'] < df['BWD_STD'], df['BWD_STD'], df['FWD_STD'])\n\n df['HIGH_FILTER'] = df['30_PERIOD_FWD_MEAN']+df['FILTER_VARIANCE']*3\n df['LOW_FILTER'] = df['30_PERIOD_FWD_MEAN']-df['FILTER_VARIANCE']*3\n\n df[self.endog] = np.where(df[self.endog] > df['HIGH_FILTER'], df['HIGH_FILTER'], df[self.endog])\n df[self.endog] = np.where(df[self.endog] < df['LOW_FILTER'], df['LOW_FILTER'], df[self.endog])\n\n cleaned_timeseries = df[[self.date_header, self.endog]]\n\n return cleaned_timeseries",
"def anomaly_filter(stds, data, key):\n\n func = lambda x: abs(x - get(np.mean, data, key)[0]) > stds * get(np.std, data, key)[0]\n return data_filter(func, data, key)",
"def high_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks > \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def checkLatestAnomaly(df):\n anomalies = df[df[\"anomaly\"] == 15]\n if anomalies.shape[0] > 0:\n lastAnomalyRow = anomalies.iloc[-1]\n anomalyTime = lastAnomalyRow[\"ds\"]\n higher = lastAnomalyRow[\"y\"] > lastAnomalyRow[\"upper\"]\n\n per = 0\n denom = lastAnomalyRow[\"upper\"] if higher else lastAnomalyRow[\"lower\"]\n if denom > 0:\n per = int(100 * (abs(lastAnomalyRow[\"y\"] - denom) / denom))\n\n return {\n \"highOrLow\": \"high\" if higher else \"low\",\n \"value\": float(lastAnomalyRow[\"y\"]),\n \"percent\": per,\n \"anomalyTimeISO\": dp.parse(anomalyTime).isoformat(),\n \"anomalyTime\": dp.parse(anomalyTime).timestamp() * 1000,\n }\n return {}",
"def grubbs_test(timeseries):\n series = scipy.array([x for x in timeseries])\n stdDev = np.std(series) \n mean = np.mean(series)\n tail_average = tail_avg(timeseries)\n z_score = (tail_average - mean) / stdDev\n return z_score",
"def low_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks < \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def climate_anomaly(x, t):\n\t# climate normal period := January 1971 to December 1980\n\t# Seasonally normalized values w.r.t 30 years running mean \n\t# +1 is added to d2.year to include the last year in the analysis \n\td1 = dt.datetime(850, 1, 1)\n\td2 = dt.datetime(1850, 12, 31)\n\tx_anom = np.zeros((x.shape[0]))\n\tfor yy in range(d1.year,d2.year+1):\n\t\tlyear = yy-15\n\t\tuyear = yy+15\n\t\tif yy < d1.year+15:\n\t\t\tlyear = d1.year\n\t\t\tuyear = d1.year+30\n\t\tif yy > d2.year+1-15:\n\t\t\tlyear = d2.year+1-30\n\t\t\tuyear = d2.year\n\t\tidx1 = lyear - d1.year\n\t\tid = yy - d1.year\n\t\t#print(\"for year\",yy,\"lyear\",lyear,\"uyear\",uyear,\"idx1_m\")\n\t\tfor mm in range(12):\n\t\t\t#print(\"index\",id*12+mm,\"start_month\",idx1*12,\"end_month\",idx1*12+12*30)\n\t\t\tidx = np.arange(idx1*12+mm,idx1*12+12*30+mm,12)\t\n\t\t\tx_anom[id*12+mm] = x[id*12+mm] - x[idx].mean()\n\t\t\t#x_anom[id*12+mm] = (x[id*12+mm] - x[idx].mean())/x[idx].std()\n\t\t\t#print(\"Mean\",x[idx].mean()) \n\t\t\t#print(\"Std\",x[idx].std()) \n\t#print(x_norm) \n\t#print(x_norm.shape) \n\t#print(idx.shape) \n\t#print(x_norm.min()) \n\t#print(x_norm.max()) \n\t\n\treturn _quantile_normalization(x_anom, mode=\"mean\")\n\t#return x_norm",
"def compute_anomaly(da, time_group='time.month'):\n mthly_vals = da.groupby(time_group).mean('time')\n da = da.groupby(time_group) - mthly_vals\n\n return da",
"def explain_anomalies_rolling_std(y, window_size, sigma=1.0):\n avg = moving_average(y, window_size)\n avg_list = avg.tolist()\n residual = y - avg\n # Calculate the variation in the distribution of the residual\n testing_std = pd.rolling_std(residual, window_size)\n testing_std_as_df = pd.DataFrame(testing_std)\n rolling_std = testing_std_as_df.replace(np.nan,\n testing_std_as_df.ix[window_size - 1]).round(3).iloc[:, 0].tolist()\n std = np.std(residual)\n return {'stationary standard_deviation': round(std, 3),\n 'anomalies_dict': collections.OrderedDict([(index, y_i)\n for index, y_i, avg_i, rs_i in zip(count(),\n y, avg_list, rolling_std)\n if (y_i > avg_i + (sigma * rs_i)) | (\n y_i < avg_i - (sigma * rs_i))])}",
"def repeated_measurement_mean_and_error(values):\n array = sp.array(values)\n mean = array.mean()\n single_value_error = array.std(ddof=1)\n mean_error = single_value_error / sp.sqrt(len(array))\n\n return mean, mean_error",
"def anomalise(self, base_period = None, ts = None):\n\n delta = self.time[1] - self.time[0]\n seasonal_mean = np.zeros_like(self.data) if ts is None else np.zeros_like(ts)\n\n if base_period is None:\n ndx = np.arange(self.time.shape[0])\n else:\n ndx = np.logical_and(self.time >= base_period[0].toordinal(), self.time <= base_period[1].toordinal())\n d = self.data.copy() if ts is None else ts\n t = self.time.copy()\n self.time = self.time[ndx]\n\n if delta == 1:\n # daily data\n day_avg, mon_avg, _ = self.extract_day_month_year()\n self.time = t.copy()\n day_data, mon_data, _ = self.extract_day_month_year()\n d = d[ndx, ...]\n for mi in range(1,13):\n mon_mask_avg = (mon_avg == mi)\n mon_mask_data = (mon_data == mi)\n for di in range(1,32):\n sel_avg = np.logical_and(mon_mask_avg, day_avg == di)\n sel_data = np.logical_and(mon_mask_data, day_data == di)\n if np.sum(sel_avg) == 0:\n continue\n seasonal_mean[sel_data, ...] = np.nanmean(d[sel_avg, ...], axis = 0)\n if ts is None:\n self.data[sel_data, ...] -= seasonal_mean[sel_data, ...]\n else:\n ts[sel_data, ...] -= seasonal_mean[sel_data, ...]\n elif abs(delta - 30) < 3.0:\n # monthly data\n _, mon_avg, _ = self.extract_day_month_year()\n self.time = t.copy()\n _, mon_data, _ = self.extract_day_month_year()\n d = d[ndx, ...]\n for mi in range(1,13):\n sel_avg = (mon_avg == mi)\n sel_data = (mon_data == mi)\n if np.sum(sel_avg) == 0:\n continue\n seasonal_mean[sel_data, ...] = np.nanmean(d[sel_avg, ...], axis = 0)\n if ts is None:\n self.data[sel_data, ...] -= seasonal_mean[sel_data, ...]\n else:\n ts[sel_data, ...] -= seasonal_mean[sel_data, ...]\n else:\n raise Exception('Unknown temporal sampling in the field.')\n\n return seasonal_mean",
"def mean_anomaly(self, t):\n \n return ((t-self.t_peri) * self.mean_motion) % (2.*np.pi)",
"def average_absolute(data):\n return average(absolute(data))",
"def test_mean_to_true_anomaly(self):\n\n for i in np.arange(0.1, np.pi * 2, 0.1):\n m_test = i\n source = KepOrbElem()\n source.e = 0.01\n source.a = 7000\n source.i = 0.1\n source.w = 0.1\n source.O = 0.2\n source.m = m_test\n\n v_calc = source.v\n\n # reset m\n source.m = 0.\n self.assertNotAlmostEqual(m_test, source.m)\n\n source.v = v_calc\n\n self.assertAlmostEqual(m_test, source.m, places=10)",
"def low_pass_filter_anomaly_detection(df,\n column_name,\n number_of_stdevs_away_from_mean):\n #60-day rolling average\n df[column_name+'_Rolling_Average']=df[column_name].rolling(window=60, center=True).mean()\n #60-day standard deviation\n df[column_name+'_Rolling_StDev']=df[column_name].rolling(window=60, center=True).std()\n #Detect anomalies by determining how far away from the mean (in terms of standard deviation)\n #each data point is\n df[column_name+'_Low_Pass_Filter_Anomaly']=(abs(df[column_name]-df[\n column_name+'_Rolling_Average'])>(\n number_of_stdevs_away_from_mean*df[\n column_name+'_Rolling_StDev']))\n return df",
"def test_true_to_mean_anomaly(self):\n\n for i in np.arange(0.1,np.pi*2, 0.1):\n v_test = i\n source = KepOrbElem()\n source.e = 0.01\n source.a = 7000\n source.i = 0.1\n source.w = 0.1\n source.O = 0.2\n source.v = v_test\n\n mean = source.m\n\n # reset v\n source.v = 0.\n self.assertNotAlmostEqual(v_test, source.v)\n\n source.m = mean\n\n self.assertAlmostEqual(v_test, source.v, places=10)",
"def mean_subtraction_cumulation(timeseries, end_timestamp, full_duration):\n\n try:\n series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])\n series = series - series[0:len(series) - 1].mean()\n stdDev = series[0:len(series) - 1].std()\n # @modified 20160814 - pyflaked\n # if PANDAS_VERSION < '0.18.0':\n # expAverage = pandas.stats.moments.ewma(series, com=15)\n # else:\n # expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()\n\n if PANDAS_VERSION < '0.17.0':\n return abs(series.iget(-1)) > 3 * stdDev\n else:\n return abs(series.iat[-1]) > 3 * stdDev\n except:\n return None\n\n return False",
"def detect_drop_off_cliff(timeseries, end_timestamp, full_duration):\n\n try:\n if len(timeseries) < 21:\n return False\n\n int_end_timestamp = int(timeseries[-1][0])\n # Determine resolution of the data set\n int_second_last_end_timestamp = int(timeseries[-2][0])\n resolution = int_end_timestamp - int_second_last_end_timestamp\n ten_data_point_seconds = resolution * 10\n ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds\n\n # @modified 20210420 - Support #4026: Change from scipy array to numpy array\n # Deprecation of scipy.array\n # ten_datapoint_array = scipy.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])\n ten_datapoint_array = np.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])\n\n ten_datapoint_array_len = len(ten_datapoint_array)\n if ten_datapoint_array_len > 3:\n # DO NOT handle if negative integers in range, where is the bottom of\n # of the cliff if a range goes negative? The maths does not work either\n ten_datapoint_min_value = np.amin(ten_datapoint_array)\n if ten_datapoint_min_value < 0:\n return False\n ten_datapoint_max_value = np.amax(ten_datapoint_array)\n if ten_datapoint_max_value < 10:\n return False\n ten_datapoint_array_sum = np.sum(ten_datapoint_array)\n ten_datapoint_value = int(ten_datapoint_array[-1])\n ten_datapoint_average = ten_datapoint_array_sum / ten_datapoint_array_len\n ten_datapoint_value = int(ten_datapoint_array[-1])\n ten_datapoint_max_value = np.amax(ten_datapoint_array)\n if ten_datapoint_max_value == 0:\n return False\n if ten_datapoint_max_value < 101:\n trigger = 15\n if ten_datapoint_max_value < 20:\n trigger = ten_datapoint_average / 2\n if ten_datapoint_max_value < 1:\n trigger = 0.1\n if ten_datapoint_max_value > 100:\n trigger = 100\n if ten_datapoint_value == 0:\n # Cannot divide by 0, so set to 0.1 to prevent error\n ten_datapoint_value = 0.1\n if ten_datapoint_value == 1:\n trigger = 1\n if ten_datapoint_value == 1 and ten_datapoint_max_value < 10:\n trigger = 0.1\n if ten_datapoint_value == 0.1 and ten_datapoint_average < 1 and ten_datapoint_array_sum < 7:\n trigger = 7\n # Filter low rate and variable between 0 and 100 metrics\n if ten_datapoint_value <= 1 and ten_datapoint_array_sum < 100 and ten_datapoint_array_sum > 1:\n\n # @modified 20210420 - Support #4026: Change from scipy array to numpy array\n # Deprecation of scipy.array\n # all_datapoints_array = scipy.array([x[1] for x in timeseries])\n all_datapoints_array = np.array([x[1] for x in timeseries])\n\n all_datapoints_max_value = np.amax(all_datapoints_array)\n if all_datapoints_max_value < 100:\n # print \"max_value for all datapoints at - \" + str(int_end_timestamp) + \" - \" + str(all_datapoints_max_value)\n return False\n ten_datapoint_result = ten_datapoint_average / ten_datapoint_value\n if int(ten_datapoint_result) > trigger:\n return True\n except:\n return None\n\n return False",
"def detect_anomaly(self, dataframe, date_col, value_col, outliers_fraction=0.05):\n \n self.get_columns(dataframe, date_col, value_col)\n df=dataframe\n data = df[['time_epoch', 'value']]\n min_max_scaler = StandardScaler()\n np_scaled = min_max_scaler.fit_transform(data)\n data = pd.DataFrame(np_scaled)\n # train isolation forest \n model = IsolationForest(contamination = outliers_fraction)\n model.fit(data)\n # add the data to the main \n df['anomaly'] = pd.Series(model.predict(data))\n df['anomaly'] = df['anomaly'].map( {1: 0, -1: 1} )\n\n normal=df[df['anomaly']==0]\n anomaly=df[df['anomaly']==1]\n print(\"Normal: \", len(normal))\n print(\"Anomaly: \", len(anomaly))\n\n self.ran_detect_anomaly = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
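A sketch of the moving-average variant above using only the modern pandas Series.ewm API (the PANDAS_VERSION branches in the document are legacy fallbacks); com=50 matches the document's smoothing factor.

import pandas

def stddev_from_ewma_sketch(values, com=50):
    series = pandas.Series(values, dtype=float)
    exp_average = series.ewm(ignore_na=False, min_periods=0, adjust=True, com=com).mean()
    std_dev = series.ewm(ignore_na=False, min_periods=0, adjust=True, com=com).std(bias=False)
    # Anomalous when the latest value sits more than three (exponentially
    # weighted) standard deviations away from the exponentially weighted mean.
    return abs(series.iat[-1] - exp_average.iat[-1]) > 3 * std_dev.iat[-1]

if __name__ == '__main__':
    sawtooth = [10.0 + (i % 5) for i in range(200)]      # values cycling 10..14
    print(stddev_from_ewma_sketch(sawtooth + [12.0]))    # inside the band -> False
    print(stddev_from_ewma_sketch(sawtooth + [40.0]))    # far above the band -> True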
A timeseries is anomalous if the value of the next datapoint in the series is farther than three standard deviations out in cumulative terms after subtracting the mean from each data point.
|
def mean_subtraction_cumulation(timeseries, end_timestamp, full_duration):
try:
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
# @modified 20160814 - pyflaked
# if PANDAS_VERSION < '0.18.0':
# expAverage = pandas.stats.moments.ewma(series, com=15)
# else:
# expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()
if PANDAS_VERSION < '0.17.0':
return abs(series.iget(-1)) > 3 * stdDev
else:
return abs(series.iat[-1]) > 3 * stdDev
except:
return None
return False
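A minimal usage sketch, assuming the function above is in scope, that pandas is imported as pandas, and that PANDAS_VERSION holds the pandas version string (how it is defined here is an assumption); the synthetic values are illustrative.

import pandas
PANDAS_VERSION = pandas.__version__  # assumed definition of the version guard used above

# A gently varying series of (timestamp, value) pairs at 60 s resolution,
# followed by a final datapoint far outside three standard deviations.
base_ts = 1600000000
timeseries = [[base_ts + i * 60, 10.0 + (i % 5) * 0.1] for i in range(120)]
timeseries.append([base_ts + 120 * 60, 500.0])

# Expected: True, since the last datapoint is well beyond 3 sigma in cumulative terms.
print(mean_subtraction_cumulation(timeseries, timeseries[-1][0], 86400))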
|
[
"def demean(data):\n return data - data.mean()",
"def robustmean(arr):\n\t\n\t# First pass discarding points >3 sigma above mean\n\tmean = arr.mean()\n\tsd = arr.std()\n\tthresh = mean + 3.0*sd\n\tidx = numpy.where(arr < thresh)\n\tnewarr = arr[idx]\n\t\n\tif len(newarr) == 0:\n\t\t# Warning, all points discarded. Just return array mean\n\t\t_MATHUTIL_LOG.warning(\"All points discarded!, %f, %f\", mean, sd)\n\t\tfinalmean = mean\n\telse:\n\t\t# Second pass discarding points >3 sigma above mean\n\t\tnewmean = newarr.mean()\n\t\tnewsd = newarr.std()\n\t\tnewthresh = newmean+3.0*newsd\n\t\tnewidx = numpy.where(newarr < newthresh)\n\t\tfinalarr = newarr[newidx]\n\t\tif len(finalarr) == 0:\n\t\t\tfinalmean = newmean\n\t\telse:\n\t\t\t# Final mean of good points\n\t\t\tfinalmean = finalarr.mean()\n\n\treturn finalmean",
"def climate_anomaly(x, t):\n\t# climate normal period := January 1971 to December 1980\n\t# Seasonally normalized values w.r.t 30 years running mean \n\t# +1 is added to d2.year to include the last year in the analysis \n\td1 = dt.datetime(850, 1, 1)\n\td2 = dt.datetime(1850, 12, 31)\n\tx_anom = np.zeros((x.shape[0]))\n\tfor yy in range(d1.year,d2.year+1):\n\t\tlyear = yy-15\n\t\tuyear = yy+15\n\t\tif yy < d1.year+15:\n\t\t\tlyear = d1.year\n\t\t\tuyear = d1.year+30\n\t\tif yy > d2.year+1-15:\n\t\t\tlyear = d2.year+1-30\n\t\t\tuyear = d2.year\n\t\tidx1 = lyear - d1.year\n\t\tid = yy - d1.year\n\t\t#print(\"for year\",yy,\"lyear\",lyear,\"uyear\",uyear,\"idx1_m\")\n\t\tfor mm in range(12):\n\t\t\t#print(\"index\",id*12+mm,\"start_month\",idx1*12,\"end_month\",idx1*12+12*30)\n\t\t\tidx = np.arange(idx1*12+mm,idx1*12+12*30+mm,12)\t\n\t\t\tx_anom[id*12+mm] = x[id*12+mm] - x[idx].mean()\n\t\t\t#x_anom[id*12+mm] = (x[id*12+mm] - x[idx].mean())/x[idx].std()\n\t\t\t#print(\"Mean\",x[idx].mean()) \n\t\t\t#print(\"Std\",x[idx].std()) \n\t#print(x_norm) \n\t#print(x_norm.shape) \n\t#print(idx.shape) \n\t#print(x_norm.min()) \n\t#print(x_norm.max()) \n\t\n\treturn _quantile_normalization(x_anom, mode=\"mean\")\n\t#return x_norm",
"def stddev_from_moving_average(timeseries, end_timestamp, full_duration):\n try:\n series = pandas.Series([x[1] for x in timeseries])\n if PANDAS_VERSION < '0.18.0':\n expAverage = pandas.stats.moments.ewma(series, com=50)\n stdDev = pandas.stats.moments.ewmstd(series, com=50)\n else:\n expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()\n stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)\n\n if PANDAS_VERSION < '0.17.0':\n return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)\n else:\n return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]\n# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat\n except:\n return None\n\n return False",
"def calc_mean_std(self):\n\n # get ob_next sets from memory\n memory_len = len(self._memory)\n all_obs_next = []\n col_len = len(self._memory[memory_len - 1].obs_nex)\n \n for i in range(memory_len):\n all_obs_next.append(self._memory[i].obs_nex)\n \n # cacualte average and standard diviation for each features \n return (np.mean(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1), \n np.std(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1))",
"def ess(samples: np.ndarray,\n mean: t.Union[float, int],\n var: t.Union[float, int],\n verbose: bool = False) -> float:\n lag_size = 1\n stop_condition = False\n autocorr = [] # type: t.List[float]\n\n if not isinstance(samples, np.ndarray):\n samples = np.array(samples)\n\n shifted_samples = samples - mean\n\n while not stop_condition:\n autocorr.append(\n np.mean([\n shifted_samples[i] * shifted_samples[i + lag_size]\n for i in np.arange(samples.size - lag_size)\n ]))\n\n if lag_size >= 3 and lag_size % 2 == 1:\n stop_condition = (\n autocorr[lag_size - 1] + autocorr[lag_size - 2]) < 0\n\n lag_size += 1\n\n ess_stat = samples.size / (1 + 2 / var * np.sum(autocorr[:-2]))\n\n if verbose:\n print(\"Chain size: {} - ESS: {}\".format(samples.size, ess_stat))\n print(\"ESS / (chain size):\", ess_stat / samples.size)\n print(\"Last lag size effectively used:\", lag_size - 2)\n\n return ess_stat",
"def anomaly_filter(self, df):\n # calculate forward means\n df['30_PERIOD_FWD_MEAN'] = df[self.endog].rolling(30, min_periods=0).mean().tolist()\n df['30_PERIOD_FWD_MEAN'].fillna(inplace=True, method='bfill')\n df['30_PERIOD_FWD_MEAN'][1:] = df['30_PERIOD_FWD_MEAN'][:-1]\n\n # calculate reverse means\n reverse_mean = df[self.endog].sort_index(ascending=False).rolling(30, min_periods=0).mean().tolist()\n reverse_mean.reverse()\n df['30_PERIOD_BWD_MEAN'] = reverse_mean\n df['30_PERIOD_BWD_MEAN'].fillna(inplace=True, method='ffill')\n df['30_PERIOD_BWD_MEAN'][:-1] = df['30_PERIOD_BWD_MEAN'][1:]\n\n\n df['FWD_STD'] = (df[self.endog] - df['30_PERIOD_FWD_MEAN'])**2\n df['FWD_STD'] = np.sqrt(df['FWD_STD'].rolling(30, min_periods=0).mean())\n df['FWD_STD'].fillna(inplace=True, method='bfill')\n df['FWD_STD'][1:] = df['FWD_STD'][:-1]\n\n df['BWD_STD'] = (df[self.endog] - df['30_PERIOD_BWD_MEAN'])**2\n bkwd_std = np.sqrt(df['BWD_STD'].sort_index(ascending=False).rolling(30, min_periods=0).mean()).tolist()\n bkwd_std.reverse()\n df['BWD_STD'] = bkwd_std\n df['BWD_STD'].fillna(inplace=True, method='bfill')\n df['BWD_STD'][1:] = df['BWD_STD'][:-1]\n\n df['FILTER_VARIANCE'] = np.where(df['FWD_STD'] < df['BWD_STD'], df['BWD_STD'], df['FWD_STD'])\n\n df['HIGH_FILTER'] = df['30_PERIOD_FWD_MEAN']+df['FILTER_VARIANCE']*3\n df['LOW_FILTER'] = df['30_PERIOD_FWD_MEAN']-df['FILTER_VARIANCE']*3\n\n df[self.endog] = np.where(df[self.endog] > df['HIGH_FILTER'], df['HIGH_FILTER'], df[self.endog])\n df[self.endog] = np.where(df[self.endog] < df['LOW_FILTER'], df['LOW_FILTER'], df[self.endog])\n\n cleaned_timeseries = df[[self.date_header, self.endog]]\n\n return cleaned_timeseries",
"def errmedian(x):\n\t \n\ty = []\n\tfor i in range(1000):\n\t\txr = resample(x)\n\t\ty.append(Median(xr))\n\ty = np.array(y)\n\treturn np.std(y)",
"def repeated_measurement_mean_and_error(values):\n array = sp.array(values)\n mean = array.mean()\n single_value_error = array.std(ddof=1)\n mean_error = single_value_error / sp.sqrt(len(array))\n\n return mean, mean_error",
"def compute_anomaly(da, time_group='time.month'):\n mthly_vals = da.groupby(time_group).mean('time')\n da = da.groupby(time_group) - mthly_vals\n\n return da",
"def remove_outliers(this_series):\n no_outlier_mask = np.abs(this_series-this_series.mean()) <= (3*this_series.std())\n return this_series[no_outlier_mask]",
"def running_mean_variance():\n mean = (yield)\n k = 1\n s = 0 * mean # to ensure the type and shape is correct\n ret = yield (mean, s) # special case of first value, cannot estimate much\n if ret is not None:\n raise ValueError(_invalid_usage_msg)\n while True:\n k += 1\n value = (yield)\n delta = value - mean\n mean += delta / k\n delta2 = value - mean\n s += delta2 * delta\n ret = yield (mean, s / (k - 1))\n if ret is not None:\n raise ValueError(_invalid_usage_msg)",
"def mad(data):\n return sum(abs(mean(data)-x) for x in data)/len(data)",
"def _std(\n cls,\n count: pd.Series,\n mean: pd.Series,\n second_moment: pd.Series\n ) -> pd.Series:\n coef = count / (count - cls.ddof).clip(lower=EPSILON)\n variation = coef * (second_moment - np.square(mean))\n return np.sqrt(variation)",
"def test_nilearn_standardize_false():\n # Simulate data\n img, mask_conf, mask_rand, X = _simu_img(demean=True)\n\n # Check that most variance is removed\n # in voxels composed of pure confounds\n tseries_std = _tseries_std(img, mask_conf, X, False)\n assert np.mean(tseries_std < 0.0001)\n\n # Check that most variance is preserved\n # in voxels composed of random noise\n tseries_std = _tseries_std(img, mask_rand, X, False)\n assert np.mean(tseries_std > 0.9)",
"def anomaly_filter(stds, data, key):\n\n func = lambda x: abs(x - get(np.mean, data, key)[0]) > stds * get(np.std, data, key)[0]\n return data_filter(func, data, key)",
"def grubbs_test(timeseries):\n series = scipy.array([x for x in timeseries])\n stdDev = np.std(series) \n mean = np.mean(series)\n tail_average = tail_avg(timeseries)\n z_score = (tail_average - mean) / stdDev\n return z_score",
"def test_true_to_mean_anomaly(self):\n\n for i in np.arange(0.1,np.pi*2, 0.1):\n v_test = i\n source = KepOrbElem()\n source.e = 0.01\n source.a = 7000\n source.i = 0.1\n source.w = 0.1\n source.O = 0.2\n source.v = v_test\n\n mean = source.m\n\n # reset v\n source.v = 0.\n self.assertNotAlmostEqual(v_test, source.v)\n\n source.m = mean\n\n self.assertAlmostEqual(v_test, source.v, places=10)",
"def remove_outliers(series, stddev):\n return series[(series - series.mean()).abs() < stddev * series.std()]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A timeseries is anomalous if a two-sample Kolmogorov-Smirnov test indicates that the data distribution of the last 10 datapoints (roughly 10 minutes) differs from that of the last 60 datapoints (roughly an hour). Because this produces false positives on non-stationary series, an Augmented Dickey-Fuller test is applied to check for stationarity.
|
def ks_test(timeseries, end_timestamp, full_duration):
try:
int_end_timestamp = int(timeseries[-1][0])
# @modified 20160814 - pyflaked
# hour_ago = int_end_timestamp - 3600
# ten_minutes_ago = int_end_timestamp - 600
# Determine resolution of the data set
# reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
# probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
int_second_last_end_timestamp = int(timeseries[-2][0])
resolution = int_end_timestamp - int_second_last_end_timestamp
ten_data_point_seconds = resolution * 10
ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds
sixty_data_point_seconds = resolution * 60
sixty_datapoints_ago = int_end_timestamp - sixty_data_point_seconds
# @modified 20210420 - Support #4026: Change from scipy array to numpy array
# Deprecation of scipy.array
# reference = scipy.array([x[1] for x in timeseries if x[0] >= sixty_datapoints_ago and x[0] < ten_datapoints_ago])
# probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_datapoints_ago])
reference = np.array([x[1] for x in timeseries if x[0] >= sixty_datapoints_ago and x[0] < ten_datapoints_ago])
probe = np.array([x[1] for x in timeseries if x[0] >= ten_datapoints_ago])
if reference.size < 20 or probe.size < 20:
return False
ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
if ks_p_value < 0.05 and ks_d > 0.5:
adf = sm.tsa.stattools.adfuller(reference, 10)
if adf[1] < 0.05:
return True
except:
return None
return False
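A minimal usage sketch, assuming the function above is in scope and that np, scipy.stats and sm (statsmodels.api) are imported under the same names it uses; the synthetic values are illustrative.

import numpy as np
import scipy.stats
import statsmodels.api as sm

# 70 datapoints at 60 s resolution: a stationary reference window followed by
# a level shift in the most recent datapoints.
rng = np.random.default_rng(0)
base_ts = 1600000000
values = list(rng.normal(10.0, 1.0, 60)) + list(rng.normal(50.0, 1.0, 10))
timeseries = [[base_ts + i * 60, float(v)] for i, v in enumerate(values)]

# With evenly spaced data the probe window (the last ten resolution intervals)
# holds only about 11 datapoints, so the >= 20 size guard makes this call return
# False; the KS and ADF checks only run when that window holds at least 20 points.
print(ks_test(timeseries, timeseries[-1][0], 86400))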
|
[
"def test_time_series(self):\n\n assert False",
"def test_same_verifs_valid_time_no_nan(hindcast_hist_obs_1d):\n skill = hindcast_hist_obs_1d.verify(\n metric=\"rmse\",\n comparison=\"e2o\",\n dim=[], # important\n alignment=\"same_verifs\",\n )\n assert not skill.coords[\"valid_time\"].isnull().any()",
"def test_trend_same(self):\n self.assertEquals(self.data_item.compute_trend(20), 0)",
"def test_sparse_timeseries(self):\n timeseries = [1, None, None, None, None, 2]\n timestamps = list(range(len(timeseries)))\n\n self.assertRaises(ValueError,\n self.detector.detect_anomalies,\n timeseries, timestamps)",
"def detect_drop_off_cliff(timeseries, end_timestamp, full_duration):\n\n try:\n if len(timeseries) < 21:\n return False\n\n int_end_timestamp = int(timeseries[-1][0])\n # Determine resolution of the data set\n int_second_last_end_timestamp = int(timeseries[-2][0])\n resolution = int_end_timestamp - int_second_last_end_timestamp\n ten_data_point_seconds = resolution * 10\n ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds\n\n # @modified 20210420 - Support #4026: Change from scipy array to numpy array\n # Deprecation of scipy.array\n # ten_datapoint_array = scipy.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])\n ten_datapoint_array = np.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])\n\n ten_datapoint_array_len = len(ten_datapoint_array)\n if ten_datapoint_array_len > 3:\n # DO NOT handle if negative integers in range, where is the bottom of\n # of the cliff if a range goes negative? The maths does not work either\n ten_datapoint_min_value = np.amin(ten_datapoint_array)\n if ten_datapoint_min_value < 0:\n return False\n ten_datapoint_max_value = np.amax(ten_datapoint_array)\n if ten_datapoint_max_value < 10:\n return False\n ten_datapoint_array_sum = np.sum(ten_datapoint_array)\n ten_datapoint_value = int(ten_datapoint_array[-1])\n ten_datapoint_average = ten_datapoint_array_sum / ten_datapoint_array_len\n ten_datapoint_value = int(ten_datapoint_array[-1])\n ten_datapoint_max_value = np.amax(ten_datapoint_array)\n if ten_datapoint_max_value == 0:\n return False\n if ten_datapoint_max_value < 101:\n trigger = 15\n if ten_datapoint_max_value < 20:\n trigger = ten_datapoint_average / 2\n if ten_datapoint_max_value < 1:\n trigger = 0.1\n if ten_datapoint_max_value > 100:\n trigger = 100\n if ten_datapoint_value == 0:\n # Cannot divide by 0, so set to 0.1 to prevent error\n ten_datapoint_value = 0.1\n if ten_datapoint_value == 1:\n trigger = 1\n if ten_datapoint_value == 1 and ten_datapoint_max_value < 10:\n trigger = 0.1\n if ten_datapoint_value == 0.1 and ten_datapoint_average < 1 and ten_datapoint_array_sum < 7:\n trigger = 7\n # Filter low rate and variable between 0 and 100 metrics\n if ten_datapoint_value <= 1 and ten_datapoint_array_sum < 100 and ten_datapoint_array_sum > 1:\n\n # @modified 20210420 - Support #4026: Change from scipy array to numpy array\n # Deprecation of scipy.array\n # all_datapoints_array = scipy.array([x[1] for x in timeseries])\n all_datapoints_array = np.array([x[1] for x in timeseries])\n\n all_datapoints_max_value = np.amax(all_datapoints_array)\n if all_datapoints_max_value < 100:\n # print \"max_value for all datapoints at - \" + str(int_end_timestamp) + \" - \" + str(all_datapoints_max_value)\n return False\n ten_datapoint_result = ten_datapoint_average / ten_datapoint_value\n if int(ten_datapoint_result) > trigger:\n return True\n except:\n return None\n\n return False",
"def test_smooth_data(self):\n timeseries = [1, 2, 1]\n self.detector.smooth_data(timeseries)\n\n self.assertEqual(timeseries[1], 1,\n \"Local max is not smoothed\")\n\n timeseries = [1, -1, 1]\n self.detector.smooth_data(timeseries)\n\n self.assertEqual(timeseries[1], 1,\n \"Local min is not smoothed\")",
"def stationarize(self, time_series):",
"def grubbs_test(timeseries):\n series = scipy.array([x for x in timeseries])\n stdDev = np.std(series) \n mean = np.mean(series)\n tail_average = tail_avg(timeseries)\n z_score = (tail_average - mean) / stdDev\n return z_score",
"def testFindOvertime(self):\r\n self.assertEqual(round(find_overtime([0, 40, 40.01, 41, 40]), 2),\r\n 1.01)",
"def test_no_prediction_time_outside_min_and_max_date(\n self, sampler: BinnedUniformSampler, raw_data: pd.DataFrame\n ):\n sampled = sampler.generate_samples(raw_data)\n max_date = sampler.max_date\n min_date = sampler.min_date\n assert np.all(sampled.prediction_time > min_date)\n assert np.all(sampled.prediction_time < max_date)",
"def test_mean2(self):\n x1 = [10, 11, 12, 13]\n x2 = [5, 6, 8, 9]\n m2 = welchs_ttest(x1, x2)[\"mean2\"]\n\n assert abs(m2-7.0) < 0.001",
"def test_sv_short(self):\n start = datetime.datetime(year=2017, month=3, day=17)\n end = datetime.datetime(year=2017, month=3, day=18)\n query = {'key': 'apple', 'geo': ''}\n series = SVSeries.univariate(self.connection, query, start, end)\n data = series.get_data()\n with self.subTest('result_normalized'):\n self.assertTrue(data.max() == 100)\n with self.subTest('result_daily'):\n self.assertEqual(len(data), (end - start).days + 1)",
"def test_y_data_but_no_x_data(self):\n lead_time = pd.to_timedelta(\"1d\")\n lookback = pd.to_timedelta(\"2y\")\n prediction_period = pd.to_timedelta(\"180d\")\n max_date = pd.to_datetime(\"2020-01-01\")\n min_date = (\n max_date - prediction_period - lead_time - lookback - pd.to_timedelta(\"1d\")\n )\n\n sampler = BinnedUniformSampler(\n min_date=min_date,\n max_date=max_date,\n lead_time=lead_time,\n prediction_period=prediction_period,\n samples_per_lookback=1,\n lookback=lookback,\n )\n\n # purchase that covers one prediction period, but no lookback\n customer_data = self.generate_data_for_one_customer(\n 1, max_date - prediction_period - lead_time, max_date, n_orders=12\n )\n\n # a sampler with max date greater than the maximum order date and\n # params such that exactly one sample is created for the customer\n samples = sampler.generate_samples(customer_data)\n\n assert samples.index.get_level_values(\"sample_id\").nunique() == 1\n assert samples.x_include.sum() == 0\n assert samples.y_include.sum() >= 1",
"def high_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks > \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def test_data_timeseries(self):\n data = [0, 1, 2, 3]\n timestamps1 = [0.0, 0.1, 0.2, 0.3]\n timestamps2 = [1.0, 1.1, 1.2, 1.3]\n ts1 = TimeSeries(\n name=\"test_ts1\", data=data, unit=\"grams\", timestamps=timestamps1\n )\n ts2 = TimeSeries(\n name=\"test_ts2\", data=ts1, unit=\"grams\", timestamps=timestamps2\n )\n self.assertEqual(ts2.data, data)\n self.assertEqual(ts1.num_samples, ts2.num_samples)\n self.assertEqual(ts1.data_link, set([ts2]))",
"def test_no_starting_time(self):\n ts1 = TimeSeries(name=\"test_ts1\", data=[1, 2, 3], unit=\"unit\", rate=0.1)\n self.assertEqual(ts1.starting_time, 0.0)",
"def test_rolling_horizon(self):\n\n arr1 = np.array([i+1 for i in range(20)]).reshape(20, 1)\n arr2 = 10.0*arr1\n arr3 = 100.0*arr1\n arr4 = -1 * arr1\n arr5 = -2 * arr1\n df_check = pd.DataFrame(np.concatenate([arr1, arr2, arr3, arr4, arr5],\n axis=1))\n df_check.columns = ['a1', 'a2', 'a3', 'b1', 'b2']\n df_check['misc'] = np.random.rand(20)\n df_check['dates'] = [t for t in range(20)]\n\n times, x_cols, y_cols, x_roll, y_roll = \\\n lstmutil.TimeSeries.rolling_horizon(df_check,\n time_col=\"dates\",\n x_cols=['a1', 'a2', 'a3'],\n y_cols=['b1', 'b2'],\n in_window=5,\n out_window=2)\n\n # Check times\n self.assertTrue(np.all(np.isclose(\n np.array([ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),\n times\n )))\n\n # Check shapes\n self.assertEqual(len(times), len(x_roll))\n self.assertEqual(x_roll.shape, (14, 15))\n self.assertEqual(y_roll.shape, (14, 4))\n self.assertEqual(len(x_cols), 5*3)\n self.assertEqual(len(y_cols), 2*2)\n\n # Check first and last row\n self.assertTrue(\n np.all(np.isclose(x_roll[0, :],\n np.array([1, 10, 100,\n 2, 20, 200,\n 3, 30, 300,\n 4, 40, 400,\n 5, 50, 500]\n ))))\n self.assertTrue(\n np.all(np.isclose(y_roll[0, :],\n np.array([ -6, -12, -7, -14]))))\n\n self.assertTrue(\n np.all(np.isclose(x_roll[-1, :],\n np.array([14, 140, 1400,\n 15, 150, 1500,\n 16, 160, 1600,\n 17, 170, 1700,\n 18, 180, 1800,]\n ))))\n\n self.assertTrue(\n np.all(np.isclose(y_roll[-1, :],\n np.array([ -19, -38, -20, -40]))))",
"def test_clean_ts(self):\n\n df_raw = pd.DataFrame({\n 'dates' : ['2020.11.03',\n '11/6/2020',\n '2020-11-9 1:30PM',\n '11/10/2020 12:00AM',\n '11/13/2020 2:00PM',\n '11/21/2020',\n ],\n 'junk' : [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\"],\n })\n\n df_raw['values']=[160.25, 150.5, 'foo', 140, 145, 130]\n\n ts1=lstmutil.TimeSeries(begin=datetime(2020, 11, 5),\n end=datetime(2020, 11, 23)\n )\n\n # Two outer timestamps should be reject, and the non-numeric\n # value should be dropped.\n df_clean1 = ts1.clean_ts(df_raw, 'dates', 'values')\n self.assertEqual(len(df_clean1), 4)\n\n\n # Check interpolate within and beyond region\n df_interp1 = ts1.interp_ts(df_clean1,\n 'values',\n ts1.get_target_timestamps())\n\n self.assertEqual(df_interp1['values'].values[0], 150.5)\n self.assertEqual(df_interp1['values'].values[-1], 130.0)\n mask=df_interp1['date']=='2020-11-11'\n self.assertEqual(df_interp1[mask]['values'].values[0], 140.0)\n\n # Make sure we didn't lose good data\n df_merge1=df_interp1.merge(df_clean1,\n on='date',\n suffixes=['_i', '_c'],\n how='left')\n\n num_before=sum([not pd.isnull(t) for t in df_clean1['values']])\n num_after=sum([not pd.isnull(t) for t in df_merge1['values_c']])\n self.assertTrue(num_before, num_after)",
"def test_mean1(self):\n x1 = [10, 11, 12, 13]\n x2 = [5, 6, 8, 9]\n m1 = welchs_ttest(x1, x2)[\"mean1\"]\n\n assert abs(m1-11.5) < 0.001"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A timeseries is anomalous if the average of the last ten datapoints is more than a trigger multiple greater than the last data point. This algorithm is best suited to timeseries in which most datapoints are > 100 (e.g. high rate). The arbitrary trigger values become noisier with lower-value datapoints, but it still catches drops off a cliff.
|
def detect_drop_off_cliff(timeseries, end_timestamp, full_duration):
try:
if len(timeseries) < 21:
return False
int_end_timestamp = int(timeseries[-1][0])
# Determine resolution of the data set
int_second_last_end_timestamp = int(timeseries[-2][0])
resolution = int_end_timestamp - int_second_last_end_timestamp
ten_data_point_seconds = resolution * 10
ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds
# @modified 20210420 - Support #4026: Change from scipy array to numpy array
# Deprecation of scipy.array
# ten_datapoint_array = scipy.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])
ten_datapoint_array = np.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])
ten_datapoint_array_len = len(ten_datapoint_array)
if ten_datapoint_array_len > 3:
            # DO NOT handle if there are negative integers in the range; where is the
            # bottom of the cliff if a range goes negative? The maths does not work either
ten_datapoint_min_value = np.amin(ten_datapoint_array)
if ten_datapoint_min_value < 0:
return False
ten_datapoint_max_value = np.amax(ten_datapoint_array)
if ten_datapoint_max_value < 10:
return False
ten_datapoint_array_sum = np.sum(ten_datapoint_array)
ten_datapoint_value = int(ten_datapoint_array[-1])
ten_datapoint_average = ten_datapoint_array_sum / ten_datapoint_array_len
if ten_datapoint_max_value == 0:
return False
if ten_datapoint_max_value < 101:
trigger = 15
if ten_datapoint_max_value < 20:
trigger = ten_datapoint_average / 2
if ten_datapoint_max_value < 1:
trigger = 0.1
if ten_datapoint_max_value > 100:
trigger = 100
if ten_datapoint_value == 0:
# Cannot divide by 0, so set to 0.1 to prevent error
ten_datapoint_value = 0.1
if ten_datapoint_value == 1:
trigger = 1
if ten_datapoint_value == 1 and ten_datapoint_max_value < 10:
trigger = 0.1
if ten_datapoint_value == 0.1 and ten_datapoint_average < 1 and ten_datapoint_array_sum < 7:
trigger = 7
# Filter low rate and variable between 0 and 100 metrics
if ten_datapoint_value <= 1 and ten_datapoint_array_sum < 100 and ten_datapoint_array_sum > 1:
# @modified 20210420 - Support #4026: Change from scipy array to numpy array
# Deprecation of scipy.array
# all_datapoints_array = scipy.array([x[1] for x in timeseries])
all_datapoints_array = np.array([x[1] for x in timeseries])
all_datapoints_max_value = np.amax(all_datapoints_array)
if all_datapoints_max_value < 100:
# print "max_value for all datapoints at - " + str(int_end_timestamp) + " - " + str(all_datapoints_max_value)
return False
ten_datapoint_result = ten_datapoint_average / ten_datapoint_value
if int(ten_datapoint_result) > trigger:
return True
except:
return None
return False
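A minimal usage sketch, assuming numpy is imported as np and the function above is in scope; the magnitudes are illustrative of a high-rate metric.

import numpy as np

# A high-rate metric (~1000 per interval) at 60 s resolution that falls off a
# cliff in the final datapoint.
base_ts = 1600000000
timeseries = [[base_ts + i * 60, 1000.0] for i in range(29)]
timeseries.append([base_ts + 29 * 60, 1.0])

# Expected: True, because the ten-datapoint average exceeds the last value by
# far more than the trigger multiple selected from the data range.
print(detect_drop_off_cliff(timeseries, timeseries[-1][0], 86400))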
|
[
"def high_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks > \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def checkLatestAnomaly(df):\n anomalies = df[df[\"anomaly\"] == 15]\n if anomalies.shape[0] > 0:\n lastAnomalyRow = anomalies.iloc[-1]\n anomalyTime = lastAnomalyRow[\"ds\"]\n higher = lastAnomalyRow[\"y\"] > lastAnomalyRow[\"upper\"]\n\n per = 0\n denom = lastAnomalyRow[\"upper\"] if higher else lastAnomalyRow[\"lower\"]\n if denom > 0:\n per = int(100 * (abs(lastAnomalyRow[\"y\"] - denom) / denom))\n\n return {\n \"highOrLow\": \"high\" if higher else \"low\",\n \"value\": float(lastAnomalyRow[\"y\"]),\n \"percent\": per,\n \"anomalyTimeISO\": dp.parse(anomalyTime).isoformat(),\n \"anomalyTime\": dp.parse(anomalyTime).timestamp() * 1000,\n }\n return {}",
"def test_trend(self):\n bandwidth_list = list()\n bandwidth_value = 0\n while len(bandwidth_list) != 100:\n previous_bandwidth_value = bandwidth_value\n bandwidth_value = random.randint(0, 10000000)\n self._bw.change(bandwidth_value)\n if bandwidth_value > previous_bandwidth_value:\n bandwidth_list.append(1)\n elif bandwidth_value == previous_bandwidth_value:\n bandwidth_list.append(0)\n elif bandwidth_value < previous_bandwidth_value:\n bandwidth_list.append(-1)\n self.assertEqual(bandwidth_list, self._bw.historical_trend())",
"def first_hour_average(timeseries, end_timestamp, full_duration):\n\n try:\n int_end_timestamp = int(timeseries[-1][0])\n int_start_timestamp = int(timeseries[0][0])\n int_full_duration = int_end_timestamp - int_start_timestamp\n\n # Determine data resolution\n # last_hour_threshold = int_end_timestamp - (int_full_duration - 3600)\n int_second_last_end_timestamp = int(timeseries[-2][0])\n resolution = int_end_timestamp - int_second_last_end_timestamp\n # @modified 20160814 - pyflaked\n # ten_data_point_seconds = resolution * 10\n sixty_data_point_seconds = resolution * 60\n sixty_datapoints_ago = int_end_timestamp - sixty_data_point_seconds\n # @modified 20221127 - Task #4738: Allow first_hour_average to handle different resolution\n # last_hour_threshold = int_end_timestamp - (int_full_duration - sixty_datapoints_ago)\n last_hour_threshold = int_end_timestamp - int_full_duration\n\n # series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])\n last_hour_threshold_end = last_hour_threshold + 3600\n series = pandas.Series([x[1] for x in timeseries if x[0] > last_hour_threshold and x[0] < last_hour_threshold_end])\n\n mean = (series).mean()\n stdDev = (series).std()\n t = tail_avg(timeseries, end_timestamp, full_duration)\n\n return abs(t - mean) > 3 * stdDev\n except:\n return None\n\n return False",
"def tail_avg(timeseries, end_timestamp, full_duration):\n try:\n t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3\n return t\n except IndexError:\n return timeseries[-1][1]",
"def grubbs_test(timeseries):\n series = scipy.array([x for x in timeseries])\n stdDev = np.std(series) \n mean = np.mean(series)\n tail_average = tail_avg(timeseries)\n z_score = (tail_average - mean) / stdDev\n return z_score",
"def low_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks < \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def anomalise(self, base_period = None, ts = None):\n\n delta = self.time[1] - self.time[0]\n seasonal_mean = np.zeros_like(self.data) if ts is None else np.zeros_like(ts)\n\n if base_period is None:\n ndx = np.arange(self.time.shape[0])\n else:\n ndx = np.logical_and(self.time >= base_period[0].toordinal(), self.time <= base_period[1].toordinal())\n d = self.data.copy() if ts is None else ts\n t = self.time.copy()\n self.time = self.time[ndx]\n\n if delta == 1:\n # daily data\n day_avg, mon_avg, _ = self.extract_day_month_year()\n self.time = t.copy()\n day_data, mon_data, _ = self.extract_day_month_year()\n d = d[ndx, ...]\n for mi in range(1,13):\n mon_mask_avg = (mon_avg == mi)\n mon_mask_data = (mon_data == mi)\n for di in range(1,32):\n sel_avg = np.logical_and(mon_mask_avg, day_avg == di)\n sel_data = np.logical_and(mon_mask_data, day_data == di)\n if np.sum(sel_avg) == 0:\n continue\n seasonal_mean[sel_data, ...] = np.nanmean(d[sel_avg, ...], axis = 0)\n if ts is None:\n self.data[sel_data, ...] -= seasonal_mean[sel_data, ...]\n else:\n ts[sel_data, ...] -= seasonal_mean[sel_data, ...]\n elif abs(delta - 30) < 3.0:\n # monthly data\n _, mon_avg, _ = self.extract_day_month_year()\n self.time = t.copy()\n _, mon_data, _ = self.extract_day_month_year()\n d = d[ndx, ...]\n for mi in range(1,13):\n sel_avg = (mon_avg == mi)\n sel_data = (mon_data == mi)\n if np.sum(sel_avg) == 0:\n continue\n seasonal_mean[sel_data, ...] = np.nanmean(d[sel_avg, ...], axis = 0)\n if ts is None:\n self.data[sel_data, ...] -= seasonal_mean[sel_data, ...]\n else:\n ts[sel_data, ...] -= seasonal_mean[sel_data, ...]\n else:\n raise Exception('Unknown temporal sampling in the field.')\n\n return seasonal_mean",
"def detect_peaks_1d(timeseries, delta_peak, threshold, peak_width=5):\n\n # Sort time series by magnitude.\n max_idx = np.squeeze(timeseries.argsort())[::-1]\n\n # Remove peaks within delta_peak to the array boundary\n max_idx = max_idx[max_idx > delta_peak]\n max_idx = max_idx[max_idx < np.size(timeseries) - delta_peak]\n\n max_values = np.zeros_like(timeseries[max_idx])\n max_values[:] = np.squeeze(timeseries[max_idx])\n\n # Number of peaks exceeding threshold\n num_big_ones = np.sum(timeseries > threshold)\n try:\n max_values = max_values[:num_big_ones]\n max_idx = max_idx[:num_big_ones]\n except:\n print(\"detect_peaks_1d: No peaks in the unmasked part of the array.\")\n return np.array([])\n\n # Mark the indices we need to skip here\n max_idx_copy = np.zeros_like(max_idx)\n max_idx_copy[:] = max_idx\n\n # Eliminate values exceeding the threshold within delta_peak of another\n # for idx, mv in enumerate(max_values):\n # print 'iterating over %d peaks' % ( np.size(max_idx))\n for i, idx in enumerate(max_idx):\n current_idx = max_idx_copy[i]\n if (max_idx_copy[i] == -1):\n # print 'idx %d is zeroed out' % (idx)\n continue\n\n # Check if this value is larger than the valueghbouring values of the\n # timeseries. If it is not, continue with next iteration of for loop\n if (timeseries[current_idx] < timeseries[\n current_idx - peak_width: current_idx + peak_width]).any():\n max_idx_copy[i] = -1\n continue\n\n # Zero out all peaks closer than delta_peak\n close_idx = np.abs(max_idx_copy - idx)\n close_ones = np.squeeze(np.where(close_idx < delta_peak)[0])\n max_idx_copy[close_ones] = -1\n # Copy back current value\n max_idx_copy[i] = max_idx[i]\n\n # Remove all entries equal to -1\n max_idx_copy = max_idx_copy[max_idx_copy != -1]\n max_idx_copy = max_idx_copy[max_idx_copy < np.size(timeseries)]\n\n # Return an ndarray with all peaks of large amplitude indices\n return max_idx_copy",
"def anomaly_filter(self, df):\n # calculate forward means\n df['30_PERIOD_FWD_MEAN'] = df[self.endog].rolling(30, min_periods=0).mean().tolist()\n df['30_PERIOD_FWD_MEAN'].fillna(inplace=True, method='bfill')\n df['30_PERIOD_FWD_MEAN'][1:] = df['30_PERIOD_FWD_MEAN'][:-1]\n\n # calculate reverse means\n reverse_mean = df[self.endog].sort_index(ascending=False).rolling(30, min_periods=0).mean().tolist()\n reverse_mean.reverse()\n df['30_PERIOD_BWD_MEAN'] = reverse_mean\n df['30_PERIOD_BWD_MEAN'].fillna(inplace=True, method='ffill')\n df['30_PERIOD_BWD_MEAN'][:-1] = df['30_PERIOD_BWD_MEAN'][1:]\n\n\n df['FWD_STD'] = (df[self.endog] - df['30_PERIOD_FWD_MEAN'])**2\n df['FWD_STD'] = np.sqrt(df['FWD_STD'].rolling(30, min_periods=0).mean())\n df['FWD_STD'].fillna(inplace=True, method='bfill')\n df['FWD_STD'][1:] = df['FWD_STD'][:-1]\n\n df['BWD_STD'] = (df[self.endog] - df['30_PERIOD_BWD_MEAN'])**2\n bkwd_std = np.sqrt(df['BWD_STD'].sort_index(ascending=False).rolling(30, min_periods=0).mean()).tolist()\n bkwd_std.reverse()\n df['BWD_STD'] = bkwd_std\n df['BWD_STD'].fillna(inplace=True, method='bfill')\n df['BWD_STD'][1:] = df['BWD_STD'][:-1]\n\n df['FILTER_VARIANCE'] = np.where(df['FWD_STD'] < df['BWD_STD'], df['BWD_STD'], df['FWD_STD'])\n\n df['HIGH_FILTER'] = df['30_PERIOD_FWD_MEAN']+df['FILTER_VARIANCE']*3\n df['LOW_FILTER'] = df['30_PERIOD_FWD_MEAN']-df['FILTER_VARIANCE']*3\n\n df[self.endog] = np.where(df[self.endog] > df['HIGH_FILTER'], df['HIGH_FILTER'], df[self.endog])\n df[self.endog] = np.where(df[self.endog] < df['LOW_FILTER'], df['LOW_FILTER'], df[self.endog])\n\n cleaned_timeseries = df[[self.date_header, self.endog]]\n\n return cleaned_timeseries",
"def stddev_from_moving_average(timeseries, end_timestamp, full_duration):\n try:\n series = pandas.Series([x[1] for x in timeseries])\n if PANDAS_VERSION < '0.18.0':\n expAverage = pandas.stats.moments.ewma(series, com=50)\n stdDev = pandas.stats.moments.ewmstd(series, com=50)\n else:\n expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()\n stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)\n\n if PANDAS_VERSION < '0.17.0':\n return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)\n else:\n return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]\n# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat\n except:\n return None\n\n return False",
"def end(Tb37V, SWE):\r\n peaks = detect_peaks(Tb37V.values, mph=0) #Find all positive peaks in 37V data\r\n aoi_sum = 0 #Set initial condition\r\n for p in peaks:\r\n peak = Tb37V.index[p]\r\n aoi = Tb37V[np.logical_and(Tb37V.index < peak + pd.Timedelta('3 days'), Tb37V.index > peak - pd.Timedelta('3 days'))] #Select values within 3 days\r\n if np.nanmean(aoi.values) > aoi_sum: \r\n aoi_sum = np.nanmean(aoi.values) #Replace initial condition if the peak is higher/thicker\r\n current_max = peak\r\n RollSWE = SWE.copy() #Create a copy of the SWE data\r\n minswe = np.nanmin(SWE.values) #Find yearly minimum SWE\r\n if np.isnan(np.nanmin(SWE.values)):\r\n minswe = 0\r\n RollSWE[SWE.values <= minswe + 2] = 1 #Classify the SWE series into 1s and 0s based on distance from min swe\r\n RollSWE[SWE.values > minswe + 2] = 0\r\n rm = pd.rolling_sum(RollSWE, 5) #Calculate a rolling sum on a 5 day window\r\n snow_clear = rm[rm.values >= 4].index.min() - pd.Timedelta('4 days') #Declear snow clearance if 4/5 days are 'snow clear'\r\n try:\r\n return pd.Series((snow_clear, current_max)).min() #Choose the minimum between the SWE clearance and the Tb37 max dates\r\n except:\r\n try:\r\n return pd.Series(snow_clear, Tb37V.idxmax()).min() #If no data was returned from the Tb37 peak find, use the maximum yearly value instead\r\n except:\r\n return snow_clear #If this also fails, use only the snow clearance date to determine date of snow melt off\r",
"def climate_anomaly(x, t):\n\t# climate normal period := January 1971 to December 1980\n\t# Seasonally normalized values w.r.t 30 years running mean \n\t# +1 is added to d2.year to include the last year in the analysis \n\td1 = dt.datetime(850, 1, 1)\n\td2 = dt.datetime(1850, 12, 31)\n\tx_anom = np.zeros((x.shape[0]))\n\tfor yy in range(d1.year,d2.year+1):\n\t\tlyear = yy-15\n\t\tuyear = yy+15\n\t\tif yy < d1.year+15:\n\t\t\tlyear = d1.year\n\t\t\tuyear = d1.year+30\n\t\tif yy > d2.year+1-15:\n\t\t\tlyear = d2.year+1-30\n\t\t\tuyear = d2.year\n\t\tidx1 = lyear - d1.year\n\t\tid = yy - d1.year\n\t\t#print(\"for year\",yy,\"lyear\",lyear,\"uyear\",uyear,\"idx1_m\")\n\t\tfor mm in range(12):\n\t\t\t#print(\"index\",id*12+mm,\"start_month\",idx1*12,\"end_month\",idx1*12+12*30)\n\t\t\tidx = np.arange(idx1*12+mm,idx1*12+12*30+mm,12)\t\n\t\t\tx_anom[id*12+mm] = x[id*12+mm] - x[idx].mean()\n\t\t\t#x_anom[id*12+mm] = (x[id*12+mm] - x[idx].mean())/x[idx].std()\n\t\t\t#print(\"Mean\",x[idx].mean()) \n\t\t\t#print(\"Std\",x[idx].std()) \n\t#print(x_norm) \n\t#print(x_norm.shape) \n\t#print(idx.shape) \n\t#print(x_norm.min()) \n\t#print(x_norm.max()) \n\t\n\treturn _quantile_normalization(x_anom, mode=\"mean\")\n\t#return x_norm",
"def stationarize(self, time_series):",
"def fix_anomaly(anomaly_def, channels, expected_timestamps, missing_value=-111, data_timestamp_ratio=1):\n\n # last good timestamp index\n last_good_index = anomaly_def[\"last_good_index\"]\n dtr = data_timestamp_ratio #this will be 1 for page-level data channels\n \n if anomaly_def[\"anomaly_type\"] == \"A\" or anomaly_def[\"anomaly_type\"] == \"C\":\n # timestamp index at recovery\n recovery_point = int(anomaly_def[\"recovery_point\"])\n for channel in channels:\n for i in range(last_good_index + 1,recovery_point, 1):\n channel.timestamps[i] = expected_timestamps[i]\n for i in range((last_good_index + 1)*dtr, recovery_point*dtr, 1): \n channel.data[i] = missing_value\n \n \n elif anomaly_def[\"anomaly_type\"] == \"B\":\n \n timestamps = np.array(channels[0].timestamps, copy=True)\n \n first_bad_timestamp = timestamps[last_good_index+1]\n last_good_timestamp = timestamps[last_good_index]\n \n normal_time_diff = timestamps[last_good_index-1] - timestamps[last_good_index-2]\n time_jump = first_bad_timestamp - last_good_timestamp - normal_time_diff\n\n a = last_good_timestamp + timedelta(microseconds=10)\n b = first_bad_timestamp - timedelta(microseconds=10)\n \n # insert a timestamp just after last_good_index and another just before last_good_index+1\n timestamps = np.insert(timestamps, last_good_index+1, np.array([a,b]))\n expected_timestamps = np.insert(expected_timestamps, last_good_index+1, np.array([a,b]))\n expected_timestamps[last_good_index + 3:] += time_jump\n \n anomaly_def[\"first_index_after_shift\"] = last_good_index + 3\n anomaly_def[\"first_timestamp_after_shift\"] = expected_timestamps[last_good_index + 3].strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n \n #insert missing_value into each channel to align with these new timestamps, and update timestamp arrays\n missing_value_array = np.tile(A=missing_value, reps=2*dtr)\n for channel in channels:\n # \"B\" anomalies can be the result of pauses in recording while the device is charging, so retain battery level prior to and after anomaly\n if channel.name == \"Battery\":\n anomaly_def[\"Battery_before_anomaly\"] = channel.data[last_good_index]\n anomaly_def[\"Battery_after_anomaly\"] = channel.data[last_good_index+2]\n channel.data = np.insert(channel.data, (last_good_index+1)*dtr, missing_value_array)\n channel.timestamps = timestamps\n \n \n elif anomaly_def[\"anomaly_type\"] == \"E\":\n recovery_point = int(anomaly_def[\"recovery_point\"])\n \n timestamps = np.array(channels[0].timestamps, copy=True)\n \n for channel in channels:\n end_point = min(len(timestamps)-1, recovery_point)\n for i in range(last_good_index + 1, end_point, 1):\n channel.timestamps[i] = expected_timestamps[i]\n \n for i in range((last_good_index + 1)*dtr, (end_point+1)*dtr, 1): \n channel.data[i] = missing_value\n \n \n # if recovery point is not the end of the file\n if recovery_point < len(timestamps)-1:\n time_jump = timestamps[recovery_point] - expected_timestamps[recovery_point]\n anomaly_def[\"time_jump_secs\"] = time_jump.total_seconds()\n anomaly_def[\"recovery_point_timestamp\"] = timestamps[recovery_point].strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n expected_timestamps[recovery_point:] += time_jump\n \n \n elif anomaly_def[\"anomaly_type\"] == \"D\" or anomaly_def[\"anomaly_type\"] == \"F\":\n # truncate each channel data after last good index \n for channel in channels:\n channel.data = channel.data[:(last_good_index)*dtr]\n channel.timestamps = channel.timestamps[:last_good_index]\n \n expected_timestamps = expected_timestamps[:last_good_index]\n \n return 
channels, expected_timestamps, anomaly_def",
"def evaluate_latest_gas_trend(n_records: int) -> float:\n gas_prices = []\n for gas in Gas.select().order_by(Gas.created_at.desc()).limit(n_records):\n gas_prices.append(gas.gas_price_average)\n\n return calculate_ema(gas_prices)",
"def test_big_gaps_getNewVals():\n timebin = 1.\n times = np.concatenate((np.random.uniform(0, 10, 50),\n np.random.uniform(30, 40, 50)))\n newtimes = wm.getNewTimes(times, timebin)\n rvs = np.random.normal(loc=0, scale=5, size=100)\n uncs = np.random.normal(loc=1., scale=0.5, size=100)\n newRVs, newUncs = wm.getNewVals(newtimes, times, rvs,\n uncs, timebin=timebin)\n fins = np.where(np.isfinite(newUncs))\n newRVs = newRVs[fins]\n newUncs = newUncs[fins]\n newtimes = newtimes[fins]\n assert np.median(newUncs) < np.median(uncs)",
"def test_find_better_Tpeaks(ecg_data):\n # Get peaks between 20 and 25 seconds\n peaks = find_better_Tpeaks(ecg_data[20*360:25*360])\n assert len(peaks) == 6 # Correct number of peaks identified\n # Returns indices as expected\n assert set(peaks) == {1382, 230, 1077, 1658, 794, 507}, \\\n \"Incorrect peak indices found\"",
"def last_event(self):\n last = -np.inf\n for series in self.data:\n if series[-1,0] > last:\n last = series[-1,0]\n return last"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints a circle with radius r
|
def circle(r):
for y in range(r*2 + 1):
for x in range(r*2 + 1):
time.sleep(0.1)
sys.stdout.flush()
dist = math.sqrt((x - r)**2 + (y - r)**2)
diff = abs(dist - r)
print("*" if diff <= 0.5 else " ", end="")
print()
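A minimal invocation sketch: the function relies on the standard-library math, sys and time modules being imported, and the 0.1 s sleep per grid cell means a radius-5 circle (an 11 by 11 grid) takes roughly 12 seconds to draw.

import math
import sys
import time

# Cells whose distance from the centre is within 0.5 of the radius get a '*'.
circle(5)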
|
[
"def drawCircle(tortle, radius):\n tortle.penup()\n tortle.goto(0,-1)\n tortle.pendown()\n tortle.circle(radius, steps=360)",
"def circle(r):\n A = pi * r**2\n return A",
"def svgCircle(center, radius):\n\tsvgTag('circle cx=\"%d\" cy=\"%d\" r=\"%s\"' % (center+(radius,)) )",
"def draw_circle(radius):\n moves = 0\n while True:\n forward(2 * math.pi * radius / 360)\n left(1)\n moves += 1\n\n if moves >= 360:\n break\n end_fill()",
"def circle(t, r):\n circumference = 2 * pi * r\n sides = 80\n length = circumference / sides\n polygon(t, sides, length)",
"def plot_circle( x0, y0, r, **style):\n plot_ellipse( x0, y0, r, r, 0, **style)",
"def plot_circle(radius: Number):\n v_area_circle = vectorize(area_circle)\n v_perimeter_circle = vectorize(perimeter_circle)\n\n rad = linspace(0, radius, 50)\n a = v_area_circle(rad)\n p = v_perimeter_circle(rad)\n\n plot(rad, a, '-b', label='Area')\n plot(rad, p, '-r', label='Perimeter')\n legend(loc='upper right')\n title('Geometrical properties of circles')\n xlabel('Radius')\n pylab.savefig('/home/bemsibom/PycharmProjects/Figures/circle_properties.png')\n show()",
"def drawCircle(color,radius):\n turtle.down()\n turtle.color(color,color)\n turtle.begin_fill()\n turtle.circle(radius)\n turtle.end_fill()\n turtle.up()",
"def _set_as_circle(self, radius):\n self.radius = radius\n self.perimeter = 2.0 * np.pi * self.radius\n self.thickness = 0.0\n self.width = self.perimeter / np.pi",
"def circle(self, circ, startang=0, endang=360):\n self.ps('%f %f %f %f %f arc stroke'\n % (circ.o.x, circ.o.y, circ.r, startang, endang))",
"def circle(x):\r\n turtle.circle(x)",
"def drawCircle(self, x, y, radius, color): \n dx = radius\n dy = 0\n xChange = 1 - 2 * radius\n yChange = 1\n radiusError = 0\n while (dx >= dy):\n self.drawPixel(x + dx, y + dy, color)\n self.drawPixel(x - dx, y + dy, color)\n self.drawPixel(x - dx, y - dy, color)\n self.drawPixel(x + dx, y - dy, color)\n self.drawPixel(x + dy, y + dx, color)\n self.drawPixel(x - dy, y + dx, color)\n self.drawPixel(x - dy, y - dx, color)\n self.drawPixel(x + dy, y - dx, color)\n dy = dy + 1\n radiusError += yChange\n yChange += 2\n if (2 * radiusError + xChange > 0):\n dx = dx - 1\n radiusError += xChange\n xChange += 2",
"def circle_ring(R,r):\n A = pi * (R**2 - r**2)\n return A",
"def emit_draw_circle(self, x, y, radius):\n\n self._append_line(\"$ctx.beginPath();\")\n self._append_line(\"$ctx.arc(%s, %s, %s, 0, 2 * Math.PI, false);\" % (str(x), str(y), str(radius)))\n self._append_line(\"$ctx.closePath();\")\n self._append_line(\"$ctx.stroke();\")",
"def circle(self, center, radius, color, thickness=1, shift=0):\n circle = cv2.circle(self.img, center, radius, color, thickness, shift)\n cv2.imshow('circle', self.img)",
"def drawBigCircle(a, b, r):\n t.up()\n t.goto(a,b-r)\n t.down()\n t.color(\"White\")\n t.circle(r)\n #t.ht()\n t.up()\n t.goto(a,b)",
"def writeCircle(self, x0, y0, r, color):\n self.drawCircle(x0, y0, r, color)\n self.np.write()",
"def draw_circle(position, radius, ax=None, **kwargs):\n ax = ax or plt.gca()\n\n height = 2 * np.sqrt(radius)\n\n circ = Ellipse(position, 2*height, 2*height, **kwargs)\n circ.set_facecolor('blue')\n circ.set_edgecolor('blue')\n\n ax.add_patch(circ)",
"def circle_perimeter(r):\n return 2.0*pi*r"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints a heart with size parametrized by r (must be an even number)
|
def heart(r):
if r % 2 != 0:
print("r must be even")
return
for y in range(r*2 + 1 - r//2):
for x in range(r*2 + 1):
# top two half-circles
if y <= r//2:
# first half circle
if x <= r:
dist = math.sqrt((x - r//2)**2 + (y - r//2)**2)
diff = abs(dist - r//2)
print("*" if diff <= 0.5 else " ", end="")
# second half circle
else:
dist = math.sqrt((x - (r//2 + r))**2 + (y - r//2)**2)
diff = abs(dist - r//2)
print("*" if diff <= 0.5 else " ", end="")
else:
print("*" if (x == (y - r//2) or x == (r*2 + r//2 - y)) else " ", end="")
print()
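A minimal invocation sketch, assuming the math module is imported; r must be even, as the guard at the top of the function enforces.

import math

# Two half-circle lobes of radius r//2 on top of a 'V' shape; an odd r is rejected.
heart(6)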
|
[
"def _print_heart(activity: Activity):\n lrp = LeftRightPrinter(left_width=60)\n _print_hr_data(activity, lrp)\n _print_hr_zones(activity, lrp)\n lrp.print()",
"def draw_heart(pos: np.ndarray) -> None:\n pygame.draw.lines(PSEUDO_SCREEN, colours['red'], False, (\n pos + (0, 1),\n pos + (0, 2),\n pos + (2, 4),\n pos + (4, 2),\n pos + (4, 1),\n pos + (3, 0),\n pos + (3, 2),\n pos + (2, 3),\n pos + (1, 2),\n pos + (1, 0),\n pos + (2, 1),\n pos + (2, 2)\n ))",
"def h_line():\r\n print(\"x\"*80)",
"def print_square(size):\n if type(size) is not int or (type(size) is float and size < 0):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if size is 0:\n print(\"\", end=\"\")\n for i in range(size):\n for k in range(size):\n print(\"#\", end=\"\")\n print()",
"def draw_star(t,sz):#Docstring\n\tfor i in range(5):\n\t\tt.forward(sz)\n\t\tt.right(144)\n\tmove_turtle(t)",
"def histogram(n, counts, height):\n percents = [0] * 54\n for i in xrange(54):\n percents[i] = counts[i] * 100.0 / n\n biggest = max(percents)\n scale = height / biggest\n stars = [x*scale for x in percents]\n\n for i in xrange(height + 3, -1, -1):\n char = lambda f: \"*\" if f > i else \" \"\n print \" \".join(\n \"%04s\" % char(c) for (i,c) in enumerate(stars)\n if i % 2 == 0\n )\n print \" \".join(\"%4d\"%i for i in xrange(54) if i % 2 == 0),\n print \" :: # Cards Left\"\n# print \" \".join(\"%04.1f\"%c for (i,c) in enumerate(stars) if i % 2 == 0),\n# print \" :: Star count\"\n print\n print\n print \" \".join(\"%04.1f\"%c for (i,c) in enumerate(percents) if i % 2 == 0),\n print \" :: Percentage\"",
"def hbar(length=80):\n return '='*length",
"def _print_hr_data(activity: Activity, lrp: LeftRightPrinter):\n lrp.add_left(\"\")\n lrp.add_left(\"\\x1B[34m\\x1B[1mHeart data\\x1B[0m\")\n lrp.add_left(\"\")\n lrp.add_left(f\" Average .............. {int(activity.avg_hr)} bpm\")\n lrp.add_left(f\" Maximum .............. {activity.max_hr} bpm\")",
"def print_christmas_tree(size):\n # replace the line below with your code\n out = \"\"\n for i in range(1, size + 1):\n out += \" \" * (size - i)\n out += \"*\" * (i * 2 - 1)+ \"\\n\"\n out += (\" \" * (size - 1)) + \"*\"\n return out",
"def make_shirt(size='large', message='I love python'):\n print(\"\\nSize of your t-shirt is :\" + size)\n print(\"Message that will be printed on t-shit : \" + message)",
"def print_n_stars(n):\n for i in range(n):\n print \"*\", # having a ',' tells print not to insert a new line after print\n\n print '' # inserts a new line after the loop\n return",
"def drawBigCircle(a, b, r):\n t.up()\n t.goto(a,b-r)\n t.down()\n t.color(\"White\")\n t.circle(r)\n #t.ht()\n t.up()\n t.goto(a,b)",
"def draw_measures(self, ind):\n measures = self.progression.length()\n hashes = self.time_signature\n num_lines = math.ceil(measures/8)\n bar_height = self.window_height/2/num_lines\n for i in range(num_lines): # i is the line number\n if i+1 == num_lines and measures%8 != 0:\n meas = measures%8\n else:\n meas = 8\n y = self.window_height/4 + bar_height*(i + 0.5)\n pygame.draw.line(self.display_surf, (0,0,0), (20,y-bar_height/6),(20,y+bar_height/6),6) # at start of line\n for j in range(meas): # j is the measure within that line\n meas_length = (self.window_width-40)/meas\n x = 20 + meas_length*(j+1)\n if (i*8+j == ind):\n pygame.draw.line(self.display_surf, (255,255,0), (x-meas_length+3,y),(x-3,y),50) # the highlight\n pygame.draw.line(self.display_surf, (0,0,0), (x,y-bar_height/6),(x,y+bar_height/6),6) # barlines between measures\n\n\n chord_idx = i*8+j\n # The root and tonality of the chord are displayed separately to faciltate click detection.\n chord_message = \"{} {}\".format(self.progression.chord_list[chord_idx][0], self.progression.chord_list[chord_idx][1])\n chord_text = pygame.font.Font(None, 30).render(chord_message, 1, (0,0,0))\n self.display_surf.blit(chord_text, (x-(meas_length+chord_text.get_width())/2, y-bar_height/5))\n self.progression.chord_pos[chord_idx] = (\\\n x-(meas_length+chord_text.get_width())/2,\\\n y-bar_height/5,\\\n x-(meas_length-chord_text.get_width())/2,\\\n y-bar_height/5+chord_text.get_height())\n\n # self.display_surf.blit(chord_text, (x-meas_length/2-chord_text.get_width()/2,y-bar_height/5)) # Chord labels\n\n for k in range(hashes): # k is the hash (beat) within that measure\n hashspace = meas_length/hashes\n x2 = x - meas_length+ hashspace*(k + 0.5)\n pygame.draw.line(self.display_surf, (0,0,0), (x2+10,y-bar_height/12),(x2-10,y+bar_height/12),4) # the hashes",
"def make_shirt(size, message):\n\tprint(\"The size of the shirt is \" + size + \".\")\n\tprint(\"Message: \" + message.title())",
"def draw(self, t, size):",
"def _draw_nice_clock(self):\n self._draw_nice_background()\n self._draw_hands()",
"def render_bar(self, color: bool, length: int) -> str:\n\n s = \"\"\n\n if color:\n s += str(colorama.Fore.GREEN)\n\n pc_per_block = 1.0 / length\n remaining_percent = self.percent\n\n for _ in range(length):\n this_pc = min(1.0, (1.0 / pc_per_block) * remaining_percent)\n remaining_percent -= min(remaining_percent, pc_per_block)\n s += \" \" if this_pc == 0.0 else chr(0x258F - floor(this_pc / (1.0 / 7)))\n\n if color:\n s += str(colorama.Fore.RESET)\n\n return s",
"def get_heart_rate(beats=None, sampling_rate=1000., smooth=False, size=3):\r\n\r\n # check inputs\r\n if beats is None:\r\n raise TypeError(\"Please specify the input beat indices.\")\r\n\r\n if len(beats) < 2:\r\n raise ValueError(\"Not enough beats to compute heart rate.\")\r\n\r\n # compute heart rate\r\n ts = beats[1:]\r\n hr = sampling_rate * (60. / np.diff(beats))\r\n\r\n # physiological limits\r\n indx = np.nonzero(np.logical_and(hr >= 40, hr <= 200))\r\n ts = ts[indx]\r\n hr = hr[indx]\r\n\r\n # smooth with moving average\r\n if smooth and (len(hr) > 1):\r\n hr, _ = smoother(signal=hr, kernel='boxcar', size=size, mirror=True)\r\n\r\n return ReturnTuple((ts, hr), ('index', 'heart_rate'))",
"def f7_10(self, strokeLen):\n return strokeLen",
"def main(n, height=25):\n counts = [0] * 54\n for i in xrange(n):\n counts[play()] += 1\n histogram(n, counts, height)\n print \"(Histogram of %d plays of Idiot's Solitaire)\" % n"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints a skewed square with side length a
|
def skewed(a):
for i in range(a):
        # concise alternative: print(' '*i, end="")
        for j in range(i):
            print(' ', end="")
for j in range(a):
print("*", end="")
print()
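A minimal invocation sketch showing the shape the indentation loop produces; the call below uses a = 3 as an illustrative size.

# skewed(3) prints a parallelogram, each row shifted one space further right:
# ***
#  ***
#   ***
skewed(3)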
|
[
"def draw_square(self, side_length):\n self.draw_polygon(side_length, 4)",
"def draw_square(t, sz, col, ps, step):\t\n t.color(col)\n t.pensize(ps)\n for i in range(4):\n t.fd(sz)\n t.left(90)\n t.penup()\n t.goto(t.pos()+ (-step,-step))\n t.pendown()",
"def draw_square(t,sz):\t#Docstring\n\tfor i in range(4):\n\t\tdraw_quarter(t,sz)\n\t#After drawing a square, rotate 360/5 degrees to position self to draw next square\n\tt.left(360/5)",
"def draw_square(t, n, sz):\t\n x = sz\n for i in range(n):\n for j in range(4):\n t.right(89)\n t.fd(sz)\n sz = sz + x",
"def print_square(size):\n if type(size) is not int or (type(size) is float and size < 0):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if size is 0:\n print(\"\", end=\"\")\n for i in range(size):\n for k in range(size):\n print(\"#\", end=\"\")\n print()",
"def starbox(width, height):\n print(\"*\" * width) #print top edge of box\n\n # print sides of box\n for _ in range(height-2):\n print(\"*\" + \" \" * (width-2) + \"*\") \n\n print(\"*\" * width) #print bottom edge of box",
"def show_sub_square(self, start, size):\n end = start + size\n ret_str = ''\n for row in range(start, end):\n for col in range(start, end):\n ret = f'{self.get(row, col) :.2f} '\n ret_str += f'{ret: >8}'\n ret_str += '\\n'\n print(ret_str)\n return ret_str",
"def draw_rotated_square_shape(some_turtle, side_length, number_of_squares):\n for square in range(number_of_squares):\n draw_square_from_midpoint_on_bottom(some_turtle, side_length)\n some_turtle.left(360/number_of_squares)",
"def draw_Square():\r\n t.down()\r\n t.color(\"purple\")\r\n t.fillcolor(\"purple\")\r\n t.begin_fill()\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.end_fill()\r\n t.up",
"def skew(A):\n \n A_mat = np.array([[0.0, -A[2], A[1]], [A[2], 0.0, -A[0]], [-A[1], A[0], 0.0]])\n \n return A_mat",
"def square(text):\n text = text.split()\n text.insert(0, \"\")\n final = triangle(text)\n base = len(final[-1])\n final = [\"-\" * base] + final\n final.append(\"-\" * base)\n\n return \"\\n\".join(final)",
"def draw_multicolor_square(t,sz):\r\n for i in [\"red\",\"purple\",\"hotpink\",\"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)",
"def draw_square(animal, size):\n for _ in range(5):\n animal.forward(size)\n animal.left(90)\n animal.forward(size)\n animal.left(90)\n animal.forward(size)\n animal.left(90)\n animal.forward(size)\n animal.left(90)\n animal.penup()\n animal.forward(50)\n animal.pendown()",
"def skew(x):\n x = x.flatten()\n return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])",
"def square(t, x, y, s):\n\n t.penup()\n t.setx(x - s / 2)\n t.sety(y - s / 2)\n t.pendown()\n for i in range(4):\n t.forward(s)\n t.left(90)",
"def test_1_create_floyds_tringle():\n print(\"***********\")\n print(\"Creating a floyd's triangle with 5 rows\")\n print(\"***********\")\n\n num = 1\n for rows in range(1, 6):\n for col in range(0, rows):\n print(num, end=\"\")\n num = num + 1\n print(\"\\n\")",
"def print_square(num):\n print(\"Square: {}\\n\".format(num * num))",
"def draw_side_bar():\n width1 = 10 * sq_size\n height1 = w_height # w_height - sq_size//2\n height2 = 0 # sq_size//2\n pyglet.graphics.draw(4, pyglet.gl.GL_POLYGON,\n (\"v2i\", (width1, height1, width1, height2,\n width1 + 2, height2, width1 + 2, height1)),\n (\"c3B\", (0, 0, 0) * 4))",
"def plot_skew(sequence: str):\n skew_list = compute_skew.compute_skew(sequence)\n positions = [x for x in range(len(sequence))]\n\n # plot the skew\n plt.plot(positions, skew_list)\n plt.title(\"Skew Diagram\")\n plt.xlabel(\"Position\")\n plt.ylabel(\"Skew\")\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints the frequency of each word in a text. The text parameter must be a string.
|
def print_freq_words(text):
    assert isinstance(text, str), "Parameter 'text' must be a string."
sent = nltk.word_tokenize(text)
fd = nltk.FreqDist(w.lower() for w in sent if re.match(r'[a-zA-Z0-9]',w))
for w in fd:
print('%12s, %3d' % (w, fd[w]))
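A minimal usage sketch, assuming nltk and re are imported as the snippet above requires, and that the tokenizer data has been downloaded (the resource is named 'punkt' in older nltk releases and 'punkt_tab' in newer ones).

import re
import nltk

nltk.download('punkt', quiet=True)  # on newer nltk releases the resource is named 'punkt_tab'

sample = "The quick brown fox jumps over the lazy dog. The dog sleeps."
# Prints each lower-cased alphanumeric token with its count, one token per line.
print_freq_words(sample)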
|
[
"def word_frequency(text: str):\n # get individual words\n tokenized = text.split()\n\n # count the frequency\n word_counter = collections.Counter(tokenized)\n #frequencies = list(collections.Counter(tokenized).items())\n\n return word_counter",
"def word_frequency(text: str):\n # get individual words\n tokenized = text.split()\n\n # count the frequency\n word_counter = collections.Counter(tokenized)\n\n return word_counter",
"def main():\n # TODO: ask the user to input some `text`\n\n\n # TODO: count the number of occurences of each word in the text\n\n\n # TODO: sort by descending order of occurences and display the result",
"def freq_dict(self, text):\n freq = {}\n for char in text:\n if not char in freq:\n freq[char] = 0\n freq[char] += 1\n return freq",
"def print_frequency(some_text):\n if(not isinstance(some_text, str)):\n print(\"Error: only accepts strings\")\n return None\n elif(isinstance(some_text, int)):\n print(\"Error: only accepts strings\")\n return None\n elif (isinstance(some_text, float)):\n print(\"Error: only accepts strings\")\n return None\n alphabet = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n index = [0] * 26\n \n for i in some_text:\n if(i.lower() in alphabet):\n index[alphabet.index(i.lower())] = index[alphabet.index(i.lower())] + 1\n \n j = 0\n \n while j < 26:\n if(not (index[j] == 0)):\n print(alphabet[j] + \" \" + str(index[j]))\n j = j + 1",
"def char_freq(string):\n return Counter(string)",
"def word_frequency(self, text, words):\n\t\tstext = text.split()\n\t\tfrequency = 0\n\n\t\tif type(words) is list:\n\n\t\t\tfor word in stext:\n\n\t\t\t\tif word in words:\n\n\t\t\t\t\tfrequency += 1\n\n\t\t\treturn frequency\n\n\t\telif type(words) is str:\n\n\t\t\tfrequency_words = self.words_frequency_dict(text)\n\t\t\tfrequency = frequency_words[words]\n\n\t\t\treturn frequency\n\n\t\treturn",
"def word_count(self):\n\n # Split by non-alphanumerical boundaires\n split_text = re.split('\\W',self.text.lower())\n\n # Count occurences\n counts = {}\n for word in split_text:\n if word:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts",
"def get_char_frequency(text):\n\n total_count = len(text)\n frequency = defaultdict(float) # defaultdicts guarantees that unassigned keys have value 0\n for c in text:\n frequency[c] += 1\n for c in frequency:\n frequency[c] /= total_count\n # divinding a single time after summing integers increases accuracy and speed,\n # since you are not adding floats\n\n return frequency # { char: frequency }",
"def word_counts(text):\n # Could use syllable_counts, return length of lists; faster to do manually\n result = []\n counter = 0\n for (word, tag) in tag_text(text):\n if tag in PUNCTUATION_TAGS:\n result.append((counter, word))\n counter = 0\n else:\n counter += 1\n if counter:\n result.append((counter, None))\n return result",
"def letter_freq(txt):\n\n freq = {}\n\n for element in txt.lower():\n if element not in freq.keys():\n freq[element] = 1\n else:\n freq[element] += 1\n\n return freq",
"def summarize_text(text):\n return summarize(text, word_count=50)",
"def sentence_frequency(self, sentence, text):\n\t\tresult = re.findall(sentence + \"+\", text)\n\t\treturn len(result)",
"def stats_text_cn(text): # 统计中文词频 # 使用文档字符串说明\n if not isinstance(text,str): #如果不是字符串类型触发异常\n raise ValueError (\"input data is not string type\") \n countcn = {}\n for i in text:\n if u'\\u4e00' <= i <= u'\\u9fff':\n countcn[i] = text.count(i)\n countcn = sorted(\n countcn.items(), key=lambda item: item[1], reverse=True) # 按出现数字从大到小排列\n return countcn",
"def character_frequency_list(self, text):\n\t\tif type(text) is str:\n\n\t\t\tfrequency = {}\n\t\t\tfrequency_list = []\n\t\t\ttreated_text = text.replace(\" \",\"\")\n\n\t\t\tfor c in treated_text:\n\n\t\t\t\tif c in frequency:\n\n\t\t\t\t\tfrequency[c] += 1\n\n\t\t\t\telse:\n\n\t\t\t\t\tfrequency[c] = 1\n\n\t\t\tfor k, v in frequency.items():\n\n\t\t\t\tfrequency_list.append({\"character\": k, \"frequency\": v})\n\n\t\t\tfrequency_list.sort(key=lambda x: x[\"frequency\"], reverse=True)\n\t\t\treturn frequency_list\n\n\t\treturn",
"def term_frequency(contents, word):\n tf = 0\n for w in contents:\n if w == word:\n tf += 1\n return tf",
"def word_count(text):\r\n allwords = text.split() # a list of words\r\n words = set(allwords) # that is a set, therefore we avoid duplicates.\r\n return {word:allwords.count(word) for word in words}",
"def print_word_freq(file):\n file = open(file)\n text = file.read()\n \n line_break = text.replace(\"\\n\", \" \")\n split_text = line_break.split(' ')\n space_fix = line_break.replace(\" \", \" \")\n dubdub_dash = space_fix.replace(\"--\", \" \")\n cleaned_text = clean_clean(dubdub_dash)\n print(cleaned_text)\n \n word_list = []\n for word in split_text:\n if not word in STOP_WORDS:\n word_list.append(word)\n \n organized_words = sorted(word_list, key = str)\n\n word_count_list = dict()\n for word in organized_words:\n if word in word_count_list:\n word_count_list[word] += 1\n else:\n word_count_list[word] = 1\n word_count_list = {key:value for key, value in word_count_list.items() if value >=1}\n sorted_words = sorted(word_count_list.items(), key = lambda seq: seq[1], reverse=True)\n\n \n for key, value in sorted_words:\n print(key.rjust(20), \" | \", value, value * (\"*\"))",
"def generate_freq_dict(input_text):\n result_dict = {}\n for word in input_text:\n word = word.lower()\n if word not in result_dict:\n result_dict[word] = 1\n else:\n result_dict[word] += 1\n return(result_dict)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A ParentField should receive its parent from the form
|
def test_forminit(self, client):
## unfortunately, BaseForm depends on lots of stuff, including
## database access
class Form(BaseForm):
class Meta:
model = Type1
field = ParentField()
parent = mock.MagicMock()
form = Form(parent)
assert form.fields['field'].parent == parent
|
[
"def related_parent (field):\n return field.related.parent_model",
"def validate_parent(self, raises: type[Exception] = ValidationError):\n if not self._state.adding:\n if self.parent:\n # Prevent the model instance from being its own parent, which would\n # result in an infinite recursion.\n if self.parent == self:\n raise raises(f'{self} cannot be its own parent.')\n # Prevent the model instance from having one of its descendants as\n # its parent, which would result in an infinite recursion.\n elif self.descendants.filter(pk=self.parent.pk).exists():\n raise raises(\n f'{self} cannot have {self.parent} as its parent; '\n f'{self.parent} is a descendant of {self}.'\n )",
"def parent_changed(self, old, new):\n pass",
"def onParent(self, objects, parent, opts):\n pass",
"def set_parent ( self, parent ):\n self.parent_ref = get_object_ref ( parent )",
"def on_model_change(self, form, model, is_created):\n super(OrganizationAdmin, self).on_model_change(form, model, is_created)\n if (not is_root_organization(model)) and (getattr(form, \"parent\") is None or getattr(form, \"parent\")._data is None):\n raise ValidationError(gettext('Please specify parent organization(creation of top level organization not allowed)'))\n CycleReferenceValidator.validate(form, model, object_type='Organization', parent='parent',\n children='all_children', is_created=is_created)",
"def _clean_parent(self):\n self._clean_standalone()\n if self.has_stockrecords:\n raise ValidationError(\n _(\"A parent product can't have stockrecords.\"))",
"def _get_parent(self) -> \"adsk::core::Ptr< adsk::core::Base >\" :\n return _core.DropDownControl__get_parent(self)",
"def parent(self):\n return self.model.parent(self)",
"def set_ParentPost(self, value):\n super(CreateAuthenticatedPostInputSet, self)._set_input('ParentPost', value)",
"def test_parent(self):\n button = self.dlg.Alpha.find()\n self.assertEqual(button.parent(), self.dlg.find())",
"def parent_event(self, event):\n if self.is_active:\n old = event.old\n new = event.new\n with new.children_event_context():\n with old.children_event_context():\n if new is None:\n for obj in self.objects:\n obj.set_parent(None)\n else:\n new.insert_children(self, self.objects)",
"def OnCreate(self, form):\n # Get parent widget\n # print(\"......\")\n # print(type(form))\n self.parent = self.FormToPyQtWidget(form)\n self.PopulateForm()",
"def form_valid(self, form):\n form.instance.owner = self.request.user\n return super(ProjectCreate, self).form_valid(form)",
"def get_field_name_as_child(self):\n for k, v in self.get_parent().fields.items():\n if v is self:\n return k",
"def formfield_for_dbfield(self, db_field, **kwargs):\n if isinstance(db_field, ForeignKey) and \\\n db_field.name in self.related_search_fields:\n kwargs['widget'] = fk_lookup.FkLookup(db_field.rel.to)\n return super(CMSNewsAndEventsPlugin, self).formfield_for_dbfield(db_field, **kwargs)",
"def get_parent_value(self): # pragma: no cover\n return self.parent",
"def parent(self, parent):\n warnings.warn(\n \"Setting a parent is potentially dangerous. Consider using \"\n \"Topology.add_subtopology instead\"\n )\n if parent is None:\n raise NotImplementedError(\n \"Setting parents to None is not yet supported\"\n )\n self._parent = _validate_parent(parent)",
"def test_extends_form(self):\n self.assertTrue(issubclass(SelectCsvGeneratorForm, forms.Form))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
flip all the images in dir, and then save them to another dir
|
def do_all_flip(base_dir="F:/ad_samples/train_samples/ad_web_2/"):
# get all files
# base_dir = "F:/ad_samples/train_samples/ad_text/"
# base_dir = "F:/ad_samples/download_sample/14/"
# base_dir = "F:/ad_samples/train_samples/ad_web/"
# files = glob.glob(base_dir + "*.jpg")
files = glob.glob(base_dir + "/*.png")
# like ['E:/img\\1.jpg', 'E:/img\\10.jpg']
# start 3 process
# pool = ProcessPool(3)
pool = ThreadPool(3)
rets = pool.map(flip_img_save2dir, files)
pool.close()
pool.join()
    print('all images flipped and saved to dir')
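
The worker `flip_img_save2dir` that the pool maps over is referenced but not shown here; a minimal sketch of what such a worker might look like, assuming OpenCV and a hypothetical output directory:

import os
import cv2  # assumption: OpenCV is available

OUT_DIR = "F:/ad_samples/train_samples/ad_web_2_flipped/"  # hypothetical output directory

def flip_img_save2dir(img_path):
    # read the image, mirror it around the vertical axis, and write it under OUT_DIR
    img = cv2.imread(img_path)
    if img is None:
        return False
    flipped = cv2.flip(img, 1)  # flipCode=1: horizontal flip
    out_path = os.path.join(OUT_DIR, os.path.basename(img_path))
    return cv2.imwrite(out_path, flipped)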
|
[
"def flipImages(rootDir, imgFormat=None):\n if imgFormat is None:\n flipImages(rootDir, 'jpg')\n flipImages(rootDir, 'png')\n else:\n string = rootDir + \"/*/*.\" + imgFormat\n filenames = glob.glob(string)\n if len(filenames) == 0:\n string = rootDir + \"/*.\" + imgFormat\n filenames = glob.glob(string)\n for fileName in filenames:\n img = cv2.imread(fileName)\n if img is not None:\n flippedFilename = fileName.replace(\".\" + imgFormat, \"_flipped.\" + imgFormat)\n cv2.imwrite(filename=flippedFilename, img=cv2.flip(img, 1))\n print(\"Flipped \" + fileName + \"as \" + flippedFilename)",
"def read_write_img(self):\n for file in os.listdir(self.path):\n filelist = input_path + file\n img = cv2.imread(filelist)\n dst_img = self.replace_fast(img, (0, 0, 0))\n # cv2.imwrite(out_path + file[:-4] + '.jpg', re_img)\n plt.subplot(121), plt.imshow(img), plt.title('initial')\n plt.subplot(122), plt.imshow(dst_img), plt.title('result')\n plt.show()",
"def image_rotate_save(img_src, img_name):\n for rot_time in range(4):\n rot_img = np.rot90(img_src, rot_time)\n cv2.imwrite(img_name + str(rot_time) + '.bmp', rot_img)\n os.rename(img_name + str(rot_time) + '.bmp', img_name + str(rot_time))",
"def stage_images(self):\n if not os.path.exists(self.data_dir):\n os.mkdir(self.data_dir)\n for x in self.image_files():\n shutil.move(x, self.data_dir)",
"def convert_folder(datadir,target):\r\n images = np.array([datadir + f for f in os.listdir(datadir) if f.endswith(\".RAW\") ])\r\n\r\n for img in images:\r\n png = read_raw(img)\r\n save_img(target+img.split(\"/\")[-1].split(\".\")[0], png)",
"def finale(directory=None):\n \n if directory == None:\n #change directory to the 'before' folder\n os.chdir(\"before\")\n directory = os.getcwd()# Use working directory if unspecified\n os.chdir(\"..\")\n \n # Create a new directory 'after' folder in the same parent folder as the before 'folder'\n new_directory = os.path.join(os.getcwd(), 'after')\n try:\n os.mkdir(new_directory)\n except OSError:\n pass # if the directory already exists, proceed \n \n #load all the images\n image_list, file_list = get_images(directory) \n\n #go through the images and save modified versions\n for n in range(len(image_list)):\n # Parse the filename\n filename, filetype = os.path.splitext(file_list[n])\n \n # apply the filter function and store as new images\n new_image = negatify(image_list[n])\n #save the altered image, using PNG to retain transparency\n new_image_filename = os.path.join(new_directory, filename + '.png')\n new_image.save(new_image_filename)",
"def write_images(self):\n while self.cache:\n # pop the first and write it out\n fn, image = self.cache.pop(0)\n tifffile.imwrite(fn, image)",
"def _revise_dir(self,\n flow_filenames: Sequence[str]) -> Sequence[Sequence[str]]:\n img1_filenames = []\n img2_filenames = []\n for flow_filename in flow_filenames:\n idx = int(osp.splitext(osp.basename(flow_filename))[0])\n img1_filename = osp.join(self.img1_dir,\n f'{idx:05d}' + self.img1_suffix)\n img2_filename = osp.join(self.img2_dir,\n f'{idx:05d}' + self.img1_suffix)\n img1_filenames.append(img1_filename)\n img2_filenames.append(img2_filename)\n return img1_filenames, img2_filenames",
"def image_converter(input_path:str, output_path:str)->None:\n for filename in os.listdir(input_path):\n img = Image.open(input_path +\"/\"+ filename)\n img = img.convert('RGB')\n img.save(output_path +'/' +filename.split('.')[0] + '.jpg')\n del img",
"def rename_imgs(path):",
"def reload_image_folder():",
"def randflip():\n\n # See tasks.autoc.randflip() for explanation\n\n dataset = datagen.DataSet(datapath)\n dataset.load()\n\n for k, ens in enumerate(dataset.ensembles):\n\n print(k)\n ens.do_randflip()\n dataset.save(k)",
"def crop_images(old_dir, new_dir):\n print(\"crop_images\")\n os.mkdir(new_dir)\n\n H, S, V = None, None, None\n for filename in os.listdir(old_dir):\n if not filename.endswith(\".jpg\"):\n continue\n\n index = filename.find(\".jpg\")\n name = filename[:index]\n\n # Approximate coordinates of face\n coords = scipy.io.loadmat(old_dir + name + \".mat\")\n start_x = int(coords[\"x\"][0][0] - 0.5*(coords[\"x\"][1][0] - coords[\"x\"][0][0]))\n end_x = int(coords[\"x\"][1][0] + 0.5*(coords[\"x\"][1][0] - coords[\"x\"][0][0]))\n start_y = int(coords[\"y\"][0][0] - (coords[\"y\"][3][0] - coords[\"y\"][0][0]))\n end_y = int(coords[\"y\"][3][0] + (coords[\"y\"][3][0] - coords[\"y\"][2][0]))\n img = io.imread(old_dir + filename)\n face = img[start_y:end_y, start_x:end_x]\n # Save cropped image\n scipy.misc.imsave(new_dir + name + \".png\", face)",
"def Convert_files(data_dir, annotations_dir):\n\n for root, dirs, files in os.walk(data_dir, topdown=False):\n dirs = sorted(dirs)\n for dir_name in dirs:\n\n for dir_root, _, sub_dir_files in os.walk(os.path.join(root + \"/\" + dir_name)):\n sub_dir_files = sorted(sub_dir_files)\n for filename in sub_dir_files:\n if 'prefix' not in filename.split('_'):\n rename_file = dir_root + \"/\" + filename\n new_name = dir_root + \"/\" + \"prefix_\" + filename\n _, ext = os.path.splitext(filename)\n if ext == \".ppm\":\n os.rename(rename_file, new_name)\n i = cv2.imread(new_name)\n cv2.imwrite((new_name.strip(\".ppm\") + \".jpg\"), i)\n print(\"files converted\")\n else:\n os.rename(rename_file, new_name)\n new_file = annotations_dir + '/' + 'prefix_' + filename\n os.replace(new_name, new_file)\n # os.remove(rename_file)\n\n\n return print(\"All files have been converted, Renamed and moved succesfully\")",
"def combineImage(dir_name, out, count):\n try:\n # Read all images into a list\n images = [cv2.imread(f\"{dir_name}/{out}{i}.png\") for i in range(count)]\n stitched = cv2.vconcat(images)\n cv2.imwrite(f\"{out}.png\", stitched)\n print(f\"Saved combined image at {out}.png\")\n return \n except Exception as e:\n # Yes yes, terrible exception handling, gimme a break. :)\n print(e)",
"def image_augment(path_in):\n\n print('Starting image augmentation')\n\n # get the classes\n folders = os.listdir(path_in)\n\n # uses the path_in and walks in folders to square crop and reduce image\n for folder in folders:\n print('----{}'.format(folder))\n lst = os.listdir(path_in + os.sep + folder)\n\n for file in lst:\n # open the image:\n # open image\n ori = Image.open(path_in + os.sep + folder + os.sep + file)\n\n # save rotated versions of original image:\n img = ori.rotate(90)\n img.save(path_in + os.sep + folder + os.sep + 'aug_rot090_' + file)\n img = ori.rotate(180)\n img.save(path_in + os.sep + folder + os.sep + 'aug_rot180_' + file)\n img = ori.rotate(270)\n img.save(path_in + os.sep + folder + os.sep + 'aug_rot270_' + file)\n\n img = ori.transpose(Image.FLIP_TOP_BOTTOM)\n img.save(path_in + os.sep + folder + os.sep + 'aug_tb_rot000_' + file)\n img = ori.transpose(Image.FLIP_TOP_BOTTOM).rotate(90)\n img.save(path_in + os.sep + folder + os.sep + 'aug_tb_rot090_' + file)\n img = ori.transpose(Image.FLIP_TOP_BOTTOM).rotate(180)\n img.save(path_in + os.sep + folder + os.sep + 'aug_tb_rot180_' + file)\n img = ori.transpose(Image.FLIP_TOP_BOTTOM).rotate(270)\n img.save(path_in + os.sep + folder + os.sep + 'aug_tb_rot270_' + file)\n\n print('Image augmentation complete\\n')",
"def save_images(figs, save_path):\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n for fig in figs:\n filename = fig.layout.title.text.lower().replace(' ','_')\n file = save_path + '/' + filename + '.webp'\n\n fig.write_image(file)\n\n im = Image.open(file)\n im.show()",
"def saveImage(in_path, out_path, thresh):\r\n if os.path.exists(in_path):\r\n for img in os.listdir(in_path):\r\n temp = in_path + '/' + img\r\n img_pic = Image.open(temp)\r\n img_bir = grey_to_binary(img_pic, thresh)\r\n f, e = os.path.splitext(img)\r\n outname = f + \".jpg\"\r\n final_out_path = out_path + outname\r\n# print(final_out_path)\r\n try:\r\n img_bir.save(final_out_path)\r\n except IOError:\r\n print(\"Fail to save the file\")\r\n print(\"Binary pictures are successfully saved\")\r\n else:\r\n print(\"Input path is not exist.\")",
"def flip(self, image):\n enable = False\n dst = image\n if enable:\n dst = cv2.flip(image, randomint(0,1))\n return dst"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Executes dropEvent when the target location is within the first row
|
def dropEvent(self, evt):
    # TODO: use self.indexAt(index).row() == 0 instead
    if self.header().sectionSize(0) > evt.pos().x():  # only execute drop when on the first column
QTreeView.dropEvent(self, evt)
|
[
"def at_drop(self, dropper):\r\n pass",
"def dropEvent(self, event):\n # copied from DocTree but not implemented yet\n # dragitem = self.selectedItems()[0]\n # dragparent = dragitem.parent()\n # dropitem = self.itemAt(event.pos())\n # if not dropitem:\n # # ## event.ignore()\n # return\n # qtw.QTreeWidget.dropEvent(self, event)\n # count = self.topLevelItemCount()\n # if count > 1:\n # for ix in range(count):\n # if self.topLevelItem(ix) == dragitem:\n # self.takeTopLevelItem(ix)\n # self.oldparent.insertChild(self.oldpos, dragitem)\n # self.setCurrentItem(dragitem)\n # break\n # return\n # self.parent.set_project_dirty(True)\n # self.setCurrentItem(dragitem)\n # dropitem.setExpanded(True)\n super().dropEvent(event)",
"def dropEvent(self, event):\n clickedNode = self.indexAt(event.pos()).internalPointer()\n if clickedNode:\n super().dropEvent(event)\n self.expand(clickedNode.index())\n self.selectionModel().selectNode(clickedNode)\n else:\n # avoid removal of \"moved\" items\n event.setDropAction(QtCore.Qt.CopyAction)\n event.ignore()",
"def on_drop(self):\n print(Fore.MAGENTA, f\"\\nYou have dropped {self.name} item!\")\n print()",
"def check_drop_down_in_scroller(scroller,pos):\n global is_draging\n global blocks\n is_draging=False #Block is being dropped down\n for block in blocks:\n if block.clicked: #Searching the clicker blocs\n block.clicked = False \n target_not_found = True # If no target snap point is found, the block is droppped\n for line in scroller.blocks:\n for snap in line: \n #Looking at all blocks in the scroller\n if snap.rect.collidepoint(scroller.global_coord_to_local(pos)):\n if isinstance(snap,GUI.models.Blocks.SNAP_BLOCK): #If the block is a snap point and the cursor is over it\n scroller.replace(snap,block)\n elif snap.is_movable:\n scroller.insert(snap,block)\n blocks.remove(block) # Replacing the snap point with the block and removing the block from the main window\n block.write_pos(add_tuple(scroller.global_coord_to_local(pos),(-block.width//2,-block.height//2)))\n #Updating the block coordinates to account for the changing frame of reference\n target_not_found = False\n return\n if target_not_found: #If the block was not dropped over a snap point\n blocks.remove(block) # The block is deleted",
"def __dropEventCallback(self, dropEvent, droppedItems, parentItem,\n childItemIndex):\n topLevelLocationPaths = self.__sceneGraphView.getTopLevelLocations()\n for droppedItem in droppedItems:\n locationPath = droppedItem.getLocationPath()\n if not locationPath in topLevelLocationPaths:\n self.__sceneGraphView.addTopLevelLocation(locationPath)\n dropEvent.accept()",
"def drop(self):\n\n if self._draggee is not None:\n messager.send('do drop',self._draggee)\n self._draggee.drop()\n self._draggee = None",
"def drag_start(self, event):\n # record the item and its location\n self._drag_data[\"item\"] = self.c.find_closest(event.x, event.y)[0]\n self._drag_data[\"x\"] = event.x\n self._drag_data[\"y\"] = event.y",
"def dropEvent(self, e):\n if isinstance(e.mimeData(), DataIndexMime):\n indices = e.mimeData().getDataIndices()\n for index in indices:\n self.setText(self.datatree.getItem(index).name)\n else:\n super(DropLineEdit, self).dropEvent(e)",
"def windowDropSlot(self, dropPos):\n self.onDropSignal.emit(self.objectName(), dropPos)",
"def dropEvent(self, e):\r\n\t\tif e.mimeData().hasUrls:\r\n\t\t\te.setDropAction(QtCore.Qt.CopyAction)\r\n\t\t\te.accept()\r\n\t\t\tfor url in e.mimeData().urls():\r\n\t\t\t\t# # Workaround for macOS dragging and dropping\r\n\t\t\t\t# if os.environ['IC_RUNNING_OS'] == \"MacOS\":\r\n\t\t\t\t# \tfname = str(NSURL.URLWithString_(str(url.toString())).filePathURL().path())\r\n\t\t\t\t# else:\r\n\t\t\t\t# \tfname = str(url.toLocalFile())\r\n\t\t\t\tfname = str(url.toLocalFile())\r\n\r\n\t\t\t#self.fname = fname\r\n\t\t\t#verbose.print_(\"Dropped '%s' on to window.\" %fname)\r\n\t\t\tprint(\"Dropped '%s' on to window.\" %fname)\r\n\t\t\tif os.path.isdir(fname):\r\n\t\t\t\t#self.renderPath = fname\r\n\t\t\t\tself.ui.path_lineEdit.setText(fname)\r\n\t\t\t\t#self.renderTableUpdate()\r\n\t\t\t\t#self.updateTaskListDir(fname)\r\n\t\t\telif os.path.isfile(fname):\r\n\t\t\t\tprint(\"File\")\r\n\t\t\t\t#self.updateTaskListFile(fname)\r\n\t\telse:\r\n\t\t\te.ignore()",
"def maybe_queue_drag() -> None:\n pass",
"def cause_dropped_item_gravity(self):\n for drop in self.drops_list:\n if drop.changey == 0:\n drop.changey = -self.base_y_gravity\n else:\n drop.changey -= self.gravity_acceleration\n\n drop.movey(drop.changey)\n hit_list = pygame.sprite.spritecollide(drop, self.block_list, False)\n for block in hit_list:\n if drop.changey > 0:\n drop.rect.bottom = block.rect.top\n elif drop.changey < 0:\n drop.rect.top = block.rect.bottom\n drop.changey = 0",
"def mousedown(self, event):\n self.deltax = event.x - (self.x + self.width // 2)\n self.deltay = event.y - (self.y + self.height // 2)\n if abs(self.deltax) < 50 and abs(self.deltay) < 50:\n self.dragging = True\n # only drag one bunny at a time - consume the event\n event.consumed = True",
"def drag_leave(self):\n return \"ignore\"",
"def test_drag_drop_item(self):\n create_form = self.bank.get_item_form_for_create([DRAG_DROP_ITEM_RECORD])\n create_form.display_name = \"Drag and Drop Test Item\"\n create_form.description = \"Item for testing Drag and Drop\"\n test_item = self.bank.create_item(create_form)\n\n create_form = self.bank.get_question_form_for_create(test_item.ident, [MULTI_LANG_DRAG_DROP_QUESTION_RECORD])\n droppable_1 = create_form.add_droppable(get_display_text('This is a Droppable'), 'Droppable 1')\n droppable_2 = create_form.add_droppable(get_display_text('This is another Droppable'), 'Droppable 2')\n droppable_3 = create_form.add_droppable(get_display_text('This is an unused Droppable'), 'Droppable 3')\n target = create_form.add_target(get_display_text('This is the Target'))\n corner_coordinate_10 = BasicCoordinate([10, 10])\n corner_coordinate_12 = BasicCoordinate([20, 20])\n corner_coordinate_20 = BasicCoordinate([50, 50])\n corner_coordinate_22 = BasicCoordinate([70, 70])\n corner_coordinate_30 = BasicCoordinate([68, 68])\n spatial_unit_1 = RectangularSpatialUnit(coordinate=corner_coordinate_10, width=10, height=10)\n spatial_unit_2 = RectangularSpatialUnit(coordinate=corner_coordinate_20, width=20, height=20)\n zone_A = create_form.add_zone(\n spatial_unit_1,\n target['id'],\n 'Zone A')\n zone_B = create_form.add_zone(\n spatial_unit_2,\n target['id'],\n 'Zone B')\n question = self.bank.create_question(create_form)\n # print question.object_map\n # print zone_A\n\n create_form = self.bank.get_answer_form_for_create(test_item.ident, [DRAG_AND_DROP_ANSWER_RECORD])\n create_form.add_zone_condition(droppable_1['id'], zone_A['id'])\n create_form.add_zone_condition(droppable_2['id'], zone_B['id'])\n correct_answer = self.bank.create_answer(create_form)\n # print correct_answer.object_map\n test_item = self.bank.get_item(test_item.ident)\n\n create_form = self.bank.get_item_form_for_create([DRAG_DROP_ITEM_RECORD])\n create_form.display_name = \"Drag and Drop Item for Testing Response\"\n create_form.description = \"Item for testing Drag and Drop Response Evaluation\"\n response_item = self.bank.create_item(create_form)\n\n # Create correct answer for response testing\n create_form = self.bank.get_answer_form_for_create(response_item.ident, [DRAG_AND_DROP_ANSWER_RECORD])\n create_form.add_coordinate_condition(droppable_1['id'], target['id'], BasicCoordinate([11, 18]))\n create_form.add_coordinate_condition(droppable_2['id'], target['id'], BasicCoordinate([55, 60]))\n correct_response = self.bank.create_answer(create_form)\n\n # Create incorrect answers for response testing\n create_form = self.bank.get_answer_form_for_create(response_item.ident, [DRAG_AND_DROP_ANSWER_RECORD])\n create_form.add_coordinate_condition(droppable_1['id'], target['id'], BasicCoordinate([10, 18]))\n incorrect_response_1 = self.bank.create_answer(create_form)\n\n create_form = self.bank.get_answer_form_for_create(response_item.ident, [DRAG_AND_DROP_ANSWER_RECORD])\n create_form.add_coordinate_condition(droppable_3['id'], target['id'], BasicCoordinate([12, 18]))\n incorrect_response_2 = self.bank.create_answer(create_form)\n\n create_form = self.bank.get_answer_form_for_create(response_item.ident, [DRAG_AND_DROP_ANSWER_RECORD])\n create_form.add_coordinate_condition(droppable_1['id'], target['id'], BasicCoordinate([11, 18]))\n create_form.add_coordinate_condition(droppable_2['id'], target['id'], BasicCoordinate([55, 60]))\n create_form.add_coordinate_condition(droppable_3['id'], target['id'], BasicCoordinate([12, 18]))\n 
incorrect_response_3 = self.bank.create_answer(create_form)\n\n response_item = self.bank.get_item(response_item.ident)\n\n assert BasicCoordinate([11, 18]) in spatial_unit_1\n assert BasicCoordinate([55, 60]) in spatial_unit_2\n\n assert test_item.is_response_correct(correct_response)\n assert not test_item.is_response_correct(incorrect_response_1)\n assert not test_item.is_response_correct(incorrect_response_2)\n assert not test_item.is_response_correct(incorrect_response_3)",
"def __dragMoveEventCallback(self, dragMoveEvent, draggedItems, parentItem,\n childItemIndex):\n topLevelLocationPaths = self.__sceneGraphView.getTopLevelLocations()\n if parentItem is None and childItemIndex >= len(topLevelLocationPaths):\n for draggedItem in draggedItems:\n locationPath = draggedItem.getLocationPath()\n if not locationPath in topLevelLocationPaths:\n dragMoveEvent.accept()\n break",
"def onMouseReleaseMiddle(self,event):\r\n #index = self.indexAt(event.pos())\r\n #print(index.row(),index.column())\r\n print(\"AbstractTableView: Release Middle event\")",
"def drag_existing_select_box(self):\n select_tool_move_end_pos = pygame.mouse.get_pos()\n dx = select_tool_move_end_pos[0] - self.select_tool_move_start_pos[0]\n dy = select_tool_move_end_pos[1] - self.select_tool_move_start_pos[1]\n for cell in self.select_tool_cells_selected: # move all the selected drawing cells\n cell.move(dx, dy)\n self.select_tool_rect = self.select_tool_rect.move(dx, dy) # move the select box itself\n self.select_tool_move_start_pos = select_tool_move_end_pos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all n grams from l after normalizing.
|
def ngram(n, l):
filtered = normalize(l)
for start in range(0, len(filtered) - n + 1):
yield ''.join(filtered[start:start + n])
|
[
"def get_all_ngrams(self):\n flattened_list = [y for x in self.model.values() for y in x]\n print(f'This is every possible n gram {flattened_list}')\n return flattened_list\n pass",
"def list2ngrams(l, size):\n\tif size >= len(l):\n\t\treturn [tuple(l)]\n\treturn [tuple(l[i:i+size]) for i in range(len(l)-size+1)]",
"def get_ngrams(self,list_of_tokens, min_n, max_n):\n all_ngrams = list()\n for tokens in list_of_tokens:\n n_tokens = len(tokens)\n for i in range(n_tokens):\n for j in range(i + min_n, min(n_tokens, i + max_n) + 1):\n all_ngrams.append(\" \".join(tokens[i:j]))\n return all_ngrams",
"def ngram_perm_norm(score, n, xs):\n # dynamic programming solution\n start = (HALT,) * (n - 1)\n result = ngram_perm_norm_from((score, n, start, pbag(xs)))\n rfutils.get_cache(ngram_perm_norm_from).clear()\n return result",
"def ngram_slices(i, n, l):\n out = []\n\n a = i - n + 1\n if a < 0:\n a = 0\n\n b = i + 1\n if b + n > l:\n b = l - n + 1\n\n d = b - a\n\n for k in range(d):\n start = a + k\n stop = start + n\n out.append(slice(start, stop))\n\n return out",
"def chunks(l, n):\n o = int(np.round(len(l)/n))\n out = []\n # For item i in a range that is a length of l,\n for i in range(0, n):\n # Create an index range for l of n items:\n if i == n-1:\n sub = l[i*o:]\n else:\n sub = l[i*o:i*o+o]\n \n if len(sub):\n out.append(sub)\n return out",
"def normSumList(L, normalizeTo=1.0):\n return [float(i)/normalizeTo for i in [float(i)/sum(L) for i in L]]",
"def group(iterable, n):\n groups = []\n\n if isinstance(iterable, types.GeneratorType):\n iterable = list(iterable)\n\n num_groups = ceil(len(iterable) / n)\n iterable = iter(iterable)\n\n while len(groups) < num_groups:\n groups.append(list(islice(iterable, 0, n)))\n\n return groups",
"def splitEvenly(n, l):\n for i in range(0, len(l), n):\n yield l[i : i+n]",
"def groupsizes(total, len):\n if len == 1:\n yield (total,)\n else:\n for i in range(1, total - len + 1 + 1):\n for perm in groupsizes(total - i, len - 1):\n yield (i,) + perm",
"def chunks(l, n):\n \n if n<1:\n n=1\n return [l[i:i+n] for i in range(0, len(l), n)]",
"def generate_possible_freqL(pL,aL):",
"def get_n_grams(s, n):\n n_grams = []\n for i in range(0, len(s) - n + 1):\n n_grams.append(s[i:i+n])\n return n_grams",
"def grams(tonnes=None):\n g = tonnes * 1*10**(6)\n return g",
"def normalize_occurrences(occurences, n_docs):\n normalized = []\n tot_occurrences = 0\n for gram in occurences:\n tot_occurrences += gram[1]['occurrences']\n data = (gram[0], len(gram[1]['docs'])/n_docs, gram[1]['occurrences'], gram[1]['docs'], False)\n normalized.append(data)\n\n return normalized",
"def mapSqr(L):\n power = 2\n lst = []\n # have to make a new list so old is not mutated\n # cannot do better\n for x in L:\n #lst += [x ** power]\n # faster\n lst.append(x ** power)\n return lst",
"def L(q, g, x):\n return sum([g(q[i])*l(q,i,x) for i in range(len(q))])",
"def uniform_distribution(x_l, x_u, n):\n import numpy as np\n\n # Generate and return n different discrete uniform random sample\n return np.random.random_integers(x_l, x_u, n).tolist()",
"def RS_choices(L,n):\n\t#computes the total number of choices\n\tm = reduce(lambda x,y: x+y,map(len,L))\n\tX = np.zeros((m,n)); Y = np.empty(m)\n\ti = 0; j = 0\n\tfor sigma in L:\n\t\tS = list(range(n))\n\t\tfor i in range(len(sigma)):\n\t\t\tX[j,S] = 1\n\t\t\tassert np.sum(X[j,:])>0\n\t\t\tY[j]=sigma[i]\n\t\t\tS.remove(sigma[i])\n\t\t\tj+=1\n\treturn X,Y.astype(int)",
"def wordsTo5Grams(wordlist):\n grams = []\n for k in range(len(wordlist)-4):\n grams.append(wordlist[k:k+2]+wordlist[k+3:k+5]+wordlist[k+2:k+3])\n return grams"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the average transition prob from l through log_prob_mat.
|
def avg_transition_prob(l, log_prob_mat):
log_prob = 0.0
transition_ct = 0
for a, b in ngram(2, l):
log_prob += log_prob_mat[pos[a]][pos[b]]
transition_ct += 1
# The exponentiation translates from log probs to probs.
return math.exp(log_prob / (transition_ct or 1))
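
Neither `pos` nor the training of `log_prob_mat` is shown in this snippet. A sketch of one plausible setup, purely as an assumption: a character-level bigram model over lowercase letters plus space, with a `normalize` helper (also assumed, since `ngram` above depends on it) that drops everything else.

import math
import string

accepted_chars = string.ascii_lowercase + ' '            # assumed alphabet
pos = {c: i for i, c in enumerate(accepted_chars)}       # character -> row/column index

def normalize(line):
    # assumed behaviour of the missing helper: keep only accepted characters, lowercased
    return [c.lower() for c in line if c.lower() in accepted_chars]

def train_log_prob_mat(lines, k=len(accepted_chars)):
    # add-one smoothed bigram counts over a training corpus, row-normalized into log probs
    counts = [[1.0] * k for _ in range(k)]
    for line in lines:
        for a, b in ngram(2, line):
            counts[pos[a]][pos[b]] += 1
    for row in counts:
        total = sum(row)
        for j in range(k):
            row[j] = math.log(row[j] / total)
    return counts

A matrix trained this way on English text gives noticeably higher `avg_transition_prob` scores for real English sentences than for random character strings, which is what a gibberish threshold check can key on.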
|
[
"def reward_log_prob(target_distribution, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])",
"def gen_aval_prob_log(L):\n s_list = gen_aval_list(L)\n s_range, s_prob = log_bin(s_list,0,1,1.2,'integer')\n return s_prob, s_range",
"def logprob_a(self, X, Xerr):\n if self.V is None or self.mu is None or self.weights is None:\n raise Exception(\"Model parameters not set.\")\n \n return self.GMM.logprob_a(X,Xerr)",
"def logprobchain(ind1, ind2, logA_l):\r\n p = 0\r\n M = len(logA_l)\r\n for m in np.arange(M):\r\n p += logA_l[m][ind1[m], ind2[m]]\r\n\r\n return p",
"def log_likelihood(p):\n\tp_full = np.append(p, [1.0 - sum(p)]) # one parameter for the probability of each review score\n\tprobability_list = binom.pmf(review_frequencies, nbr_reviews, p_full)\n\tlog_probability_sum = np.sum(np.log(probability_list))\n\t\n\tif np.isnan(log_probability_sum):\n\t\treturn -np.inf\n\telse:\n\t\treturn log_probability_sum",
"def log_prob(list):\n p=0\n for i in list:\n p += math.log10(i)\n return math.exp(p)",
"def _logprob(self):\n logp = -0.5 * tf.reduce_sum(self.log_vars)\n logp += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.means) /\n tf.exp(self.log_vars), axis=-1)\n\n self.logp = logp\n\n logp_old = -0.5 * tf.reduce_sum(self.log_vars)\n logp_old += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.old_means) /\n tf.exp(self.log_vars), axis=-1)\n self.logp_old = logp_old",
"def penalized_logp_score(mol, alpha=1):\r\n score = reward_penalized_log_p(mol)\r\n return alpha * score",
"def log_probability(self):\n return tf.reduce_sum(self.log_ps, axis=0)",
"def log_prob(self, ts):\n self.k_inv = np.linalg.inv(self.k)\n self.k_det = np.linalg.det(self.k)\n\n # calculate predictions at each time point\n predictors = self.munge(ts, order=self.order)\n predictions = self.a_full.dot(predictors.T)\n truths = ts[self.order:, :].T\n\n log_probs = self.log_prob_mvn(truths, means=predictions, cov_inv=self.k_inv, cov_det=self.k_det)\n return log_probs.sum()",
"def log_prob(self, weights):\n return self.dirichlet.logpdf(weights)",
"def mean_log_likelihood(model, test_data, string=True):\n if len(test_data) == 0:\n return -np.inf\n\n llh = 0.0\n\n for seq in test_data:\n if string:\n seq_llh = model.string_prob(seq, log=True)\n else:\n seq_llh = model.prefix_prob(seq, log=True)\n\n llh += seq_llh\n\n return llh / len(test_data)",
"def log_prob(sentence, LM, smoothing=False, delta=0, vocabSize=0):\n\t\n # Convert the sentence into an array form, including SENTSTART and SENTEND\n # Set up other variables before iterating\n log_prob = 0\n sentence = sentence.strip()\n sentence_array = [\"SENTSTART\"]\n sentence_array += sentence.split(' ')\n sentence_array += [\"SENTEND\"]\n prev_word = None\n \n if smoothing:\n # Delta-smoothing estimate of the sentence\n # P(wt|wt−1;δ,‖V‖) = (Count(wt−1,wt) +δ) / (Count(wt−1) +δ‖V‖).\n for word in sentence_array:\n if prev_word != None:\n num = 0\n den = 0\n if prev_word in LM['bi'] and word in LM['bi'][prev_word]: num = LM['bi'][prev_word][word]\n if prev_word in LM['uni']: den = LM['uni'][prev_word]\n log_prob += log((num + delta)/(den + delta*vocabSize), 2)\n prev_word = word\n else:\n # MLE of the sentence\n # P(wt|wt-1) = Count(wt−1,wt)/Count(wt−1)\n for word in sentence_array:\n if prev_word != None:\n if prev_word not in LM['uni'] or prev_word not in LM['bi'] or word not in LM['bi'][prev_word]:\n return float('-inf')\n log_prob += log(LM['bi'][prev_word][word]/LM['uni'][prev_word], 2)\n prev_word = word\n \n return log_prob",
"def log_p(observed_data: torch.FloatTensor,\n log_alpha: torch.FloatTensor) -> torch.FloatTensor:\n alpha = log_alpha.exp()\n return ((torch.log(observed_data) * (alpha - 1.0)).sum(-1) +\n torch.lgamma(alpha.sum(-1)) -\n torch.lgamma(alpha).sum(-1))",
"def score(self, X, Xerr):\n if self.V is None or self.mu is None or self.weights is None:\n raise Exception(\"Model parameters not set.\")\n \n logprob = self.GMM.logprob_a(X,Xerr)\n logLs = logsumexp(logprob,axis=-1)\n return np.mean(logLs)",
"def logprob(hmm, x):\n if isinstance(hmm, HMM):\n hmm = [hmm]\n if isinstance(hmm, list) and isinstance(hmm[0], HMM):\n n_objs = len(hmm)\n n_samples, n_features = x.shape\n logP = np.zeros((n_objs))\n for i in range(0, n_objs):\n logp_act = 0\n pX, logS = hmm[i].output_distr[0].prob(x, hmm[i].output_distr)\n alpha_hat, c = hmm[i].state_gen.forward(pX)\n # compute true probability with scale factor\n if np.isscalar(logS):\n logS = np.tile(logS, (n_samples))\n for j in range(0, n_samples):\n logp_act += np.log(c[j]) + logS[j]\n if len(c) == n_samples:\n # ln(c_0) + .. + ln(c_{T-1})\n logP[i] = logp_act\n else:\n logP[i] = logp_act + np.log(c[-1]) # c[-1] is not scaled\n else:\n raise ValueError(\"The first input must be an hmm object or a list of hmm objects\")\n return logP",
"def _logit_avg_expit(t):\n log_avg_prob = (\n tf.reduce_logsumexp(-tf.nn.softplus(-t), axis=0) -\n tf.math.log(tf.cast(tf.shape(t)[0], t.dtype)))\n return log_avg_prob - tf.math.log1p(-tf.exp(log_avg_prob))",
"def profile_multinomial_nll(true_profs, log_pred_profs, true_counts):\n num_samples = true_profs.shape[0]\n num_tasks = true_profs.shape[1]\n\n # Swap axes on profiles to make them N x T x 2 x O\n true_profs = np.swapaxes(true_profs, 2, 3)\n log_pred_profs = np.swapaxes(log_pred_profs, 2, 3)\n\n nll = -multinomial_log_probs(log_pred_profs, true_counts, true_profs)\n return np.mean(nll, axis=2) # Average strands",
"def rmse_log(p, gt):\n m = np.shape(p)[0]\n rmse_log = list()\n for i in range(m):\n rmse_log.append(np.sqrt(np.mean(np.square(np.log(p[i]) - np.log(gt[i])))))\n return rmse_log",
"def file_log_prob(file: Path, lm: LanguageModel, prior_prob: float) -> float:\n log_prob = 0.0\n for (x, y, z) in read_trigrams(file, lm.vocab):\n prob = lm.prob(x, y, z) * prior_prob # p(z | xy)\n log_prob += math.log(prob)\n return log_prob"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For each student response, return the feedback, if any
|
def check_all_errors(student_resp_list, expected_resp_list):
all_errors = [] # list that will hold all the feedback
for student_resp, expected_resp in zip(student_resp_list, expected_resp_list):
if student_resp == "" or student_resp is None:
return "Nothing entered"
if "\n" in student_resp:
student_resp = student_resp.replace('\n', ' ')
error = False
text = []
        # check for gibberish (currently used to ensure the response is written in English and not another language)
if (avg_transition_prob(student_resp, model_mat) < threshold):
text.append("The sentence is not fully in English.")
error = True
        # check for punctuation
        # 1. ensure that the response is not empty
        # 2. only check punctuation for responses longer than 1 word
        # 3. check whether the end of the response has a punctuation mark
if len(student_resp) > 0 and len(student_resp.split()) > 1 and student_resp[-1] not in string.punctuation:
text.append("The sentence is not properly punctuated.")
error = True
matches = tool.check(student_resp)
if len(matches) > 0:
for m in matches:
msg = bytes(m.msg, 'utf-8').decode('utf-8', 'ignore')
                # if the student's sentence does not start with an uppercase
                # letter and the expected response also does not start with an
                # uppercase letter, DO NOT flag it
if msg == "This sentence does not start with an uppercase letter" and expected_resp[0].islower():
continue
text.append(msg)
error = True
if not error:
text.append("NO ERRORS!!")
all_errors.append({"feedback": text})
return all_errors
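
The checker above also depends on three module-level names that are not defined in this snippet: `tool`, `model_mat`, and `threshold` (plus `string` for the punctuation check). A sketch of one plausible setup, assuming the `language_check` package (whose `Match` objects expose the `.msg` attribute used above); the model values are placeholders, not the original configuration:

import string
import language_check  # assumption: pip package `language-check`, a LanguageTool wrapper

# Grammar checker consumed by tool.check(...) above.
tool = language_check.LanguageTool('en-US')

# Placeholders: `model_mat` should be a character-bigram log-prob matrix trained on
# English text (as consumed by avg_transition_prob), and `threshold` the score below
# which a response is flagged as not fully English.
k = len(string.ascii_lowercase + ' ')
model_mat = [[0.0] * k for _ in range(k)]  # placeholder only; train on a real corpus
threshold = 0.02                           # placeholder cutoff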
|
[
"def response_strings(self):\n for response in self.responses:\n yield string.join([\n self.student.student_id,\n response.item.item_id,\n str(response.time_taken),\n str(response.correct),\n ],\n ',')",
"def check_response(self, answer, student_input, **kwargs):\n # Unpack the given answer\n answers = answer['expect'] # The list of answers\n msg = answer['msg']\n grade_decimal = answer['grade_decimal']\n\n # Split the student response\n student_list = student_input.split(self.config['delimiter'])\n\n # Check for the wrong number of entries\n # This is done before empty entries, as this is the preferred error message\n # if both apply.\n if self.config['length_error'] and len(answers) != len(student_list):\n msg = 'List length error: Expected {} terms in the list, but received {}. ' + \\\n 'Separate items with character \"{}\"'\n raise MissingInput(msg.format(len(answers),\n len(student_list),\n self.config['delimiter']))\n\n # Check for empty entries in the list\n if self.config['missing_error']:\n bad_items = [idx+1 for (idx, item) in enumerate(student_list)\n if item.strip() == '']\n if bad_items:\n if len(bad_items) == 1:\n msg = 'List error: Empty entry detected in position '\n else:\n msg = 'List error: Empty entries detected in positions '\n msg += ', '.join(map(str, bad_items))\n raise MissingInput(msg)\n\n # We need to keep track of missing and extra answers.\n # Idea is:\n # use _AutomaticFailure to pad expect and answers to equal length\n # modify check to reject _AutomaticFailure\n pad_ans, pad_stud = get_padded_lists(answers, student_list)\n # Modify the check function to deal with the padding\n checker = padded_check(self.config['subgrader'].check)\n\n # Compute the results\n if self.config['ordered']:\n grade_list = [checker(*pair) for pair in zip(pad_ans, pad_stud)]\n else:\n grade_list = find_optimal_order(checker, pad_ans, pad_stud)\n\n # Convert the list of grades into the SingleListGrader result\n return self.process_grade_list(grade_list, len(answers), msg, grade_decimal)",
"def tag_responses(self):\n module_logger.info('------ Tag student responses')\n for dutch_prompt in self.__student_responses__:\n for res in self.__student_responses__[dutch_prompt]:\n response_text = res['response']\n response_tokens = nltk_helper.tokenize_sentence(response_text)\n response_pos = nltk_helper.tag_tokens(response_tokens)\n res['tokens'] = response_tokens\n res['pos'] = response_pos",
"def get_feedback(self, uid, rec_list):\n feedback = list()\n for iid, prob in rec_list:\n feedback.append((iid, random.randint(0, 1)))\n\n return feedback",
"def collect_correct_responses(self):\n module_logger.info('------ Collect correct responses')\n correct_responses = {}\n collected_count = 0\n for dutch_prompt in self.__student_responses__:\n for res in self.__student_responses__[dutch_prompt]:\n if res['language'] == 'correct' and res['meaning'] == 'correct':\n if dutch_prompt not in correct_responses:\n correct_responses[dutch_prompt] = set()\n correct_responses[dutch_prompt].add(res['response'])\n collected_count += 1\n module_logger.info('------ Correct responses ::: {}'.format(collected_count))\n return correct_responses",
"def get_attempt_stats(quiz, response):\n total_marks = 0\n correct_answer = 0\n incorrect_answer = 0\n total_number = Question.objects.filter(quiz=quiz, published=True).count()\n response_data = response.get_response()\n\n for qid in response_data:\n try:\n question = Question.objects.get(id=int(qid))\n except Question.DoesNotExists:\n # there might be other kind of data in response_data we don't care about\n continue\n question_type = QUESTION_TYPE[question.question_type]\n marks = question_type.get_marks(question, extract_response(response_data, qid))\n total_marks += marks\n if marks > 0:\n correct_answer += 1\n else:\n incorrect_answer += 1\n grade = round(total_marks / db.get_quiz_total_marks(quiz), 2)\n unanswered = total_number - (correct_answer + incorrect_answer)\n if quiz.quizsettings.showAnswersAfterAttempt:\n # Student allowed to see answer and hence the grade after attending quiz\n return dict(total_grade=grade, correct=correct_answer, incorrect=incorrect_answer, \n unanswered=unanswered, total_questions=total_number, showAnswer=True)\n return dict(total_grade='Shown after exam ends', unanswered=unanswered, total_questions=total_number, showAnswer=False)",
"def get_feedback(self, question_key, answer_key):\n\t\tprint \"QN %s\" % question_key\t\n\t\tfor question in self.structure['questions']:\n\t\t\tprint question['key']\n\t\t\tif question['key'] == question_key:\n\t\t\t\tfor answer in question['answers']:\n\t\t\t\t\tif answer[\"key\"] == answer_key:\n\t\t\t\t\t\treturn answer['feedback']\n\t\treturn \"fix me\"",
"def parse_feedback(request):\n\n result = request[\"queryResult\"]\n parameters = result[\"parameters\"]\n output_context = get_output_context(request, \"build-followup\")\n\n feedback = {}\n feedback[\"original_intent\"] = output_context[\"parameters\"][\"inputText\"]\n feedback[\"nile_intent\"] = output_context[\"parameters\"][\"intent\"]\n feedback[\"entity\"] = parameters[\"entity\"]\n feedback[\"value\"] = parameters[\"any\"]\n\n return feedback",
"def view_students(request, pk):\n student_rows = []\n total_completed_templates = 0\n assignment = Assignment.objects.get(pk=pk)\n students = Student.objects.filter(assignments=assignment)\n templates = ConversationTemplate.objects.filter(assignments=assignment)\n assigned_template_count = ConversationTemplate.objects.filter(assignments=assignment).count()\n\n # per student, count number of templates in assignment they have at least one submission for\n # as well as getting name and email. One student per row for table.\n for student in students:\n completed_template_count = TemplateResponse.objects.exclude(completion_date=None) \\\n .filter(assignment=assignment,\n template__in=templates,\n student=student) \\\n .values('template') \\\n .distinct().count()\n if completed_template_count > assigned_template_count:\n completed_template_count = assigned_template_count\n if completed_template_count < 0:\n completed_template_count = 0\n total_completed_templates = total_completed_templates + completed_template_count\n\n row_data = {}\n row_data.update({'id': student.id,\n 'name': student.first_name + ' ' + student.last_name,\n 'email_address': student.email,\n 'templates_completed': str(completed_template_count) + '/' + str(assigned_template_count)})\n student_rows.append(row_data)\n assigned_students_table = AssignedStudentsTable(student_rows)\n\n total_assigned_templates = ConversationTemplate.objects.filter(assignments=assignment).count() * \\\n assignment.students.count()\n if total_completed_templates > total_assigned_templates:\n total_completed_templates = total_assigned_templates\n if total_completed_templates < 0:\n total_completed_templates = 0\n if total_assigned_templates <= 0:\n completion_string = 'No students were given this assignment.'\n else:\n completion_percent = total_completed_templates / total_assigned_templates\n completion_percent = str(completion_percent * 100).split('.', 1)[0] + '%'\n completion_string = completion_percent + ' of assigned templates have been completed at least once.'\n return render(request, 'assignment_management/view_students_modal.html', {'table': assigned_students_table,\n 'completion_string': completion_string})",
"def feedback(request, user_id):\n request.session.set_expiry(1)\n user = UserResponse.objects.filter(response_id=user_id).get()\n quiz = user.parent_quiz\n\n # Get session variables for rendering\n quiz_data = user.response_data['quiz_data']\n norm_scores_dict = user.response_data['quiz_norm_scores']\n feed_dict = user.response_data['feedback_data']\n number_of_sections = 2\n sorted_scores_limited = {k: v\n for k, v in sorted(norm_scores_dict.items(), key=lambda item: item[1])[:number_of_sections]\n }\n\n return render(request, 'quizzes/feedback.html', {\n 'quiz': quiz,\n 'feed_dict': feed_dict,\n 'norm_scores': norm_scores_dict,\n 'sorted_scores_limit': sorted_scores_limited,\n 'user_id': user_id\n })",
"def feedback(self):\n if self._result is not None:\n return self._feedback\n else:\n self.run_test()\n return self._feedback",
"def _eqTest(self, correct, student, message):\n if correct == student:\n return 1\n else:\n self.feedback += message\n self.feedback += \"\\n\\tCorrect response: %s\"%correct\n self.feedback += \"\\n\\tStudent response: %s\"%student\n return 0",
"def feedback(self):\n if self._result is None:\n self.run_tests()\n return self._feedback",
"def use_transcripts(self):\n module_logger.info('------ Use transcript to replace provided response text')\n for dutch_prompt in self.__student_responses__:\n for res in self.__student_responses__[dutch_prompt]:\n res['response'] = res['transcript']",
"def inspect_responses(self, pid, response_list):\n if not len(response_list):\n print(f'No data for participant {pid}')\n return\n\n last_response_answer_set, last_authored, last_response_type = (None, None, None)\n last_position = 0\n answer_hashes = [r['answer_hash'] for r in response_list]\n has_completed_survey = False # Track if/when a COMPLETE survey response is detected\n\n for curr_position in range(len(response_list)):\n curr_response = response_list[curr_position]\n curr_authored, curr_response_type, curr_rsp_id = (curr_response.get('authored', None),\n curr_response.get('payload_type', None),\n curr_response.get('questionnaire_response_id', None))\n # Flag indeterminate ordering for two payloads w/ identical authored timestamps but different classification\n if last_authored and last_authored == curr_authored and last_response_type != curr_response_type:\n curr_response['reason'] = 'Same authored ts as last payload (indeterminate order)'\n\n if curr_response_type == QuestionnaireResponseClassificationType.COMPLETE:\n # Notable if more than one COMPLETED survey is encountered, or if the first COMPLETE survey was\n # not the first response in the participant's history. Does not impact classification\n if has_completed_survey:\n response_list[curr_position]['reason'] = ' '.join([response_list[curr_position]['reason'],\n 'Multiple complete survey payloads'])\n elif curr_position > 0:\n response_list[curr_position]['reason'] = ' '.join([response_list[curr_position]['reason'],\n 'Partial received before first complete survey'])\n has_completed_survey = True\n\n answers = curr_response.get('answers')\n # Some outlier cases where the payload had a FHIR doc containing question codes, but no\n # answer data was sent for any of them. See: questionnaire_response_ids 101422823 or 999450910\n # These will be ignored when producing diffs between chronologically adjacent authored responses\n if not answers:\n response_list[curr_position]['payload_type'] = QuestionnaireResponseClassificationType.NO_ANSWER_VALUES\n curr_response_answer_set = None\n else:\n # Sets are used here to enable check for subset/superset relationships between response data\n curr_response_answer_set = set(answers.items())\n if last_response_answer_set is not None:\n # index() will find the first location in the answer_hashes list containing the current response's\n # answer hash. If it doesn't match the current response's position, the current response is\n # a duplicate (in answer content) of the earlier response. 
Set classification based on whether\n # authored timestamp changed\n matching_hash_idx = answer_hashes.index(curr_response['answer_hash'])\n if matching_hash_idx != curr_position:\n if curr_authored == response_list[matching_hash_idx].get('authored'):\n reclassification = QuestionnaireResponseClassificationType.DUPLICATE\n else:\n reclassification = QuestionnaireResponseClassificationType.AUTHORED_TIME_UPDATED\n\n dup_rsp_id = response_list[matching_hash_idx].get('questionnaire_response_id')\n # Update the current response's classification\n response_list[curr_position]['payload_type'] = reclassification\n response_list[curr_position]['duplicate_of'] = dup_rsp_id\n response_list[curr_position]['reason'] = ' '.join([response_list[curr_position]['reason'],\n 'Duplicate answer hash'])\n\n # Check for the cascading response signature where last/subset is made a dup of current/superset\n elif (curr_response_answer_set and curr_response_answer_set.issuperset(last_response_answer_set)\n and last_position > 0):\n response_list[last_position]['payload_type'] = \\\n QuestionnaireResponseClassificationType.DUPLICATE\n response_list[last_position]['duplicate_of'] = curr_rsp_id\n response_list[last_position]['reason'] = ' '.join([response_list[curr_position-1]['reason'],\n 'Subset of a cascading superset response'])\n\n last_authored = response_list[curr_position]['authored']\n last_response_type = response_list[curr_position]['payload_type']\n last_response_answer_set = curr_response_answer_set\n last_position = curr_position\n\n if not has_completed_survey:\n # Flag the last entry with a note that participant has no full survey\n response_list[-1]['reason'] = ' '.join([response_list[-1]['reason'],\n 'Participant has no COMPLETE survey responses'])\n\n print(f'\\n===============Results for P{pid}====================\\n')\n self.output_response_history(pid, response_list)",
"def response():\n response_list = ['Yes', 'No', 'My sources point to yes', 'Maybe', 'The outcome does not look good',\n \"I can't say for sure\", \"Perhaps\", \"Don't count on it\", \"Everything is blurry... Ask again...\",\n \"The spirits say... Yes\", \"The spirits say... No\", \"Chances are not good\", \"Chances are good\",\n \"I think not\", \"No straight answer...\", \"You can count on it\", \"The outcome looks good\",\n \"My sources point to... No\", \"I think so\", \"The spirits have left... Try again in a moment...\",\n \"If I were you, I would bet on it.\", \"If I were you I wouldn't bet on it.\"]\n return random.choice(response_list)",
"def process_payment_feedback(self, request, response):\n # Note that this may raise an exception if the response is invalid.\n # For example: MissingFieldException, UnexpectedFieldException, ...\n # The code \"above\" should be prepared to deal with it accordingly.\n response.validate()\n\n # Then, we can extract the received data...\n success, status, details = response.process()\n txn_details = self.unpack_details(details)\n\n # ... and record the audit trail.\n self._record_audit_trail(request, status, txn_details)\n\n # ... prepare the feedback data...\n output_data = {\n 'method': Constants.ADYEN,\n 'status': status,\n 'txn_details': details,\n 'ip_address': self._get_origin_ip_address(request),\n }\n\n # ... also provide the \"unpacked\" version for easier consumption...\n output_data.update(txn_details)\n\n # ... and finally return the whole thing.\n return success, status, output_data",
"def getReplyResults():",
"def get_feedback_by_service(self):\n data = []\n responses = self.responses.exclude(service=None)\n by_service = survey_utils.group_responses(responses, 'service.id', 'service')\n for service, service_responses in by_service:\n by_question = survey_utils.group_responses(service_responses, 'question.label')\n responses_by_question = dict(by_question)\n service_data = []\n for label in ['Open Facility', 'Respectful Staff Treatment',\n 'Clean Hospital Materials', 'Charged Fairly']:\n if label in responses_by_question:\n question = self.questions[label]\n question_responses = responses_by_question[label]\n total_responses = len(question_responses)\n answers = [response.response for response in question_responses]\n percentage = survey_utils.analyze(answers, question.primary_answer)\n service_data.append(('{}%'.format(percentage), total_responses))\n else:\n service_data.append((None, 0))\n if 'Wait Time' in responses_by_question:\n wait_times = [r.response for r in responses_by_question['Wait Time']]\n mode = survey_utils.get_mode(\n wait_times, self.questions.get('Wait Time').get_categories())\n service_data.append((mode, len(wait_times)))\n else:\n service_data.append((None, 0))\n data.append((service, service_data))\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a command that requests actor to move entity to a new location.
|
def move(cls, actor, entity, location):
return Command(actor, "move", (entity, location))
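
`Command` itself is not defined in this snippet; a minimal stand-in that matches the positional call above, offered purely as an assumption:

from collections import namedtuple

# Hypothetical shape: an actor, a verb string, and a tuple of arguments.
Command = namedtuple("Command", ["actor", "verb", "args"])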
|
[
"def move(self, new_location):\n pass",
"def _move_agent(self, agent_id: str, new_pos: Position):\n agent = self.agents[agent_id]\n if self.is_free(new_pos):\n agent.pos = new_pos",
"def move(self, direction, distance):\n\n return self.send('%s %s' % (direction, distance))",
"async def send_move(self, direction: Direction):",
"def move_to_joint_pos_delta(self, cmd):\n self.exec_position_cmd_delta(cmd)",
"def move(self, duration, xstep, ystep): \n \n #self.enableMotors()\n cmd = ('SM,%d,%d,%d\\r' %(duration, xstep, ystep))\n self.doCommand(cmd)\n #self.disableMotors()\n logger.info('Command sent: move x:%d y:%d in steps' % (xstep, ystep))",
"def request_player_move(self, newpos):\n pos = self.player.location.slot\n j, i = newpos\n j0, i0 = self.player.location.slot\n if self.maze.blocktype_at(i, j)['walkable']:\n self.move_player(newpos)\n elif self.maze.blocktype_at(i0, j)['walkable']:\n newpos[1] = i0\n self.move_player(newpos)\n elif self.maze.blocktype_at(i, j0)['walkable']:\n newpos[0] = j0\n self.move_player(newpos)\n self.norm_light = None",
"def move_to(self, ypos, xpos):\n # the screen's co-ordinates are 1 based, but the command is 0 based\n xpos -= 1\n ypos -= 1\n self.exec_command('MoveCursor({0}, {1})'.format(ypos, xpos).encode(\"utf-8\"))",
"def move(self, room):\n self._location = room",
"def move(self, agent: Adventurer, direction: Direction):\n self.last_actions[self.agents.index(agent)] = direction\n agent.pos = self.map.move(agent.pos, direction)\n return self.enter(agent, self.map[agent.pos])",
"def moveTo(target=None, new_id=None):",
"def move(self, action):\n raise NotImplementedError",
"def move(self):\n if self.direction == \"n\":\n self.position = (self.position[0]-1, self.position[1])\n\n elif self.direction == \"s\":\n self.position = (self.position[0]+1, self.position[1])\n\n elif self.direction == \"e\":\n self.position = (self.position[0], self.position[1]+1)\n\n elif self.direction == \"w\":\n self.position = (self.position[0], self.position[1]-1)",
"def exec_position_cmd_delta(self, cmd):\n self.set_joint_positions(self.angles() + cmd, self._joint_ids)",
"def request_move_target(self,idx,loc):\n self.crowd.requestMoveTarget(idx,pyrecast.uintp_getitem(loc[0],0),loc[1])",
"def move(self, tag, direction):\n return _move(self, tag, direction)",
"async def send_move(self, direction: Direction):\n self.move_direction = direction",
"def move_entity(self, entity, x, y, is_player = False):\n old_tile = self.tiles[entity.x][entity.y]\n new_tile = self.tiles[x][y]\n \n old_tile.entity = None\n new_tile.entity = entity\n \n entity.x = x\n entity.y = y\n \n if is_player and new_tile.inventory:\n ui.Screens.msg.add_message(\"You see %s on the ground.\" % new_tile.inventory.indef_name)",
"def mount_move(target, source):\n return mount(source=source, target=target, fs_type=None, mnt_flags=[MS_MOVE])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Divide the ranks into chunks, attempting to have `N` ranks in each chunk. This removes the master (0) rank, such that `N_ranks - 1` ranks are available to be grouped
|
def split_ranks(N_ranks, N):
available = list(range(1, N_ranks)) # available ranks to do work
total = len(available)
extra_ranks = total % N
for i in range(total//N):
yield i, available[i*N:(i+1)*N]
if extra_ranks and extra_ranks >= N//2:
remove = extra_ranks % 2 # make it an even number
ranks = available[-extra_ranks:]
if remove: ranks = ranks[:-remove]
if len(ranks):
yield i+1, ranks
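# Usage sketch (illustration only, not part of the original record): with 10
# total ranks, rank 0 stays master and the 9 worker ranks are grouped in threes.
for chunk_id, ranks in split_ranks(10, 3):
    print(chunk_id, ranks)
# 0 [1, 2, 3]
# 1 [4, 5, 6]
# 2 [7, 8, 9]
# With N_ranks=9, N=3 the 8 workers yield [1, 2, 3], [4, 5, 6] and a leftover
# chunk [7, 8], because the 2 extra ranks are >= N//2.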
|
[
"def testNGroupSplit(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupSplitter(2)\n hs_reversed = NGroupSplitter(2, reverse=True)\n\n for isreversed, splitter in enumerate((hs, hs_reversed)):\n splits = list(splitter(self.data))\n self.failUnless(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.failUnless( len(p) == 2 )\n self.failUnless( p[0].nsamples == 50 )\n self.failUnless( p[1].nsamples == 50 )\n\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n\n # check if it works on pure odd and even chunk ids\n moresplits = list(hs(splits[0][0]))\n\n for split in moresplits:\n self.failUnless(split[0] != None)\n self.failUnless(split[1] != None)\n\n # now test more groups\n s5 = NGroupSplitter(5)\n s5_reversed = NGroupSplitter(5, reverse=True)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5_reversed)):\n splits = list(s5splitter(self.data))\n\n # must have 10 splits\n self.failUnless(len(splits) == 5)\n\n # check split content\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [2, 3, 4, 5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [2, 3]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 4, 5, 6, 7, 8, 9]).all())\n # ...\n self.failUnless((splits[4][1-isreversed].uniquechunks == [8, 9]).all())\n self.failUnless((splits[4][isreversed].uniquechunks == [0, 1, 2, 3, 4, 5, 6, 7]).all())\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return [ (train, test) for (train, test) in spl(dat) ]\n s20 = NGroupSplitter(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def _split_into_groups(iterable, group_size):\n for g, group in itertools.groupby(\n enumerate(iterable),\n lambda items: items[0] // group_size\n ):\n yield [item for (i, item) in group]",
"def divide_data(data, n):\n if is_pandas(data):\n return np.array_split(data, n)\n else:\n return nchunks(data, n)",
"def divide_list_in_n_equal_chunks(_list, n):\n for i in range(0, len(_list), n):\n yield _list[i : i + n]",
"def split_into(xs: Collection, n: int) -> Collection:\n\n bucket_size, remainder = divmod(len(xs), n)\n\n # We need one fewer than `n`, since these become split positions.\n relative_splits = np.full(n - 1, bucket_size)\n # e.g. 10 by 3 -> 4, 3, 3\n relative_splits[:remainder] += 1\n\n return split(xs, np.cumsum(relative_splits))",
"def _get_subvol_ids_assigned_to_ranks(nx, ny, nz, nranks):\n ndivs_total = nx * ny * nz\n subvol_ids_assigned_to_rank = np.array_split(np.arange(ndivs_total), nranks)\n return subvol_ids_assigned_to_rank",
"def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]",
"def chunks(l, n):\n o = int(np.round(len(l)/n))\n out = []\n # For item i in a range that is a length of l,\n for i in range(0, n):\n # Create an index range for l of n items:\n if i == n-1:\n sub = l[i*o:]\n else:\n sub = l[i*o:i*o+o]\n \n if len(sub):\n out.append(sub)\n return out",
"def shrink_to_ranking_size(boxes, depths, labels, ranking_size):\n sizes = np.array([util.get_box_size(box) for box in boxes])\n sizes_to_take = np.argsort(sizes)[-ranking_size:]\n boxes = boxes[sizes_to_take]\n depths = depths[sizes_to_take]\n labels = labels[sizes_to_take]\n return boxes, depths, labels",
"def groupsizes_to_partition(*gsizes):\n idx = 0\n part = []\n for gs in gsizes:\n l = []\n for i in range(gs):\n l.append(idx)\n idx += 1\n part.append(l)\n return part",
"def max_ranks_per_node(rank_spec):",
"def group(iterable, n):\n groups = []\n\n if isinstance(iterable, types.GeneratorType):\n iterable = list(iterable)\n\n num_groups = ceil(len(iterable) / n)\n iterable = iter(iterable)\n\n while len(groups) < num_groups:\n groups.append(list(islice(iterable, 0, n)))\n\n return groups",
"def chunks_by_element(arr, n):\n return [arr[i:i+n] for i in range(0, len(arr), n)]",
"def split_into_chunks(alist, sizes):\n\n indices = np.cumsum(sizes)\n return np.split(alist, indices[:-1])",
"def chunks(l, n):\n \n if n<1:\n n=1\n return [l[i:i+n] for i in range(0, len(l), n)]",
"def split_boxes_rimwise(boxes, weights, nsplit):\n\tif len(boxes) < nsplit:\n\t\t# If we have fewer tods than processes, just assign one to each, and give empty\n\t\t# ones to the remainder\n\t\treturn [[[i]] for i in range(len(boxes))] + [[[]] for i in range(len(boxes),nsplit)]\n\tweights = np.asarray(weights)\n\t# Divide boxes into N groups with as equal weight as possible,\n\t# and as small bbox as possible\n\tn = len(boxes)\n\tgroups = []\n\t# Compute distance of every point from center. We will\n\t# start consuming points from edges\n\tcenters = np.mean(boxes,1)\n\tcenter_tot = np.mean(centers,0)\n\tcdist = calc_dist2(centers, center_tot[None])\n\ttotweight = np.sum(weights)\n\t# We keep track of which boxes have already been\n\t# processed via a mask.\n\tmask = np.full(n, True, dtype=np.bool)\n\tcumweight = 0\n\tfor gi in xrange(nsplit):\n\t\t# Compute the target weight for this group.\n\t\t# On average this should simply be totweight/nsplit,\n\t\t# but we adjust it on the fly to compensate for any\n\t\t# groups that end up deviating from this.\n\t\ttargweight = (totweight-cumweight)/(nsplit-gi)\n\t\tp = unmask(np.argmax(cdist[mask]),mask)\n\t\tmask[p] = False\n\t\t# Find distance of every point to this point. Ouch, this\n\t\t# makes the algorithm O(N^2) if one doesn't introduce gridding\n\t\tpdist = calc_dist2(centers[mask], centers[p,None])\n\t\tdinds = unmask(np.argsort(pdist),mask)\n\t\tcumw = np.cumsum(weights[dinds])\n\t\t# We will use as many of the closest points as\n\t\t# needed to reach the target weight, but not\n\t\t# so many that there aren't enough points left\n\t\t# for at least one per remaining mpi task.\n\t\tif gi == nsplit-1:\n\t\t\tnsel = None\n\t\telse:\n\t\t\tnsel = len(np.where(cumw < targweight)[0])\n\t\t\tnsel = max(0,min(nsel, np.sum(mask)-(nsplit-gi)))\n\t\tgroup = np.concatenate([[p],dinds[:nsel]])\n\t\tgroups.append([group])\n\t\tmask[group] = False\n\t\tcumweight += np.sum(weights[group])\n\treturn groups",
"def split_into_groups_of(groupsize, thelist):\n\tresult = []\n\n\tfor i in range(0, len(thelist), groupsize):\n\t\tresult.append(thelist[i:i+groupsize])\n\n\treturn result",
"def get_seed_chunks(\n graph: nx.Graph,\n num_chunks: int,\n num_dists: int,\n pop_target: Union[int, float],\n pop_col: str,\n epsilon: float,\n node_repeats: int = 1,\n method: Callable = partial(bipartition_tree_random, max_attempts=10000),\n) -> List[List[int]]:\n num_chunks_left = num_dists // num_chunks\n parts = range(num_chunks)\n new_epsilon = epsilon / (num_chunks_left * num_chunks)\n if num_chunks_left == 1:\n new_epsilon = epsilon\n\n chunk_pop = 0\n for node in graph.node_indices:\n chunk_pop += graph.nodes[node][pop_col]\n\n while True:\n epsilon = abs(epsilon)\n\n flips = {}\n remaining_nodes = set(graph.nodes)\n\n min_pop = pop_target * (1 - new_epsilon) * num_chunks_left\n max_pop = pop_target * (1 + new_epsilon) * num_chunks_left\n\n chunk_pop_target = chunk_pop / num_chunks\n\n diff = min(max_pop - chunk_pop_target, chunk_pop_target - min_pop)\n new_new_epsilon = diff / chunk_pop_target\n\n for i in range(len(parts[:-1])):\n part = parts[i]\n\n nodes = method(\n graph.subgraph(remaining_nodes),\n pop_col=pop_col,\n pop_target=chunk_pop_target,\n epsilon=new_new_epsilon,\n node_repeats=node_repeats,\n )\n\n if nodes is None:\n raise BalanceError()\n\n for node in nodes:\n flips[node] = part\n remaining_nodes -= nodes\n\n # All of the remaining nodes go in the last part\n for node in remaining_nodes:\n flips[node] = parts[-1]\n\n part_pop = 0\n for node in remaining_nodes:\n part_pop += graph.nodes[node][pop_col]\n part_pop_as_dist = part_pop / num_chunks_left\n fake_epsilon = epsilon\n if num_chunks_left != 1:\n fake_epsilon = epsilon / 2\n min_pop_as_dist = pop_target * (1 - fake_epsilon)\n max_pop_as_dist = pop_target * (1 + fake_epsilon)\n\n if part_pop_as_dist < min_pop_as_dist:\n new_epsilon = new_epsilon / 2\n elif part_pop_as_dist > max_pop_as_dist:\n new_epsilon = new_epsilon / 2\n else:\n break\n\n chunks: Dict[Any, List] = {}\n for key in flips.keys():\n if flips[key] not in chunks.keys():\n chunks[flips[key]] = []\n chunks[flips[key]].append(key)\n\n return list(chunks.values())",
"def partitioned(items, n=3):\n max_items_per_bucket, rem = divmod(len(items), n)\n if rem:\n max_items_per_bucket += 1\n bucket = []\n\n for item in items:\n if len(bucket) >= max_items_per_bucket:\n yield bucket\n bucket = []\n bucket.append(item)\n\n yield bucket"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the `RSDFitDriver` object on all ranks
|
def initialize_driver(self):
# update with first value
itask = 0
# master will parse the args
this_config = None; rsdfit_args = None
if self.comm.rank == 0:
# get the kwargs
kwargs = self.task_kwargs(itask)
# this writes out the param file
with self.command.update(kwargs, self.formatter) as command:
this_config = command.param_file
self.logger.debug("creating temporary file: %s" %this_config)
rsdfit_args = command.args
# bcast the file name to all in the worker pool
this_config = self.comm.bcast(this_config, root=0)
rsdfit_args = self.comm.bcast(rsdfit_args, root=0)
self.temp_config = this_config
# get the args
self.logger.debug("calling rsdfit with arguments: %s" %str(rsdfit_args))
args = None
if self.comm.size > 1:
if self.comm.rank == 0:
args = self.parser.parse_args(rsdfit_args)
args = self.comm.bcast(args, root=0)
else:
args = self.parser.parse_args(rsdfit_args)
# load the driver for everyone but root
if self.comm.rank != 0:
args = vars(args)
mode = args.pop('subparser_name')
self.driver = rsdfit.RSDFitDriver(self.pool_comm, mode, **args)
|
[
"def __init__(self, ranker):\n super().__init__(Tautology() if ranker is None else ranker)",
"def __init__(self, ranker=None):\n super().__init__(Tautology() if ranker is None else ranker)",
"def rank_dependent_metrics(self):\n rank = self.last_extr_aut.nbS\n self.ranks.append(rank)\n print(\"Metrics for rank {0} :\".format(rank))\n self.y_test_extr = [self.last_extr_aut.val(w) for w in self.x_test]\n self.y_rnnw_extr = [self.last_extr_aut.val(w) for w in self.x_rnnw]\n self.y_test_extr_prefixes = proba_all_prefixes_aut(self.last_extr_aut, self.x_test)\n self.y_rnnw_extr_prefixes = proba_all_prefixes_aut(self.last_extr_aut, self.x_rnnw)\n self.kld_test_rnn_extr = scores.kullback_leibler(self.y_test_rnn, self.fix_probas(self.y_test_extr))\n self.ndcg1_test_rnn_extr = scores.ndcg(self.x_test, self.rnn_model, self.last_extr_aut, ndcg_l=1,\n dic_ref=self.y_test_rnn_prefixes, dic_approx=self.y_test_extr_prefixes)\n self.ndcg1_rnnw_rnn_extr = scores.ndcg(self.x_rnnw, self.rnn_model, self.last_extr_aut, ndcg_l=1,\n dic_ref=self.y_rnnw_rnn_prefixes, dic_approx=self.y_rnnw_extr_prefixes)\n self.ndcg5_test_rnn_extr = scores.ndcg(self.x_test, self.rnn_model, self.last_extr_aut, ndcg_l=5,\n dic_ref=self.y_test_rnn_prefixes, dic_approx=self.y_test_extr_prefixes)\n self.ndcg5_rnnw_rnn_extr = scores.ndcg(self.x_rnnw, self.rnn_model, self.last_extr_aut, ndcg_l=5,\n dic_ref=self.y_rnnw_rnn_prefixes, dic_approx=self.y_rnnw_extr_prefixes)\n t, e = scores.wer_aut(self.last_extr_aut, self.x_test)\n self.wer_test_extr = e / t\n t, e = scores.wer_aut(self.last_extr_aut, self.x_rnnw)\n self.wer_rnnw_extr = e / t\n self.eps_test_zeros_extr = len([x for x in self.y_test_extr if x <= 0.0]) / len(self.y_test_extr)\n self.eps_rnnw_zeros_extr = len([x for x in self.y_rnnw_extr if x <= 0.0]) / len(self.y_rnnw_extr)\n self.perprnn_test_extr = scores.pautomac_perplexity(self.y_test_rnn, self.fix_probas(self.y_test_extr))\n self.perprnn_rnnw_extr = scores.pautomac_perplexity(self.y_rnnw_rnn, self.fix_probas(self.y_rnnw_extr))\n\n if self.metrics_calc_level > 1:\n self.y_rand_extr = [self.last_extr_aut.val(w) for w in self.x_rand]\n self.perp_test_extr = scores.pautomac_perplexity(self.y_test_target, self.fix_probas(self.y_test_extr))\n self.kld_test_target_extr = scores.kullback_leibler(self.y_test_target, self.fix_probas(self.y_test_extr))\n self.ndcg1_test_target_extr = scores.ndcg(self.x_test, self.true_automaton, self.last_extr_aut, ndcg_l=1,\n dic_ref=self.y_test_target_prefixes,\n dic_approx=self.y_test_extr_prefixes)\n self.ndcg5_test_target_extr = scores.ndcg(self.x_test, self.true_automaton, self.last_extr_aut, ndcg_l=5,\n dic_ref=self.y_test_target_prefixes,\n dic_approx=self.y_test_extr_prefixes)\n self.perp_rand_extr = scores.pautomac_perplexity(self.y_rand_target, self.fix_probas(self.y_rand_extr))\n self.kld_rand_rnn_extr = scores.kullback_leibler(self.fix_probas(self.y_rand_rnn),\n self.fix_probas(self.y_rand_extr))\n self.kld_rand_extr_rnn = scores.kullback_leibler(self.y_rand_extr, self.fix_probas(self.y_rand_rnn))\n self.kld_rand_target_extr = scores.kullback_leibler(self.y_rand_target, self.fix_probas(self.y_rand_extr))\n self.eps_kl_rand_target_extr = neg_zero(self.y_rand_extr, self.y_rand_target)\n self.eps_rand_zeros_extr = len([x for x in self.y_rand_extr if x <= 0.0]) / len(self.y_rand_extr)\n # self.l2dis_target_extr = scores.l2dist(self.true_automaton, extr_aut, l2dist_method=\"gramian\")\n\n # pr(self.quiet, \"\\tEvaluating words and prefixes...\")\n # pr(self.quiet, \"\\tRank-dependent metrics...\")\n\n self.metrics[(rank, \"perp-test-extr\")] = self.perp_test_extr\n self.metrics[(rank, \"perp-test-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"perp-rand-extr\")] = 
self.perp_rand_extr\n self.metrics[(rank, \"perp-rand-extr-eps\")] = self.eps_rand_zeros_extr\n self.metrics[(rank, \"kld-test-rnn-extr\")] = self.kld_test_rnn_extr\n self.metrics[(rank, \"kld-test-rnn-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"kld-test-target-extr\")] = self.kld_test_target_extr\n self.metrics[(rank, \"kld-test-target-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"kld-rand-rnn-extr\")] = self.kld_rand_rnn_extr\n self.metrics[(rank, \"kld-rand-rnn-extr-eps\")] = self.eps_rand_zeros_extr\n self.metrics[(rank, \"kld-rand-extr-rnn\")] = self.kld_rand_extr_rnn\n self.metrics[(rank, \"kld-rand-target-extr\")] = self.kld_rand_target_extr\n self.metrics[(rank, \"kld-rand-target-extr-eps\")] = self.eps_rand_zeros_extr\n self.metrics[(rank, \"(1-wer)-test-extr\")] = (1 - self.wer_test_extr if self.wer_test_extr is not None else None)\n self.metrics[(rank, \"(1-wer)-rnnw-extr\")] = (1 - self.wer_rnnw_extr if self.wer_rnnw_extr is not None else None)\n self.metrics[(rank, \"ndcg1-test-rnn-extr\")] = self.ndcg1_test_rnn_extr\n self.metrics[(rank, \"ndcg1-test-target-extr\")] = self.ndcg1_test_target_extr\n self.metrics[(rank, \"ndcg1-rnnw-rnn-extr\")] = self.ndcg1_rnnw_rnn_extr\n self.metrics[(rank, \"ndcg5-test-rnn-extr\")] = self.ndcg5_test_rnn_extr\n self.metrics[(rank, \"ndcg5-test-target-extr\")] = self.ndcg5_test_target_extr\n self.metrics[(rank, \"ndcg5-rnnw-rnn-extr\")] = self.ndcg5_rnnw_rnn_extr\n # self.metrics[(rank, \"l2dis-target-extr\")] = self.l2dis_target_extr\n self.metrics[(rank, \"perprnn-test-rnn\")] = self.perprnn_test_rnn\n self.metrics[(rank, \"perprnn-test-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"perprnn-test-extr\")] = self.perprnn_test_extr\n self.metrics[(rank, \"perprnn-rnnw-rnn\")] = self.perprnn_rnnw_rnn\n self.metrics[(rank, \"perprnn-rnnw-extr-eps\")] = self.eps_rnnw_zeros_extr\n self.metrics[(rank, \"perprnn-rnnw-extr\")] = self.perprnn_rnnw_extr",
"def __init__(self, rank_name, rank_strenght):\n\n self.rank_name = rank_name\n self.rank_strenght = rank_strenght",
"def init_sdr():\n\n LOGGER.info(\"Performing SDR initialisation on application startup\")\n sdr.start(samples_callback=spectrum_density_estimator(broadcast))",
"def __init__(self, dataset, minibatch, num_workers, size, rank):\n if dataset not in datasets_list:\n print(\"Existing datasets are: \", datasets_list)\n raise\n self.dataset = datasets_list.index(dataset)\n self.batch = minibatch * num_workers\n self.num_workers = num_workers\n self.num_ps = size - num_workers\n self.rank = rank",
"def init(self):\n # Support both distributed and non-distributed training\n local_rank = os.environ.get(\"LOCAL_RANK\")\n if local_rank is not None:\n dist.init_process_group(\n \"nccl\", timeout=timedelta(seconds=self.nccl_timeout)\n )\n assert (\n th.cuda.is_available()\n ), \"CUDA must be available for distributed training\"\n th.cuda.set_device(self.local_rank)",
"def init_driver(cls):\n pass",
"def __init__(self, n_estimators: int = 100,\n max_depth: Optional[int] = 1,\n learning_rate: float = 0.1,\n init_est_type: str = \"mean\",\n use_deterministic_trees: bool = False):\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.learning_rate = learning_rate\n self.init_est_type = init_est_type\n self.use_deterministic_trees = use_deterministic_trees",
"def initialize_support(\n self, criterion, optimizer, scheduler\n ):\n\n self.criterion = criterion\n self.optimizer = optimizer\n self.scheduler = scheduler",
"def __init__(self, base_learners_dict=None, stacked_learner=\"logistic\", param_dict={'C': 10},\n nfolds=5, results_dir=None):\n self.base_learners_dict = base_learners_dict\n self.s_type = stacked_learner\n self.nfolds = nfolds\n\n if stacked_learner == \"logistic\":\n self.stacked_learner = LogisticRegression()\n elif stacked_learner == \"linear_svm\":\n self.stacked_learner = SVC(kernel=\"linear\", probability=True)\n elif stacked_learner == \"rbf_svm\":\n self.stacked_learner = SVC(kernel=\"rbf\", probability=True)\n else:\n raise RuntimeError(\"Incorrect choice of stacked learner.\")\n\n if results_dir is None:\n results_dir = \".\"\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n self.param_dict = param_dict\n self.tr_preds = []\n self.tr_probs = []\n self.tr_aucs = []\n self.models = []\n self.model_name = []\n self.labels = []\n self.results_dir = []\n self.classes_ = []\n self.splitter = StratifiedKFold(n_splits=self.nfolds)",
"def __init__(self, statistics=None, fitter_name=None, *, rpo_scale=1.0,\n rlo_scale=1.0, rpo_size=1.0, rlo_size=1.0,\n mean_y_const=False, var_y_const=False, mean_y_func=None,\n var_y_func=None, mean_y_extents=None, var_y_extents=None,\n mean_r_const=True, mean_r_func=None, mean_r_extents=None,\n **kwargs):\n self._init_switcher = dict(mean_x=self.initialize_mean_x,\n var_x=self.initialize_var_x,\n mean_y=self.initialize_mean_y,\n var_y=self.initialize_var_y,\n mean_r=self.initialize_mean_r)\n self._fitter_names = dict.fromkeys(self.__class__._fitter_types)\n self._get_name(fitter_name)\n self.logger = init_logger(self.name)\n self._fitters = dict.fromkeys(self._fitter_types, None)\n self.logger.debug(\"Set bin sizes and separation scaling\")\n self.rpo_size = rpo_size\n self.rlo_size = rlo_size\n self.rpo_scale = rpo_scale\n self.rlo_scale = rlo_scale\n self._mean_y_const = mean_y_const\n self._var_y_const = var_y_const\n self._mean_r_const = mean_r_const\n self._mean_y_extents = mean_y_extents\n self._var_y_extents = var_y_extents\n self._mean_r_extents = mean_r_extents\n self.logger.debug(\"Add statistics\")\n self.add_stats(statistics)\n self.logger.debug(\"__init__ complete\")",
"def __init__(self,sc,dataset_path, mlInstance = None):\r\n\r\n self.sc = sc\r\n self.spark = SparkSession.builder.appName(self.sc).getOrCreate()\r\n self.data = self.spark.read.csv(dataset_path,inferSchema = True, header = True)\r\n self.training, self.test = self.data.randomSplit([0.7,0.3])\r\n self.rank = 8\r\n self.maxIter = 10\r\n self.regParam = 0.01\r\n self.__train_ALS_model()\r\n\r\n if mlInstance == None:\r\n self.__train_ALS_model()\r\n else:\r\n self.__import_model(mlInstance)",
"def _init_subset_loader(self):\n # All strategies start with random selection\n self.subset_indices = self._init_subset_indices()\n self.subset_weights = torch.ones(self.budget)\n self._refresh_subset_loader()",
"def __init__(self, envs, discrete_sampler=DiscreteUniformSampler()):\n np.random.seed(None)\n self.envs = envs\n self.sampler = discrete_sampler",
"def __init__(self, design):\n super(ols_model, self).__init__()\n self.initialize(design)",
"def init_fitness_and_inds(self):\n pass",
"def __init__(\n self,\n ax=None,\n fig=None,\n algorithm=None,\n features=None,\n show_feature_names=True,\n **kwargs\n ):\n super(RankDBase, self).__init__(ax=ax, fig=fig, features=features, **kwargs)\n\n # Data Parameters\n self.ranking_ = algorithm\n\n # Display parameters\n self.show_feature_names_ = show_feature_names",
"def __init__(self):\n self.knn = KNeighborsClassifier()\n self.lda = LinearDiscriminantAnalysis()\n self.randomforest = RandomForestClassifier()\n self.feature_importances = []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify signing out a user with an invalid token throws an error.
|
def test_get_signout_invalid_user(self):
with self.client:
response = self.client.get(
'/auth/signout',
headers={'Authorization': 'Bearer invalid'}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['message'], 'Invalid token. Signin again.')
self.assertEqual(response.content_type, 'application/json')
self.assert401(response)
|
[
"def test_user_with_invalid_token(self):\n result = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json',\n 'Authorization': \"abcdefghijklm\"})\n self.assertEqual(result.status_code, 401)\n self.assertIn('Invalid token. Please register or login',\n str(result.data))\n no_token = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json'})\n self.assertEqual(no_token.status_code, 401)\n self.assertIn('Please register or login', str(no_token.data))",
"def test_get_signout_user_with_expired_token(self):\n\n user = add_user(USERNAME, EMAIL, PASSWORD)\n with self.client:\n token = get_jwt(self.client, user.email)\n time.sleep(4)\n response = self.client.get(\n '/auth/signout',\n headers={'Authorization': 'Bearer ' + token}\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['status'], 'error')\n self.assertEqual(data['message'], 'Signature expired. Signin again.')\n self.assert401(response)",
"def logout():\n if g._current_user is None:\n raise UserNotLoggedInException()\n\n if not invalidate(g._current_session_token):\n raise UserNotLoggedInException()",
"def test_user_profile_invalid_token():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n # Logging out invalidates your token\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n user_profile(user['token'], user['u_id'])",
"def test_invalid_token_when_user_has_no_token(self):\n self.assertTrue(invalid_token(\"wrong_user_id\", \"fake_token\"))",
"def _terminate_bad_token(error, unverified_claims, oidc_id_token):\n log.info(\n 'OpenID Connect ID Token login failed. Bad (malicious?) JWT. Send '\n 'generic 401 response. Error: %s. Token: %s, Unverified claims: %s',\n error,\n oidc_id_token,\n unverified_claims\n )\n\n raise falcon.HTTPUnauthorized(\n description='OpenID Connect ID Token login failed: bad token',\n )",
"def test_delete_invalid_token(self):\n rv = self.delete('/group/{group_id}/'.format(group_id=self.group.id),\n token='invalid')\n self.assertJsonError(rv, 404, 'User not found (via token)')\n return",
"def signOut():\n authenticator.authenticate()\n token = flask.request.headers.get('auth_token')\n models.AuthTokenBlacklist(token=token).save()",
"def logout_user(token):\n is_success = False\n for user in db.DATABASE['users']:\n if user[\"token\"] == token:\n user['token'] = \"\"\n is_success = True\n return is_success",
"def test_invalid_token(self):\n register = self.client.post(\n self.SIGN_UP_URL,\n self.user_data,\n format=\"json\",\n ) \n login = self.client.post(\n self.SIGN_IN_URL,\n self.user_data,\n format=\"json\")\n\n token = json.loads(login.content)['user']['token']\n\n #tamper with the token authorizarion header\n self.client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + 'token')\n\n #try acessing a secured endpoint\n get_user = self.client.get(\n self.USER_URL\n )\n\n self.assertTrue('cannot decode token', json.loads(get_user.content)['user']['detail'])",
"def test_google_login_invalid_token_user_not_new(\n self, verify_google_oauth2_token):\n verify_google_oauth2_token.return_value = {\n 'email': 'jon@mail.com',\n 'name': 'Jon'\n }\n self.client.post(self.url_google,\n data=json.dumps(\n invalid_google_token),\n content_type='application/json')\n response = self.client.post(self.url_google,\n data=json.dumps(\n invalid_google_token),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_dont_create_token_with_invalid_credentials(self):\n create_user(email='email@test.com', password='wert1234', name='Test')\n\n res = self.client.post(TOKEN_URL, self.payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)\n # res.data => {'non_field_errors': [ErrorDetail(string='Unable to authenticate with provided credentials', code='authentication')]}",
"def test_delete_user_invalid_email(self):\n print('(' + self.test_delete_user_invalid_email.__name__ + ')', self.test_delete_user_invalid_email.__doc__)\n nickname = 'Mystery'\n resp = self.client.delete(resources.api.url_for(resources.User, nickname=nickname)\n + '?author_email=animal%40mymail.com')\n self._assertErrorMessage(resp, 401, 'Wrong authentication')",
"def test_fail_token_revoked(self, token_context):\n _, _, token = token_context\n token.revoke()\n\n @gate()\n def fn():\n return \"ok\"\n\n with pytest.raises(api_res.TokenExpired):\n fn()",
"def token_should_fail_to_verify(self, r):\n expect(r).to_be_an_error()\n expect(str(r)).to_equal('nbf claim not present')",
"def _assert_access_token_destroyed(self, user):\n assert not dot_access_token.objects.filter(user=user).exists()\n assert not dot_refresh_token.objects.filter(user=user).exists()",
"def test_blacklisted_token(self):\n self.request_logic('/api/auth/logout',data=None, code=200,\n msg='User Successfully logged out')\n\n res = self.requester_method(url='/api/auth/logout', method='post',\n data=None)\n result = json.loads(res.data.decode())\n self.assertEqual(result['msg'], 'Token has been revoked')",
"def test_delete_an_interest_by_unauthenticated_user_fails(self):\n response = self.client.delete(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_body[\"SubCode\"], \"InvalidToken\")",
"def test_create_token_user_does_not_exist(self):\n\n payload = {'email': \"test@test.com\",\n 'password': \"wrongpassword\"}\n\n # send auth request\n res = self.client.post(self.TOKEN_URL,\n payload)\n # verify response code\n self.assertEqual(res.status_code,\n status.HTTP_400_BAD_REQUEST)\n # verify that token not in the response\n self.assertNotIn('token', res.data)\n # verify that no password in response\n self.assertNotIn('password', res.data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify signing out a user with an expired token throws an error.
|
def test_get_signout_user_with_expired_token(self):
user = add_user(USERNAME, EMAIL, PASSWORD)
with self.client:
token = get_jwt(self.client, user.email)
time.sleep(4)
response = self.client.get(
'/auth/signout',
headers={'Authorization': 'Bearer ' + token}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['message'], 'Signature expired. Signin again.')
self.assert401(response)
|
[
"def test_get_signout_invalid_user(self):\n\n with self.client:\n response = self.client.get(\n '/auth/signout',\n headers={'Authorization': 'Bearer invalid'}\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['status'], 'error')\n self.assertEqual(data['message'], 'Invalid token. Signin again.')\n self.assertEqual(response.content_type, 'application/json')\n self.assert401(response)",
"def logout():\n if g._current_user is None:\n raise UserNotLoggedInException()\n\n if not invalidate(g._current_session_token):\n raise UserNotLoggedInException()",
"def test_user_with_invalid_token(self):\n result = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json',\n 'Authorization': \"abcdefghijklm\"})\n self.assertEqual(result.status_code, 401)\n self.assertIn('Invalid token. Please register or login',\n str(result.data))\n no_token = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json'})\n self.assertEqual(no_token.status_code, 401)\n self.assertIn('Please register or login', str(no_token.data))",
"def test_expired_token_failing_jwt_auth(self):\n payload = utils.jwt_payload_handler(self.user)\n payload[\"exp\"] = 1\n token = utils.jwt_encode_handler(payload)\n\n auth = \"Bearer {0}\".format(token)\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n expected_error = [\"Signature has expired.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)",
"def test_authenticate_expired_token(self):\n data = {\n 'username': self.user.username,\n 'password': 'Test123!'\n }\n\n response = self.client.post(reverse('token_api'), data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n token = TemporaryToken.objects.get(\n user__username=self.user.username,\n )\n token.expire()\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n\n # This could be any url and any method. It is only used to test the\n # token authentication.\n response = self.client.delete(\n reverse(\n 'authentication-detail',\n kwargs={'pk': 'invalid_token'},\n ),\n )\n\n content = {'detail': 'Token has expired'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_get_signout_inactive_user(self):\n\n user = add_user(USERNAME, EMAIL, PASSWORD)\n user.active = False\n db.session.commit()\n with self.client:\n token = get_jwt(self.client, user.email)\n response = self.client.get(\n '/auth/signout',\n headers={'Authorization': 'Bearer ' + token}\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['status'], 'error')\n self.assertEqual(data['message'], 'Something went wrong. Please contact us.')\n self.assert401(response)",
"def test_delete_expired_users(self):\n expired_user = SignupManager.create_user(**self.user_info)\n expired_user.date_joined -= datetime.timedelta(days=auth_settings.BAPH_ACTIVATION_DAYS + 1)\n expired_user.save()\n\n deleted_users = SignupManager.delete_expired_users()\n\n self.failUnlessEqual(deleted_users[0].username, 'alice')",
"def _assert_access_token_destroyed(self, user):\n assert not dot_access_token.objects.filter(user=user).exists()\n assert not dot_refresh_token.objects.filter(user=user).exists()",
"def test_fail_token_revoked(self, token_context):\n _, _, token = token_context\n token.revoke()\n\n @gate()\n def fn():\n return \"ok\"\n\n with pytest.raises(api_res.TokenExpired):\n fn()",
"def my_expired_token_callback():\n\n\tlog.debug(\"-@- expired token checker\")\n\n\t### if user is not confirmed, delete user from DB\n\t### otherwise return a link to refresh refresh_token\n\n\treturn jsonify({\n\t\t\t'msg'\t\t: 'The token has expired',\n\t\t\t'status'\t: 401,\n\t\t\t'sub_status': 42,\n\t}), 401",
"def test_token_expired(self):\n self.token.created = self.token.created - datetime.timedelta(days=40)\n self.token.save()\n response = self.csrf_client.post(\n '/token/', {'example': 'example'},\n HTTP_AUTHORIZATION='Token %s' % self.token.key, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def signOut():\n authenticator.authenticate()\n token = flask.request.headers.get('auth_token')\n models.AuthTokenBlacklist(token=token).save()",
"def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)",
"def test_user_profile_invalid_token():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n # Logging out invalidates your token\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n user_profile(user['token'], user['u_id'])",
"def logout_user(token):\n is_success = False\n for user in db.DATABASE['users']:\n if user[\"token\"] == token:\n user['token'] = \"\"\n is_success = True\n return is_success",
"def is_session_access_token_expired(request, user=None):\n user = user if user is not None else request.user\n now = time.time()\n return not user.is_authenticated \\\n or 'ACCESS_TOKEN' not in request.session \\\n or 'ACCESS_TOKEN_EXPIRES_AT' not in request.session \\\n or request.session['ACCESS_TOKEN_EXPIRES_AT'] < now",
"def logout_user(request):\n request.user.auth_token.delete()\n return response.Response()",
"def is_token_expired(self):\n now = datetime.now()\n dt = now - self.token_time\n return dt.total_seconds() > (60 * 30)",
"def test_service_logout_user_v1(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify signing out an inactive user throws an error.
|
def test_get_signout_inactive_user(self):
user = add_user(USERNAME, EMAIL, PASSWORD)
user.active = False
db.session.commit()
with self.client:
token = get_jwt(self.client, user.email)
response = self.client.get(
'/auth/signout',
headers={'Authorization': 'Bearer ' + token}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['message'], 'Something went wrong. Please contact us.')
self.assert401(response)
|
[
"def logout():\n if g._current_user is None:\n raise UserNotLoggedInException()\n\n if not invalidate(g._current_session_token):\n raise UserNotLoggedInException()",
"def test_get_signout_invalid_user(self):\n\n with self.client:\n response = self.client.get(\n '/auth/signout',\n headers={'Authorization': 'Bearer invalid'}\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['status'], 'error')\n self.assertEqual(data['message'], 'Invalid token. Signin again.')\n self.assertEqual(response.content_type, 'application/json')\n self.assert401(response)",
"def test_get_signout_user_with_expired_token(self):\n\n user = add_user(USERNAME, EMAIL, PASSWORD)\n with self.client:\n token = get_jwt(self.client, user.email)\n time.sleep(4)\n response = self.client.get(\n '/auth/signout',\n headers={'Authorization': 'Bearer ' + token}\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['status'], 'error')\n self.assertEqual(data['message'], 'Signature expired. Signin again.')\n self.assert401(response)",
"def test_log_out_user(self):\n pass",
"def test_service_logout_user_v1(self):\n pass",
"def signOut():\n authenticator.authenticate()\n token = flask.request.headers.get('auth_token')\n models.AuthTokenBlacklist(token=token).save()",
"def signout():\n read_bash_return('op signout')",
"def signout():\n read_bash_return(\"op signout\")",
"def logout_user(self):",
"def test_user_invalidated_offline(self):\n orig_user = Session.Storage.users[self.regular_user_id]\n\n self.login(self.regular_user_id)\n\n self.assertStatus(self.client.get('/auth'), 200)\n\n del Session.Storage.users[self.regular_user_id]\n\n self.assertStatus(self.client.get('/auth'), 401)\n\n Session.Storage.users[self.regular_user_id] = orig_user",
"def test_user_with_invalid_token(self):\n result = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json',\n 'Authorization': \"abcdefghijklm\"})\n self.assertEqual(result.status_code, 401)\n self.assertIn('Invalid token. Please register or login',\n str(result.data))\n no_token = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json'})\n self.assertEqual(no_token.status_code, 401)\n self.assertIn('Please register or login', str(no_token.data))",
"def persona_logout():\n if 'user_id' in session:\n del session['user_id']\n return 'OK'",
"def test_user_account_lockout(self):\n if (CONF.identity.user_lockout_failure_attempts <= 0 or\n CONF.identity.user_lockout_duration <= 0):\n raise self.skipException(\n \"Both CONF.identity.user_lockout_failure_attempts and \"\n \"CONF.identity.user_lockout_duration should be greater than \"\n \"zero to test this feature\")\n\n password = self.creds.password\n\n # First, we login using the correct credentials\n self.non_admin_token.auth(user_id=self.user_id, password=password)\n\n # Lock user account by using the wrong password to login\n bad_password = data_utils.rand_password()\n for _ in range(CONF.identity.user_lockout_failure_attempts):\n self.assertRaises(exceptions.Unauthorized,\n self.non_admin_token.auth,\n user_id=self.user_id,\n password=bad_password)\n\n # The user account must be locked, so now it is not possible to login\n # even using the correct password\n self.assertRaises(exceptions.Unauthorized,\n self.non_admin_token.auth,\n user_id=self.user_id,\n password=password)\n\n # If we wait the required time, the user account will be unlocked\n time.sleep(CONF.identity.user_lockout_duration + 1)\n self.non_admin_token.auth(user_id=self.user_id, password=password)",
"def test_login_unregistered_user(self):\n response = self.user_login_req(data=self.user_login)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['errors']['error'][0],\n \"Incorrect email or password.\")",
"def logout_verify(self, response):\n\t\tpass",
"def test_auth_login_inactive(self):\n user = get_user_model().objects.get(username=self.user_data[0])\n user.is_active = False\n user.save()\n response = self.client.post(\n reverse(\"auth_login_api\"), data=self.serializer_data\n )\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json[\"nonFieldErrors\"][0],\n _(\"User account is not activated.\"),\n )",
"def test_logout_employee_fail(self):\n\n # logout global user\n self.client.get(reverse('employees:logout'), follow=True)\n\n logged_in = self.client.login(username='marta', password='passs')\n self.assertFalse(logged_in)\n\n response = self.client.get(reverse('employees:logout'), follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'employees/login.html')",
"def test_users_can_logout(self):\n\n with self.client:\n self.client.post('/login',\n data={'username': 'marcel',\n 'password': '1234'},\n follow_redirects=True)\n response = self.client.get('/logout')\n\n username = current_user.username if not current_user.is_anonymous else 'anonymous'\n self.assertTrue(current_user.is_anonymous, msg=f'user <{username}> is '\n f'still logged in')\n self.assertTrue(status.is_redirect(response.status_code))",
"def test_logout(self):\n self.login()\n\n response = self.logout()\n\n assert_status_with_message(200, response, \"You have been logged out\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify user cannot get profile with invalid token.
|
def test_get_profile_invalid_token(self):
with self.client:
response = self.client.get(
'/auth/profile',
headers={'Authorization': 'Bearer invalid'}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['message'], 'Invalid token. Signin again.')
self.assertEqual(response.content_type, 'application/json')
self.assert401(response)
|
[
"def test_user_profile_invalid_token():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n # Logging out invalidates your token\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n user_profile(user['token'], user['u_id'])",
"def test_user_profile_invalid_user_id():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n with pytest.raises(InputError):\n user_profile(user['token'], -1)",
"def validate_token(auth_token):\n try:\n # create an instance of the facebook graph\n facebook_graph = facebook.GraphAPI(\n access_token=auth_token, version=\"3.0\"\n )\n # Get user data\n user_data = facebook_graph.request('/me?fields=id,name,email')\n return user_data\n except facebook.GraphAPIError:\n msg = \"Invalid or expired token\"\n return msg",
"def testUserWithNoProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def test_profile_unauthenticated(self):\n response = self.client.get('/profile/')\n eq_(response.status_code, 403)",
"def test_invalid_token_when_user_has_no_token(self):\n self.assertTrue(invalid_token(\"wrong_user_id\", \"fake_token\"))",
"def test_refresh_token_invalid_scope(self):\n self.do_refresh_token_check(scope=['openid', 'profile'])",
"def testUserWithProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(self.program.key(), user=user)\n\n access_checker = access.HasNoProfileAccessChecker()\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def test_get_user_info_invalid_token(self):\n response = self.client.get(\n 'user_info',\n headers={'authorization': 'faketoken'},\n content_type='application/json'\n )\n response_data = json.loads(response.data)\n\n self.assertEqual(response_data['data']['message'],\n 'Unauthorized. The authorization token supplied is invalid')\n self.assertEqual(response_data['status'], 'fail')\n self.assert401(response)",
"def test_invalid_token(self):\n register = self.client.post(\n self.SIGN_UP_URL,\n self.user_data,\n format=\"json\",\n ) \n login = self.client.post(\n self.SIGN_IN_URL,\n self.user_data,\n format=\"json\")\n\n token = json.loads(login.content)['user']['token']\n\n #tamper with the token authorizarion header\n self.client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + 'token')\n\n #try acessing a secured endpoint\n get_user = self.client.get(\n self.USER_URL\n )\n\n self.assertTrue('cannot decode token', json.loads(get_user.content)['user']['detail'])",
"def test_get_profiles_forbidden(self):\n oauth2_header = self._get_oauth_header(client_index=0)\n\n res = self.client.get('/v1/profiles/', **oauth2_header)\n self.assertEquals(res.status_code, 403)",
"def test_user_with_invalid_token(self):\n result = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json',\n 'Authorization': \"abcdefghijklm\"})\n self.assertEqual(result.status_code, 401)\n self.assertIn('Invalid token. Please register or login',\n str(result.data))\n no_token = self.app.post(url_prefix+'/auth/logout',\n headers={'Content-Type': 'application/json'})\n self.assertEqual(no_token.status_code, 401)\n self.assertIn('Please register or login', str(no_token.data))",
"def verify_profile_availability(self, profile):\n pass",
"def token_should_fail_to_verify(self, r):\n expect(r).to_be_an_error()\n expect(str(r)).to_equal('nbf claim not present')",
"def test_view_shared_simulation_invalid_token(self):\n luke = User(\n **{\n 'email': 'luke@arclytics.io',\n 'first_name': 'Luke',\n 'last_name': 'Skywalker'\n }\n )\n luke.set_password('NeverJoinYou')\n luke.verified = True\n luke.save()\n\n bad_token = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaa'\n url = generate_url('share.view_shared_simulation', bad_token)\n\n with self.client as client:\n test_login(client, luke.email, 'NeverJoinYou')\n resp = client.get(url, content_type='application/json')\n\n data = json.loads(resp.data.decode())\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(data['message'], 'Invalid token.')",
"def testUserWithBannedProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(\n self.program.key(), user=user,\n status=ndb_profile_model.Status.BANNED)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def test_google_login_invalid_token_user_not_new(\n self, verify_google_oauth2_token):\n verify_google_oauth2_token.return_value = {\n 'email': 'jon@mail.com',\n 'name': 'Jon'\n }\n self.client.post(self.url_google,\n data=json.dumps(\n invalid_google_token),\n content_type='application/json')\n response = self.client.post(self.url_google,\n data=json.dumps(\n invalid_google_token),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_token_user_does_not_exist(self):\n\n payload = {'email': \"test@test.com\",\n 'password': \"wrongpassword\"}\n\n # send auth request\n res = self.client.post(self.TOKEN_URL,\n payload)\n # verify response code\n self.assertEqual(res.status_code,\n status.HTTP_400_BAD_REQUEST)\n # verify that token not in the response\n self.assertNotIn('token', res.data)\n # verify that no password in response\n self.assertNotIn('password', res.data)",
"def testUserWithStudentProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedSOCStudent(self.program, user=user)\n\n access_checker = access.NON_STUDENT_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify getting the profile of an inactive user throws an error.
|
def test_get_profile_inactive_user(self):
user = add_user(USERNAME, EMAIL, PASSWORD)
user.active = False
db.session.commit()
with self.client:
token = get_jwt(self.client, user.email)
response = self.client.get(
'/auth/profile',
headers={'Authorization': 'Bearer ' + token}
)
data = json.loads(response.data.decode())
self.assertEqual(data['status'], 'error')
self.assertEqual(data['message'], 'Something went wrong. Please contact us.')
self.assert401(response)
|
[
"def verify_profile_availability(self, profile):\n pass",
"def testUserWithProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(self.program.key(), user=user)\n\n access_checker = access.HasNoProfileAccessChecker()\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def testUserWithNoProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def test_user_profile_invalid_user_id():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n with pytest.raises(InputError):\n user_profile(user['token'], -1)",
"def testUserWithNoProfileAccessGranted(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n\n access_checker = access.HasNoProfileAccessChecker()\n access_checker.checkAccess(self.data, None)",
"def test_user_has_profile_is_not_hirable(self):\n this_user = self.users[0]\n this_user.profile.hireable = False\n this_user.save()\n self.assertTrue(self.users[0].profile.hireable is False)",
"def testUserWithBannedProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(\n self.program.key(), user=user,\n status=ndb_profile_model.Status.BANNED)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def test_user_profile_invalid_token():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n # Logging out invalidates your token\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n user_profile(user['token'], user['u_id'])",
"def test_get_profile_invalid_token(self):\n\n with self.client:\n response = self.client.get(\n '/auth/profile',\n headers={'Authorization': 'Bearer invalid'}\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['status'], 'error')\n self.assertEqual(data['message'], 'Invalid token. Signin again.')\n self.assertEqual(response.content_type, 'application/json')\n self.assert401(response)",
"def test_profile_unauthenticated(self):\n response = self.client.get('/profile/')\n eq_(response.status_code, 403)",
"def validate_profile_exists(self):\n\n if self.args.profile_name not in self.profiles:\n self.handle_error('Could not find profile \"{}\"'.format(self.args.profile_name))",
"def test_get_user_inexistent_user(self):\n user = self.cm.get_user(\"NonExistent\")\n self.assertEqual(user, None)",
"def testUserWithActiveProfileAccessGranted(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(self.program.key(), user=user)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n access_checker.checkAccess(self.data, None)",
"def check_profile_exists(cls, user_id):\n profile = cls.c.execute(\n select([cls.table]).where(cls.table.c.user_id == user_id)\n ).fetchone()\n\n return profile is not None",
"def test_get_profiles_forbidden(self):\n oauth2_header = self._get_oauth_header(client_index=0)\n\n res = self.client.get('/v1/profiles/', **oauth2_header)\n self.assertEquals(res.status_code, 403)",
"def check_profile(profile=None):\n from conf.profiles import getAllProfilesObjects\n from core.exceptions import ProfileDoesNotExist\n\n if not profile:\n return False\n\n profile_available = []\n [profile_available.append(p.name) for p in getAllProfilesObjects()]\n try:\n for p in profile:\n if p not in profile_available: # Check profile exist\n raise ProfileDoesNotExist(\"Profile %s doesnt exist !\" % profile)\n else:\n return True\n except ProfileDoesNotExist as pne:\n print pne\n exit(pne.code)",
"def testUserWithStudentProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedSOCStudent(self.program, user=user)\n\n access_checker = access.NON_STUDENT_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def testUserProfileForAnotherProgramAccessDenied(self):\n other_program = program_utils.seedProgram(sponsor_key=self.sponsor.key())\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(other_program.key(), user=user)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)",
"def test_get_user_bad_id(self):\n res = self.backend.get_user(-1)\n\n self.assertIsNone(res)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This command reads the digital port tristate register. The tristate register determines if the latch register value is driven onto the port pin. A '1' in the tristate register makes the corresponding pin an input, a '0' makes it an output.
|
def DTristateR(self):
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = 0
wIndex = 0
value, = self.udev.controlRead(request_type, self.DTRISTATE, wValue, wIndex, 1, self.HS_DELAY)
return value
|
[
"def DTristateW(self, value):\n request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)\n request = self.DTRISTATE\n wValue = value & 0xff\n wIndex = 0\n self.udev.controlWrite(request_type, request, wValue, wIndex, [0x0], self.HS_DELAY)",
"def digitalRead(self,pinNum):\r\n self.mcuserial.write('r' + chr(pinNum) + chr(0))\r\n bt = self.mcuserial.read(1)\r\n if bt == '0' or bt == '1':\r\n return int(bt)\r\n else:\r\n return 0",
"def switch_1_state():\n switch1_value = switch_1.read()\n return switch1_value",
"def switch_4_state():\n switch4_value = switch_4.read()\n return switch4_value",
"def switch_3_state():\n switch3_value = switch_3.read()\n return switch3_value",
"def _get_read_register(self, port_number):\n part = self._get_part(port_number)\n if part == self._PART_A:\n return 0x12\n elif part == self._PART_B:\n return 0x13",
"def pinState(gpio_pin):\r\n # With sysfs driver this is identical to digitalRead()\r\n return digitalRead(gpio_pin)",
"def switch_2_state():\n switch2_value = switch_2.read()\n return switch2_value",
"def read_state(self, port: int) -> np.uint8:\n return np.uint8(0)",
"def _strobeReadBit(self):\n\t\tself.fpga.write_int(self.controller_name, 0x20 , offset = commandReg,blindwrite=True)",
"def _read_register(self, regsiter_addr):\n self.cs.low() \n self.spi.write(bytes([_READ, regsiter_addr]))\n regVal = self.spi.read(1)\n self.cs.high()\n\n return int.from_bytes(regVal, 'big')",
"def __read_register8(self, register_addr):\n\t\t\n\t\t# Create a read command\n\t\tcommand = 0b01000000 | ((register_addr & 0b111) << 3)\n\t\tself.spi.write(bytearray([command]))\n\t\t\n\t\t# Read the result\n\t\tresult = bytearray(1)\n\t\tself.spi.readinto(result)\n\t\t\n\t\t# Return the result\n\t\treturn result[0]",
"def _sync_read_pin(self, pin):\n RPIO.setup(pin, RPIO.IN,\n pull_up_down=(RPIO.PUD_UP if self.invert_logic else RPIO.PUD_DOWN))\n val = bool(RPIO.input(pin))\n if self.invert_logic:\n val = not val\n return val",
"def read_led(self, pin):\n value = 0 # Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logger.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logger.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value",
"def PortRead():\r\n global gTelnetConn\r\n if gTelnetConn == None:\r\n OpenTelnet()\r\n \r\n data = gTelnetConn.read()\r\n return data;",
"def get_temperature_control_status(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 09\"))\n temp = self.board_socket.recv(1024)\n return(temp[3])",
"def state(port, on, off):\n\n if on and off:\n print(\"Error : Please specify '--on' or '--off', not both.\")\n return\n\n if on or off:\n if port is None:\n print(\"Error : Please specify at least one port with '--port' flag\")\n return\n else:\n port = [int(p) for p in port.split(\",\")]\n\n if on:\n hub.data_enable(ports=port)\n elif off:\n hub.data_disable(ports=port)\n else:\n _print_row(PORTS)\n _print_row(hub.data_state())\n _print_row(hub.speeds())",
"def read_gpio(port):\n self._state = orangepi_gpio.read_input(self._port)\n self.schedule_update_ha_state()",
"def read(pin):\n hum, temp = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, pin)\n if hum is not None and temp is not None and hum <= 100:\n print(\"{0:0.1f},{1:0.1f}\".format(temp, hum))\n else:\n print(\"failure\")\n sys.exit(1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This command writes the digital port tristate register. The tristate register determines whether the latch register value is driven onto the port pin. A '1' in the tristate register makes the corresponding pin an input, a '0' makes it an output.
|
def DTristateW(self, value):
request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
request = self.DTRISTATE
wValue = value & 0xff
wIndex = 0
self.udev.controlWrite(request_type, request, wValue, wIndex, [0x0], self.HS_DELAY)
|
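As a hedged illustration, the read and write commands can be combined into a read-modify-write that changes one pin's direction without disturbing the others; the helper name and the 8-bit register width are assumptions, not part of the original driver.

# Hypothetical read-modify-write on the tristate register:
# setting a bit makes the pin an input, clearing it makes the pin an output.
def set_pin_direction(dev, pin, make_input):
    tristate = dev.DTristateR()
    if make_input:
        tristate |= (1 << pin)
    else:
        tristate &= ~(1 << pin) & 0xff
    dev.DTristateW(tristate)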
[
"def DTristateR(self):\n request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)\n wValue = 0\n wIndex = 0\n value, = self.udev.controlRead(request_type, self.DTRISTATE, wValue, wIndex, 1, self.HS_DELAY)\n return value",
"def set_tmds_output(self, value):\n assert type(value) is bool\n read = bytearray(self._device.readRaw(PI3HDMI336_TOTAL_BYTES))\n read[PI3HDMI336_OFFSET_BYTE0] = \\\n (read[PI3HDMI336_OFFSET_BYTE0] & (~PI3HDMI336_BYTE0_TMDS_OUTPUT_ENABLE)) | (value << 5)\n self._device.writeRaw(read)",
"def setDTR(self, value:bool)->None:\n self.serial.setDTR(value)",
"def PortWrite( data ):\r\n global gTelnetConn\r\n if gTelnetConn == None:\r\n OpenTelnet()\r\n \r\n gTelnetConn.write( data )\r\n \r\n return;",
"def set_dtr(self, value, *args, **kwargs):\n with self.change_connection():\n self.connection.dtr = bool(value)",
"def led_set(state):\n l = Pin(LED, Pin.OUT)\n l.value(state)",
"def send_traffic(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n # print(pack)\n serialport.write(pack)",
"def set_port_resistor(self, port, value):\n assert type(value) is bool\n assert 0 <= port <= 2\n port = 7 - port\n read = bytearray(self._device.readRaw(PI3HDMI336_TOTAL_BYTES))\n read[PI3HDMI336_OFFSET_BYTE1] = \\\n (read[PI3HDMI336_OFFSET_BYTE1] & (~(0x01 << port))) | (value << port)\n self._device.writeRaw(read)",
"def digitalState(self,pinNum, io):\r\n if type(io) == str:\r\n if io.lower() == 'input':\r\n self.mcuserial.write('=' + chr(pinNum) + chr(1))\r\n elif io.lower() == 'output':\r\n self.mcuserial.write('=' + chr(pinNum) + chr(0))\r\n else:\r\n sys.stderr.write('Need to Specify either \"input\" or \"output\"')\r\n return\r\n elif type(io) == int:\r\n if io == 1:\r\n self.mcuserial.write('=' + chr(pinNum) + chr(1))\r\n elif io == 0:\r\n self.mcuserial.write('=' + chr(pinNum) + chr(0))\r\n else:\r\n sys.stderr.write('Need to Specify either 1 for input or 0 for output')\r\n return",
"def state(port, on, off):\n\n if on and off:\n print(\"Error : Please specify '--on' or '--off', not both.\")\n return\n\n if on or off:\n if port is None:\n print(\"Error : Please specify at least one port with '--port' flag\")\n return\n else:\n port = [int(p) for p in port.split(\",\")]\n\n if on:\n hub.data_enable(ports=port)\n elif off:\n hub.data_disable(ports=port)\n else:\n _print_row(PORTS)\n _print_row(hub.data_state())\n _print_row(hub.speeds())",
"def _strobeWriteBit(self):\n\t\tself.fpga.write_int(self.controller_name, 0x10 , offset = commandReg,blindwrite=True)",
"def write(self, state):\n assert state < 256, \"State >= 256\"\n try:\n self.I2C.send(state, self.Address)\n except:\n print(\"Unable to set I/O state\")",
"def LRST_TX_A(self, value):\n if value not in [0, 1]:\n raise ValueError(\"Value must be [0,1]\")\n self._writeReg('CHIPCFG', 'LRST_TX_A', value)",
"def serial_tx(string):\r\n Serial2.println(string)",
"def set_timer(self, boolean):\r\n if str(boolean).upper() in ['ON', 'OFF']:\r\n self.inst.write(f\"OUTP:TIM {boolean}.upper()\")\r\n if int(boolean) in range(2):\r\n self.inst.write(f\"OUTP:TIM {boolean}\")\r\n else:\r\n raise ValueError(\"Value Error. Please enter ON, OFF, 0, or 1.\")",
"def setdtr(self, dtr):\n try:\n self.ser.setDTR(dtr)\n self.log(\"DTR set to \"+`dtr`)\n return True\n except SilentException:\n return False",
"def write_bit(self, value):\n self.ds2482.single_bit(value)",
"def output(self, pin, state):\n led_dict = {PIN_RED_LED_0: \"The first red LED\",\n PIN_RED_LED_1: \"The second red LED\",\n PIN_RED_LED_2: \"The third red LED\",\n PIN_BLUE_LED: \"The blue LED\"}\n\n if not (pin in led_dict.keys()):\n show_error_and_exit(\n \"Output pin is out of range! Please use valid LED pins!\")\n else:\n if not (state in {self.LOW, self.HIGH}):\n show_error_and_exit('invalid LED state!')\n else:\n state_str_dict = {self.HIGH: \"ON\", self.LOW: \"OFF\"}\n if self.pin_states[pin] != state:\n verb = 'becomes'\n else:\n verb = 'is still'\n print(\"%s %s %s.\" %\n (led_dict[pin], verb, state_str_dict[state]))\n self.pin_states[pin] = state",
"def toggle(gpio_pin):\r\n digitalWrite(gpio_pin, digitalRead(gpio_pin) ^ 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This command reads the value of an analog input channel. It will result in a bus stall if an AInScan is currently running.
|
def AIn(self, channel):
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = channel
wIndex = 0x0
value, = unpack('H', self.udev.controlRead(request_type, self.AIN, wValue, wIndex, 2, timeout=100))
data = round(float(value) * self.table_AIn[channel].slope + self.table_AIn[channel].intercept)
if data >= 65536:
value = 65535
elif data < 0:
value = 0
else:
value = data
return value
|
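The slope/intercept correction and clamping inside AIn can be shown in isolation; the calibration numbers below are invented for the example and are not device calibration data.

# Illustrative only: apply a slope/intercept calibration to a raw reading
# and clamp the corrected value to the unsigned 16-bit range, as AIn does.
def apply_calibration(raw, slope=1.002, intercept=-12.0):
    corrected = round(float(raw) * slope + intercept)
    return max(0, min(corrected, 65535))

# apply_calibration(65500) -> 65535 (clamped), apply_calibration(100) -> 88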
[
"def analog_read_once (ser_port, channel):\n\n\tprint 'Analog ', channel, '\\r'\n\tser_port.write (str (channel))\n\n\tch_in = '\\0'\n\twhile ch_in != '\\n':\n\t\tch_in = ser_port.read ()\n\t\tprint ch_in",
"def ReadAnalogVal(self, pin):\r\n \r\n if (type(pin) != type(1)):\r\n raise TypeError, \"Read analog value was expecting integer type argument\"\r\n \r\n packet = self.START_FLAG + self.ANA_IP+str(pin)\r\n print \"The packet we are sending out is %s \" %packet\r\n self.comm.write(packet)\r\n val = self.comm.read()\r\n print val\r\n if val[1] == True:\r\n digi_val = self.__make_sense_of_the_data__(val[0])\r\n if (digi_val == \"\"):\r\n raise IOError, \"No value received from the board \"\r\n try:\r\n ana_val = float(self.REF_VLTG/(self.RES-1))* int(digi_val)\r\n except:\r\n raise IOError, \"The board did not respond with an integer number \"\r\n ana_val = str(ana_val)\r\n return ana_val\r\n else :\r\n raise ControllerExceptions.ResponseTimedOutException, \"The board did not respond, we timed out \"",
"def raw_sample(self, channel):\n\n if channel >= len(input_lines):\n raise ValueError, \"Unrecognized channel\"\n\n if digihw.get_channel_type(channel) != Analog:\n raise ValueError, \"Not an analog input channel\"\n\n\t# Calibrate every calInterval seconds\n\tnow = time.clock()\n\tif debug:\n\t print \"time is %f, calTime is %f\" % (now, calTime)\n\tif now >= calTime + calInterval or now < calTime:\n\t self.calibrate()\n\n result = self.XBeeCommandGet(\"is\")\n\n val = float(parseIS(result)[\"AI%d\" % channel])\n val = scale * val\n val1 = int(round(val))\n return val1",
"def read_adc(self, channel):\n s_data = bytearray([1, 160|(channel<<6), 0])\n r_data = self.spi_rack.read_data(self.module, 1, MCP320x_MODE, MCP320x_SPEED, s_data)\n return (r_data[1]&0xF)<<8 | r_data[2]",
"def IN(channel):\n channel.join_reader()\n return ChannelEndRead(channel)",
"def readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if ((adcnum > 7) or (adcnum < 0)):\n return -1\n GPIO.output(cspin, True)\n\n GPIO.output(clockpin, False) # start clock low\n GPIO.output(cspin, False) # bring CS low\n\n commandout = adcnum\n commandout |= 0x18 # start bit + single-ended bit\n commandout <<= 3 # we only need to send 5 bits here\n for i in range(5):\n if (commandout & 0x80):\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n\n adcout = 0\n # read in one empty bit, one null bit and 10 ADC bits\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if (GPIO.input(misopin)):\n adcout |= 0x1\n\n GPIO.output(cspin, True)\n\n adcout >>= 1 # first bit is 'null' so drop it\n return adcout",
"def get_analog(jPin):\n return j_pins[jPin-1].read_analog()",
"def read_value(self, channel):\n value = None\n reply = self.comm(47 + channel)\n if self.ranges[channel]['action'] == 'voltage':\n num_value = reply - 2 ** 15\n scale = 1.0 * 2 ** 15 / float(self.ranges[channel]['fullrange'])\n value = num_value / scale\n if self.ranges[channel]['action'] == 'tc':\n scale = 1.0 * 2 ** 16 / 1400\n value = (reply/scale) - 150\n return value",
"def read_analog(cls, row: int, column: int)->typing.Union[int, None]:\n\t\treturn int(cls.wooting_dll.wooting_read_analog(*(ctypes.c_ubyte(v) for v in [row, column])))",
"def get_adc_value(self, channel):\r\n if channel == 1:\r\n command = 0x10\r\n elif channel == 2:\r\n command = 0x20\r\n elif channel == 3:\r\n command = 0x40\r\n elif channel == 4:\r\n command = 0x80\r\n else:\r\n raise ValueError(\"channel must be 1, 2, 3, or 4\")\r\n\r\n # Tell the ADC to convert a specific channel\r\n self.bus.write_byte(self.ADC_ADDRESS, command)\r\n\r\n # Get the conversion (read always gets most recent conversion)\r\n data = self.bus.read_word_data(self.ADC_ADDRESS, 0x00)\r\n\r\n # Process the data\r\n data = self.endian_swap(data)\r\n data = self.mask_high(data)\r\n # 52 is the value we got for 0.5V, so subtract this so we can work from 0 rather than 52.\r\n data -= 52\r\n\r\n # 1538 is the range in the values the ADC can give us between 0.5V and 2.5V\r\n # If were out of the scaled range we specified, set to the max or min value for our range.\r\n if data < 0:\r\n data = 0\r\n elif data > 1538:\r\n data = 1538\r\n \r\n return data",
"def read(self, channel):\n\n if not isinstance(channel, str): #Sets string for channel\n channel = \"In{}\".format(channel)\n \n response = self.get_variable(\"{}.value\".format(channel))\n \n # Extract the response using a regex in case verbose mode is on\n match = re.search(r\"[-+]?\\d*\\.\\d+\", response.decode(\"utf-8\"))\n \n if match is not None:\n return float(match.group())\n else:\n raise RuntimeError(\"Unable to read from channel {}\".format(channel))",
"def get_analog_value(self, port):\n self.i += 1\n return np.sin(self.i/10)*Q_('V')",
"def analog_out(self, channel, value=None, verify_only=False):\n if channel not in [1, 2]:\n self.logger.error('incorrect channel number')\n return\n upr = self.properties['ao'][channel]['upper_limit']\n lwr = self.properties['ao'][channel]['lower_limit']\n if value > upr:\n self.logger.info(f'{value} exceeds ch{channel} limit, clipping to {upr}')\n value = upr\n elif value < lwr:\n self.logger.info(f'{value} exceeds ch{channel} limit, clipping to {lwr}')\n value = lwr\n if not verify_only:\n self.instrument.write_analog(value, channel-1)\n return value",
"def grove_analog(self):\n return _read_sysfs(self._tla2021 + '/in_voltage0_raw')",
"def readValue(self):\n\n data = self.bus.read_i2c_block_data(self.addr, ADS1115_REG_CONVERT, 2)\n \n # Convert the data\n raw_adc = data[0] * 256 + data[1]\n\n if raw_adc > 32767:\n raw_adc -= 65535\n raw_adc = int(float(raw_adc)*self.coefficient)*4\n return raw_adc",
"def _sync_read_pin(self, pin):\n RPIO.setup(pin, RPIO.IN,\n pull_up_down=(RPIO.PUD_UP if self.invert_logic else RPIO.PUD_DOWN))\n val = bool(RPIO.input(pin))\n if self.invert_logic:\n val = not val\n return val",
"def analog_input_setup(self, conditions):\r\n pass",
"def read_barcode():\n print 'Scan barcode now!'\n line = sys.stdin.readline().strip()\n os.system('clear')\n out = int(line)\n return out",
"def get_fire_value(self):\n\n #Analog Input (A0)\n return self.flame_pin_analog.read()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This command allows for reading and writing the nonvolatile user memory. wLength specifies the number of bytes to read or write. The user memory is 256 bytes (addresses 0 - 0xFF).
|
def UserMemoryR(self, address, count):
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = address & 0xffff # force to be 16 bits
wIndex = 0
if count > 256:
raise ValueError('UserMemoryR: max bytes that can be read is 256.')
if address > 0xff:
raise ValueError('UserMemoryR: address must be in the range 0 - 0xff.')
try:
result = self.udev.controlRead(request_type, self.USER_MEMORY, wValue, wIndex, count, timeout=100)
return result
except:
logging.debug("UserMemoryR: controlRead error")
|
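A small sketch, assuming the UserMemoryR method above, that reads the entire 256-byte user memory; the chunk size and helper name are arbitrary and only illustrative.

# Hypothetical wrapper: read the full 256-byte user memory in fixed-size
# chunks using UserMemoryR defined above.
def read_all_user_memory(dev, chunk=64):
    data = bytearray()
    for address in range(0, 0x100, chunk):
        data.extend(dev.UserMemoryR(address, chunk))
    return bytes(data)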
[
"def writemem(self, address, data):\n\n if 'qemu-' in os.path.realpath('/proc/%i/exe' % self.pid):\n self.error(\"Cannot use leaker on binaries under QEMU.\")\n\n with open('/proc/%i/mem' % self.pid, 'wb') as mem:\n mem.seek(address)\n return mem.write(data)",
"def readMemory(mbed, address, count):\n write(mbed, 0xfe)\t\t# Send the read memory opcode\n write32(mbed, address)\t# Send the 32 bits start address (4 bytes)\n write16(mbed, count)\t# Send how many bytes we want to read (2 bytes)\n return read(mbed, count)\t# Read the number of bytes from the device",
"def MBDMemoryR(self, address, count):\n request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)\n wValue = address & 0xffff # force to be 16 bits\n wIndex = 0\n\n if count > 1023:\n raise ValueError('MBDMemoryR: max bytes that can be read is 512.')\n\n if address > 0x3ff:\n raise ValueError('MBDMemoryR: address must be in the range 0 - 0x3ff.')\n\n try:\n result = self.udev.controlRead(request_type, self.SETTINGS_MEMORY, wValue, wIndex, count, timeout=100)\n\n return result\n except:\n logging.debug(\"MBDMemoryR: controlRead error\")",
"def writeMemory(mbed, address, count, data):\n write(mbed, 0xfd)\t\t# Send the write memory opcode\n write32(mbed, address)\t# Send the 32 bits destination address (4 bytes)\n write16(mbed, count)\t# Send how many bytes we want to write (2 bytes)\n for i in range(len(data)):\t# Write data\n write(mbed, data[i])\n return 0",
"def op_loadw(self, base, offset):\n val = self._memory.read_word(base + 2*offset)\n self._write_result(val)",
"def _read(self, command, nbytes, arguments=b\"\"):\n padding = b\"\\x00\" * nbytes\n tx = struct.pack(\"B\", command) + arguments + padding\n self.programmer.flash_mode()\n self.programmer.select()\n rx = self.programmer.write(tx)\n self.programmer.unselect()\n return rx[1:]",
"def get_writable_memory(self):\n for finder in self.files:\n addr = finder.parser.get_writable_memory()\n if addr != None:\n return addr\n raise RuntimeError(\"Couldn't find a .data section when looking for writable memory\")",
"def read(self, nbytes, write=0x00) -> bytes:\n ...",
"def SetMem_propagate_taint(ql, address, params):\n begin = params['Buffer']\n end = begin + params['Size']\n # r8b corresponds to the 'UINT8 Value' parameter.\n taint = ql.triton_ctx.isRegisterTainted(ql.triton_ctx.registers.r8b)\n set_taint_range(ql, begin, end, taint)",
"def cmd_mem2file(self, ui, args):\n x = util.file_mem_args(ui, args, self.cpu.device)\n if x is None:\n return\n (name, adr, size) = x\n if size is None:\n ui.put('invalid length')\n return\n # adjust the address and length\n adr = util.align(adr, 32)\n n = util.nbytes_to_nwords(size, 32)\n # read memory, write to file object\n mf = iobuf.write_file(ui, 'writing to %s' % name, name, n * 4)\n self.cpu.rdmem32(adr, n, mf)\n mf.close()",
"def writeMemory(self, addr, value, transfer_size=32):\n self.emu.write_memory(addr, value)",
"def read_mem(self, register_to):\n #if register_to > 16:\n # raise OverflowError(\"Requested too many registers: {} > 16\".format(self.V[register]))\n\n #if self.I + register_to >= self.RAM_SIZE_BYTES:\n # raise OverflowError(\"Memory out of range: {} - {}\".format(self.I, self.I + self.V[register]))\n for i in range(register_to + 1):\n self.V[i] = self.ram[self.I + i]",
"def test_memory_read_write(self):\n rbcp = Rbcp(\"127.0.0.1\")\n\n # Generate test data.for write.\n send_data = bytearray(0xff)\n for num in range(0xff):\n send_data[num] = num + 1\n\n # Read initial data of register.\n print(\"initial\")\n read_data = rbcp.read(0xffffff00, 255)\n print(read_data)\n\n # Write.\n rbcp.write(0xffffff00, send_data)\n\n # Read written data and compare with initial data.\n print(\"written\")\n read_data = rbcp.read(0xffffff00, 255)\n print(read_data)\n self.assertEqual(send_data, read_data)",
"def windowsRamOLD(self):\n kernel32 = windll.kernel32\n #kernel32 = ctypes.windll.kernel32\n #c_ulong = ctypes.c_ulong\n class MEMORYSTATUS(Structure):\n _fields_ = [\n (\"dwLength\", c_ulong),\n (\"dwMemoryLoad\", c_ulong),\n (\"dwTotalPhys\", c_ulong),\n (\"dwAvailPhys\", c_ulong),\n (\"dwTotalPageFile\", c_ulong),\n (\"dwAvailPageFile\", c_ulong),\n (\"dwTotalVirtual\", c_ulong),\n (\"dwAvailVirtual\", c_ulong)\n ]\n memoryStatus = MEMORYSTATUS()\n memoryStatus.dwLength = sizeof(MEMORYSTATUS)\n kernel32.GlobalMemoryStatus(byref(memoryStatus))\n \n return int(memoryStatus.dwTotalPhys/1024**2)",
"def readMem(addr): \n return mem[addr]",
"def _forward_hook(self, uc, access, address, size, value, user_data):\n pc = self.read_register(self.arch.pc_name)\n if access == unicorn.UC_MEM_READ or access == unicorn.UC_MEM_FETCH:\n msg = RemoteMemoryReadMessage(self._origin, 0, pc, address, size)\n write_back = True\n elif access == unicorn.UC_MEM_WRITE:\n msg = RemoteMemoryWriteMessage(self._origin, 0, pc, address, value, size)\n write_back = False\n else:\n raise ValueError('Forward hook with unknown access {}'.format(access))\n\n self._avatar_queue.put(msg)\n value, success = self._rmp_queue.get()\n if not success:\n self.log.debug('Remote memory request returned 0x{:x}'.format(value))\n elif write_back and not self.write_memory(address, size, value):\n self.log.debug('Failed to write back remote memory')",
"def upload_program_memory(self, program_memory):\n return self.tftp_put(\n program_memory,\n \"prgmem\")",
"def read_memory_status(self):\n return self.host.read_memory_status(self)",
"def modify_memory(self, virtual_quantity):\n uri = self.href + '/virtualHardwareSection/memory'\n item = self.client.get_resource(uri)\n item['{' + NSMAP['rasd'] + '}ElementName'] = \\\n '%s virtual CPU(s)' % virtual_quantity\n item['{' + NSMAP['rasd'] + '}VirtualQuantity'] = virtual_quantity\n return self.client.put_resource(uri, item, EntityType.RASD_ITEM.value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This command allows for reading and writing the nonvolatile MBD memory. wLength specifies the number of bytes to read or write. The MBD memory is 1024 bytes (addresses 0 - 0x3FF).
|
def MBDMemoryR(self, address, count):
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = address & 0xffff # force to be 16 bits
wIndex = 0
if count > 1023:
        raise ValueError('MBDMemoryR: max bytes that can be read is 1023.')
if address > 0x3ff:
raise ValueError('MBDMemoryR: address must be in the range 0 - 0x3ff.')
try:
result = self.udev.controlRead(request_type, self.SETTINGS_MEMORY, wValue, wIndex, count, timeout=100)
return result
except:
logging.debug("MBDMemoryR: controlRead error")
|
[
"def readMemory(mbed, address, count):\n write(mbed, 0xfe)\t\t# Send the read memory opcode\n write32(mbed, address)\t# Send the 32 bits start address (4 bytes)\n write16(mbed, count)\t# Send how many bytes we want to read (2 bytes)\n return read(mbed, count)\t# Read the number of bytes from the device",
"def writeMemory(mbed, address, count, data):\n write(mbed, 0xfd)\t\t# Send the write memory opcode\n write32(mbed, address)\t# Send the 32 bits destination address (4 bytes)\n write16(mbed, count)\t# Send how many bytes we want to write (2 bytes)\n for i in range(len(data)):\t# Write data\n write(mbed, data[i])\n return 0",
"def writemem(self, address, data):\n\n if 'qemu-' in os.path.realpath('/proc/%i/exe' % self.pid):\n self.error(\"Cannot use leaker on binaries under QEMU.\")\n\n with open('/proc/%i/mem' % self.pid, 'wb') as mem:\n mem.seek(address)\n return mem.write(data)",
"def test_memory_read_write(self):\n rbcp = Rbcp(\"127.0.0.1\")\n\n # Generate test data.for write.\n send_data = bytearray(0xff)\n for num in range(0xff):\n send_data[num] = num + 1\n\n # Read initial data of register.\n print(\"initial\")\n read_data = rbcp.read(0xffffff00, 255)\n print(read_data)\n\n # Write.\n rbcp.write(0xffffff00, send_data)\n\n # Read written data and compare with initial data.\n print(\"written\")\n read_data = rbcp.read(0xffffff00, 255)\n print(read_data)\n self.assertEqual(send_data, read_data)",
"def cmd_mem2file(self, ui, args):\n x = util.file_mem_args(ui, args, self.cpu.device)\n if x is None:\n return\n (name, adr, size) = x\n if size is None:\n ui.put('invalid length')\n return\n # adjust the address and length\n adr = util.align(adr, 32)\n n = util.nbytes_to_nwords(size, 32)\n # read memory, write to file object\n mf = iobuf.write_file(ui, 'writing to %s' % name, name, n * 4)\n self.cpu.rdmem32(adr, n, mf)\n mf.close()",
"def read(self, nbytes, write=0x00) -> bytes:\n ...",
"def _read(self, command, nbytes, arguments=b\"\"):\n padding = b\"\\x00\" * nbytes\n tx = struct.pack(\"B\", command) + arguments + padding\n self.programmer.flash_mode()\n self.programmer.select()\n rx = self.programmer.write(tx)\n self.programmer.unselect()\n return rx[1:]",
"def get_write_bytes(self, length):\n if length <= 0:\n return\n # log.debug(\n # 'to get {0} write bytes from msg, '\n # '_writeindex:{1}, msg total_len: {2}'.format(\n # length, self._writeindex, len(self._dumpdata)\n # )\n # )\n return self._dumpdata[self._writeindex: self._writeindex + length]",
"def readbin(self, len=512):\n response = pysicl.gpib_read(self.instrument, len)\n return response",
"def send_msg_to_target_process_read_memory_buffer(computer_id,family_id,process_id,msg_num,dmx_attr):\n command='ILLGTestCli -- -r %s %s %s %s %s -mem r'%(computer_id,family_id,process_id,msg_num,dmx_attr)\n out = connections.execute_mml_without_check(command)\n \n if out.count('success in memory operation') == 1:\n match = re.search(r\"\\bbuffer_size:(.+?)!\", out, re.I)\n if match is not None:\n from string import atoi\n return atoi(str(match.group(1)), 16)\n else:\n return 'the return is wrong' \n elif out.count('failure in memory operation') == 1:\n return 'failure'\n else:\n return 'the return is wrong'",
"def write_ram(self, index: Literal[0, 1]) -> int:\n if index == 0:\n return self.command(_EK79686_DTM1, end=False)\n if index == 1:\n return self.command(_EK79686_DTM2, end=False)\n raise RuntimeError(\"RAM index must be 0 or 1\")",
"def modify_memory(self, virtual_quantity):\n uri = self.href + '/virtualHardwareSection/memory'\n item = self.client.get_resource(uri)\n item['{' + NSMAP['rasd'] + '}ElementName'] = \\\n '%s virtual CPU(s)' % virtual_quantity\n item['{' + NSMAP['rasd'] + '}VirtualQuantity'] = virtual_quantity\n return self.client.put_resource(uri, item, EntityType.RASD_ITEM.value)",
"def op_loadw(self, base, offset):\n val = self._memory.read_word(base + 2*offset)\n self._write_result(val)",
"def GetWriteExtent(self):\n ...",
"def write_mem8(self, address, data):\n _logging.info(\"\")\n if len(data) > Stlink._STLINK_MAXIMUM_8BIT_DATA:\n raise StlinkException(\n 'Too many Bytes to write (maximum is %d Bytes)'\n % Stlink._STLINK_MAXIMUM_8BIT_DATA)\n cmd = [Stlink._Cmd.Debug.COMMAND, Stlink._Cmd.Debug.WRITEMEM_8BIT]\n cmd.extend(list(address.to_bytes(4, byteorder='little')))\n cmd.extend(list(len(data).to_bytes(4, byteorder='little')))\n self._com.xfer(cmd, data=data)",
"def writeMemory(self, addr, value, transfer_size=32):\n self.emu.write_memory(addr, value)",
"def get_writable_memory(self):\n for finder in self.files:\n addr = finder.parser.get_writable_memory()\n if addr != None:\n return addr\n raise RuntimeError(\"Couldn't find a .data section when looking for writable memory\")",
"def advapi32_RtlDecryptMemory(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"Memory\", \"MemorySize\", \"OptionFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def test_memory_write(self):\n rbcp = Rbcp(\"127.0.0.1\")\n\n rbcp.write(0xffffff00, b\"\")\n rbcp.write(0xffffff00, b\"0123\")\n rbcp.write(0xffffff00, bytearray(255))\n\n self.assertRaises(ValueError, rbcp.write, 0xffffff00, bytearray(256))\n self.assertRaises(ValueError, rbcp.write, -1, bytearray(256))\n self.assertRaises(ValueError, rbcp.write, 0xffffff01, bytearray(255))\n self.assertRaises(ValueError, rbcp.write, 1.1, bytearray(255))\n\n for _ in range(256): # For coverage of clear code of _packet_id.\n rbcp.write(0xffffff00, bytearray(255))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This command reads the device USB serial number. The serial number consists of 8 bytes, typically ASCII numeric or hexadecimal digits (e.g. "00000001"). The new serial number will be stored but not used until the device is reset.
|
def GetSerialNumber(self):
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = 0x0
wIndex = 0x0
value = self.udev.controlRead(request_type, self.SERIAL, wValue, wIndex, 8, self.HS_DELAY)
return value.decode()
|
[
"def get_serial_number(self):\n return usb.util.get_string(self.dev, 256, 3)",
"def get_virtio_disk_serial(device_name):\n dev_path = ('/sys/block/%s/serial' % device_name)\n out, err, rc = run_command([CAT, dev_path], throw=False)\n if (rc != 0):\n return ''\n # our out list has one element that is the serial number, like\n # ['11111111111111111111']\n return out[0]",
"def get_Serial(self):\n return self.GetStringDescriptor(StringDescriptor.Serial)",
"def get_serial_number():\n process_dict = [\"/usr/sbin/system_profiler SPHardwareDataType | grep 'Serial Number (system)' | awk '{print $NF}'\"]\n process = subprocess.Popen(process_dict, shell=True, stdout=subprocess.PIPE)\n\n return process_parse_output(process)",
"def read_serial_line(self):\n size_of_input_buffer = 0\n while size_of_input_buffer == 0:\n size_of_input_buffer = self.port.inWaiting()\n time.sleep(0.1)\n text = self.port.readline()\n return text.decode()",
"def getStLinkSerialNumbers(self):\n usb_args = ['lsusb', '-v', '-d', '0x0483:0x374b']\n usb_process = sp.Popen(usb_args, stdout=sp.PIPE, shell=False)\n st_link_info = usb_process.communicate()[0]\n st_serials = []\n for line in st_link_info.split('\\n'):\n if 'iSerial' in line:\n st_serials.append(line.split()[2])\n return st_serials",
"def read(self):\n c = self.serial.read(1)\n while c != '':\n self.line_buffer = self.line_buffer[-13:] + c\n if re.match(\"^\\x02[0-9a-fA-F]{12}\\x03$\",self.line_buffer):\n # frame matches lets parse the packet\n checksum = int(self.line_buffer[11:13], 16)\n number = self.line_buffer[1:11]\n local_checksum = int(number[0:2],16)\n local_checksum ^= int(number[2:4],16)\n local_checksum ^= int(number[4:6],16)\n local_checksum ^= int(number[6:8],16)\n local_checksum ^= int(number[8:10],16)\n if checksum == local_checksum:\n self.line_buffer = \"\"\n self.version_id = int(number[0:2],16)\n self.serial_number = int(number[2:10],16)\n return self.serial_number\n c = self.serial.read(1)\n return None",
"def serial_number(self) -> str:\n return self._serial_number.lstrip(\"0\")",
"def GetSystemSerial(self):\n if not self._SystemSerial :\n if self._deviceType == DeviceType.Unknown :\n self.GetDeviceType()\n inv = self.GetInventory()\n ss = \"\"\n if self._deviceType == DeviceType.Firewall :\n allChassis = re.findall(r\"Chassis\\s.*\", inv)\n for thisChassis in allChassis :\n words = filter(None, thisChassis.split(\" \"))\n ss += (\";\" + words[1])\n self._SystemSerial = ss.strip(\";\") \n elif self._deviceType == DeviceType.Router :\n allChassis = re.findall(r\"Chassis\\s.*\", inv)\n for thisChassis in allChassis :\n words = filter(None, thisChassis.split(\" \"))\n ss += (\";\" + words[1])\n self._SystemSerial = ss.strip(\";\") \n elif self._deviceType == DeviceType.Switch :\n FPCs = re.findall(r\"FPC \\d.*\", inv)\n for thisFPC in FPCs :\n words = filter(None, thisFPC.split(\" \"))\n ss += (\";\" + words[5])\n self._SystemSerial = ss.strip(\";\")\n return self._SystemSerial",
"def read_Arduino():\n data = arduino.readline().decode('utf-8')\n return data",
"def get_serial_num(self, fqdn: str):\n device_details = self.__cv_client.api.get_device_by_name(fqdn)\n if \"serialNumber\" in device_details.keys():\n return device_details[\"serialNumber\"]\n device_details = self.__cv_client.api.get_device_by_name(fqdn, search_by_hostname=True)\n if \"serialNumber\" in device_details.keys():\n return device_details[\"serialNumber\"]\n self.__ansible.fail_json(msg=f\"Error, Device {fqdn} doesn't exists on CV. Check the hostname/fqdn\")",
"def test_com_port(port=[]):\n # print(\"Testing\", port)\n s = serial.Serial(port, 115200, timeout=2)\n s.write(bytes([1]))\n s.write(bytes([13]))\n #n = 0\n #while n == 0:\n #n = s.inWaiting()\n try:\n m = ord(s.read())\n if m == 186:\n s.close()\n return 1\n if m == 174:\n s.close()\n return 2\n s.close()\n return 0\n except:\n return 0",
"def get_device_id():\n cpuinfo = open('/proc/cpuinfo', 'r')\n for line in cpuinfo:\n if line[0:6] == 'Serial':\n cpuserial = line[10:26]\n cpuinfo.close()\n return cpuserial",
"def get_port():\n port = 0\n if sys.platform.startswith('darwin'):\n port = glob.glob('/dev/tty.usbmodem*')[0]\n elif sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(32)]\n for p in ports:\n try:\n s = serial.Serial(p)\n s.close()\n port = p\n except (OSError, serial.SerialException):\n pass\n return port",
"def get_device_index_by_serial(serial):\n if PY3 and isinstance(serial, str):\n serial = bytes(serial, 'UTF-8')\n\n result = librtlsdr.rtlsdr_get_index_by_serial(serial)\n if result < 0:\n raise IOError('Error code %d when searching device by serial' % (result))\n\n return result",
"def GetSerialString(self):\n return self._send_string",
"def read_serial_bytes(self, no_of_bytes):\n size_of_input_buffer = 0\n while size_of_input_buffer < no_of_bytes:\n size_of_input_buffer = self.port.inWaiting()\n text = self.port.read(no_of_bytes)",
"def serial_number(self) -> str:\n return self._serial_number",
"def get_com_port():\n ports = list(serial.tools.list_ports.comports())\n\n #Is list ports empty?\n if not ports:\n logging.critical(\"No Serial Ports found! Exiting now\")\n exit()\n\n #If there is only one port available, automatically use that one\n if len(ports) == 1:\n return ports[0].device\n\n #Display all available ports if there are more than one available\n print(\"Available Ports: \")\n for port in ports:\n print(port)\n return input(\"Enter Xbee Serialport: \")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a 12-bit raw value to volts. All values are single-ended, +/- 10V.
|
def volts(self, value):
volt = ((value - 2048) * 10.) / 2048.
return volt
|
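A few worked values for the conversion above (12-bit offset binary mapped onto a +/- 10V span): raw 0 gives -10V, 2048 gives 0V, and 4095 gives just under +10V.

# Worked examples of volt = ((value - 2048) * 10.) / 2048.
assert ((0 - 2048) * 10.) / 2048. == -10.0
assert ((2048 - 2048) * 10.) / 2048. == 0.0
assert abs(((4095 - 2048) * 10.) / 2048. - 9.9951171875) < 1e-9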
[
"def Convert2Volt(values):\n for val in values:\n val *= 10e-4 / 167",
"def to_voltage(val):\n return (val / 1024.0) * 3.3",
"def _hexword2volt(self, hex_str):\n byte1 = format(int(hex_str[0:2], 16), \"08b\")\n byte2 = format(int(hex_str[2:4], 16), \"08b\")\n byte3 = format(int(hex_str[4:6], 16), \"08b\")\n\n v1 = int(byte1 + byte2[:4], 2)\n v2 = int(byte2[4:] + byte3, 2)\n\n v1 = 5 * (1 - v1 / 4095)\n v2 = 5 * (1 - v2 / 4095)\n\n return v1, v2",
"def inVolts(adc_value, bits=12, vRef=1.8):\r\n return adc_value*(vRef/2**bits)",
"def convert_T_to_V(temp):\n return (71-temp)/10.4",
"def _dac_code_to_v(code, min_volt, max_volt):\n frac = code / 65535.0\n \n return (frac * (max_volt - min_volt)) + min_volt",
"def inVolts(mv):\r\n return mv/1000.0",
"def GetVolts( value, units ):\r\n\tif isinstance( value, ( tuple, list ) ): return value.__class__( GetVolts( x, units ) for x in value )\r\n\tif value == None: return None\r\n\tfactors = { '' : 1e0, 'v' : 1e0, 'mv' : 1e-3, 'muv' : 1e-6, 'uv' : 1e-6 }\r\n\treturn value * factors[ units.lower() ]",
"def _dac_code_to_v(self, code):\n return DacBase._dac_code_to_v(code, self._min_volt, self._max_volt)",
"def voltage(analog_pin):\r\n return \"%0.2f\" % inVolts(analogRead(analog_pin))",
"def _threshold_dac2volt(value: float):\n return round((2.047 + 1.024) / 4095 * value - 1.024, 3)",
"def measure_v(self):\n self._ser.write('MEAS:VOLT?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Voltage: {__value}V')\n return __value",
"def parse(data):\n global values_raw\n\n values_raw = []\n\n for x in range(0, 12):\n if x % 2 == 0:\n data[x] = data[x] - 252 # Subtracts 252 from the first byte of data to get rid of 111111 sent by ADC\n # print data # Debug\n\n # Combines the two bytes together into one usable 10-bit value\n for x in range(0, 11, 2):\n values_raw.append(bin(data[x]).lstrip('-0b').zfill(2) + bin(data[x + 1]).lstrip('-0b').zfill(8))\n # print values # Debug",
"def dBuV_to_voltage(v) -> Unit(\"V\"):\n return dB_to_ratio(v, factor=dBFactor.Field) * 1e-6",
"def _threshold_volt2dac(value: float):\n return round((value + 1.024) * 4095 / (2.047 + 1.024))",
"def read_volt(self, channel):\n return self.read_value(channel, 'volt')",
"def convert_data(hdu, vslr):\n wave = hdu[1].data['Wavelength']\n wave = air2vac(wave)\n return wave * (1+vslr/c.c)",
"def vat_rate():",
"def read_voltage(self):\n return self.read_raw() * self._scale_factor"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the play_as method call
|
def test_play_as(self):
queue = Queue()
thread = Thread(
target=self.__client_send_thread, args=[self.client, json.dumps("void"), queue])
thread.daemon = True
thread.start()
comm.play_as(self.remote_player, "red")
thread.join()
data_load = queue.get()
self.assertEqual(data_load, [comm.PLAY_AS, ["red"]])
|
[
"def test_cps_play(self):\n self.skill.play_service_string = 'play on godzilla'\n self.skill.CPS_play(['looking_for_freedom.mp3'],\n utterance='play on mothra')\n self.audioservice.play.assert_called_once_with(\n ['looking_for_freedom.mp3'], utterance='play on mothra')\n\n self.audioservice.play.reset_mock()\n # Assert that the utterance is injected\n self.skill.CPS_play(['looking_for_freedom.mp3'])\n self.audioservice.play.assert_called_once_with(\n ['looking_for_freedom.mp3'], utterance='play on godzilla')",
"def test_play(self):\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.plr.actions.set(1)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.village)\n self.assertIn(\"Village\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Village\", self.plr.piles[Piles.PLAYED])",
"def test_handle_start_playback(self):\n self.skill.audioservice.is_playing = True\n start_playback = self.bus.on.call_args_list[-1][0][1]\n\n phrase = 'Don\\'t open until doomsday'\n start_playback(Message('play:start', data={'phrase': phrase,\n 'skill_id': 'asdf'}))\n self.skill.CPS_start.assert_not_called()\n\n self.bus.emit.reset_mock()\n start_playback(Message('play:start',\n data={'phrase': phrase,\n 'skill_id': self.skill.skill_id}))\n self.audioservice.stop.assert_called_once_with()\n self.skill.CPS_start.assert_called_once_with(phrase, None)",
"def test_human_play(self):\n print(\"\\nStart test_human_play test\\n\")\n print(\"Testing a human regular play.\\n\")\n\n\n machineNumber = [8,1,5,9]\n humanNumber = 8159\n\n humanPlay(humanNumber, machineNumber)\n\n print(\"\\nFinish test_human_play test\\n\")\n print(\"---------------------------\\n\")",
"def test_player_details_by_player(self):\n pass",
"def start_play():",
"async def test_media_player_play(\n hass: HomeAssistant,\n camera: tuple[Camera, str],\n):\n\n camera[0].__fields__[\"stop_audio\"] = Mock()\n camera[0].__fields__[\"play_audio\"] = Mock()\n camera[0].__fields__[\"wait_until_audio_completes\"] = Mock()\n camera[0].stop_audio = AsyncMock()\n camera[0].play_audio = AsyncMock()\n camera[0].wait_until_audio_completes = AsyncMock()\n\n await hass.services.async_call(\n \"media_player\",\n \"play_media\",\n {\n ATTR_ENTITY_ID: camera[1],\n \"media_content_id\": \"/test.mp3\",\n \"media_content_type\": \"music\",\n },\n blocking=True,\n )\n\n camera[0].play_audio.assert_called_once_with(\"/test.mp3\", blocking=False)\n camera[0].wait_until_audio_completes.assert_called_once()",
"def test_player_details_by_available(self):\n pass",
"def test_pause_and_play(self, soco):\r\n soco.pause()\r\n wait(1)\r\n on_pause = soco.get_current_transport_info()['current_transport_state']\r\n assert on_pause == 'PAUSED_PLAYBACK'\r\n soco.play()\r\n wait(1)\r\n on_play = soco.get_current_transport_info()['current_transport_state']\r\n assert on_play == 'PLAYING'",
"def test_challenge_player_6(self):\n pass",
"async def test_media_player_playback(\n hass, setup_plex_server, requests_mock, playqueue_created, player_plexweb_resources\n):\n requests_mock.get(\"http://1.2.3.5:32400/resources\", text=player_plexweb_resources)\n\n await setup_plex_server()\n\n media_player = \"media_player.plex_plex_web_chrome\"\n requests_mock.post(\"/playqueues\", text=playqueue_created)\n requests_mock.get(\"/player/playback/playMedia\", status_code=200)\n\n # Test movie success\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\", \"title\": \"Movie 1\" }',\n },\n True,\n )\n\n # Test movie incomplete dict\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\"}',\n },\n True,\n )\n\n # Test movie failure with options\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\", \"title\": \"Does not exist\" }',\n },\n True,\n )\n\n # Test movie failure with nothing found\n with patch(\"plexapi.library.LibrarySection.search\", return_value=None):\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\", \"title\": \"Does not exist\" }',\n },\n True,\n )\n\n # Test movie success with dict\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Music\", \"artist_name\": \"Artist\", \"album_name\": \"Album\"}',\n },\n True,\n )\n\n # Test TV show episoe lookup failure\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"TV Shows\", \"show_name\": \"TV Show\", \"season_number\": 1, \"episode_number\": 99}',\n },\n True,\n )\n\n # Test track name lookup failure\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Music\", \"artist_name\": \"Artist\", \"album_name\": \"Album\", \"track_name\": \"Not a track\"}',\n },\n True,\n )\n\n # Test media lookup failure by key\n requests_mock.get(\"/library/metadata/999\", status_code=404)\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: \"999\",\n },\n True,\n )\n\n # Test invalid Plex server requested\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: '{\"plex_server\": \"unknown_plex_server\", \"library_name\": \"Music\", \"artist_name\": \"Artist\", \"album_name\": \"Album\"}',\n },\n True,\n )",
"def test_playable_format(self, mock_create_hash_sum):\n mock_create_hash_sum.return_value = '1234567'\n cid2 = create_clip(fid=self.fid, clip_name=\"another_test_clip\", video_format=\"mp4\", start_time=self.st,\n end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,\n frame_rate=42.0, camera_name=self.cm_name)\n clip2 = get_clip_by_id(cid=cid2)\n self.assertEqual(clip2.playable, True)",
"def play(self, track):\n raise NotImplementedError",
"async def test_audio_pipeline(hass, mock_get_tts_audio):\n pipeline = Pipeline(\n name=\"test\",\n language=hass.config.language,\n conversation_engine=None,\n tts_engine=None,\n )\n\n event_callback = MagicMock()\n await AudioPipelineRequest(intent_input=\"Are the lights on?\").execute(\n PipelineRun(\n hass,\n context=Context(),\n pipeline=pipeline,\n event_callback=event_callback,\n language=hass.config.language,\n )\n )\n\n calls = event_callback.mock_calls\n assert calls[0].args[0].type == PipelineEventType.RUN_START\n assert calls[0].args[0].data == {\n \"pipeline\": \"test\",\n \"language\": hass.config.language,\n }\n\n assert calls[1].args[0].type == PipelineEventType.INTENT_START\n assert calls[1].args[0].data == {\n \"engine\": \"default\",\n \"intent_input\": \"Are the lights on?\",\n }\n assert calls[2].args[0].type == PipelineEventType.INTENT_FINISH\n assert calls[2].args[0].data == {\n \"intent_output\": {\n \"conversation_id\": None,\n \"response\": {\n \"card\": {},\n \"data\": {\"code\": \"no_intent_match\"},\n \"language\": hass.config.language,\n \"response_type\": \"error\",\n \"speech\": {\n \"plain\": {\n \"extra_data\": None,\n \"speech\": \"Sorry, I couldn't understand that\",\n }\n },\n },\n }\n }\n\n assert calls[3].args[0].type == PipelineEventType.TTS_START\n assert calls[3].args[0].data == {\n \"engine\": \"default\",\n \"tts_input\": \"Sorry, I couldn't understand that\",\n }\n assert calls[4].args[0].type == PipelineEventType.TTS_FINISH\n assert (\n calls[4].args[0].data[\"tts_output\"]\n == f\"/api/tts_proxy/dae2cdcb27a1d1c3b07ba2c7db91480f9d4bfd8f_{hass.config.language}_-_demo.mp3\"\n )\n\n assert calls[5].args[0].type == PipelineEventType.RUN_FINISH",
"def pause_play():",
"def test_video_mpg_should_return_true(self):\n\n video_name : str = \"video.mpg\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...",
"def test_video_ts_should_return_true(self):\n\n video_name : str = \"video.ts\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...",
"def test_video_avi_should_return_true(self):\n\n video_name : str = \"video.avi\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...",
"def test_audiences_get_check_audience_job(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Starts the GLUT main loop.
|
def main_loop():
glutMainLoop()
|
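For context, a minimal PyOpenGL/GLUT bootstrap sketch showing where the blocking main-loop call sits; the window title, function name, and the empty display callback are placeholders, not part of the original program.

# Minimal GLUT bootstrap (PyOpenGL); glutMainLoop() blocks and dispatches
# window events until the program exits.
from OpenGL.GLUT import glutInit, glutCreateWindow, glutDisplayFunc, glutMainLoop

def display():
    pass  # placeholder display callback

def run_demo():
    glutInit()
    glutCreateWindow(b'demo')
    glutDisplayFunc(display)
    glutMainLoop()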
[
"def main():\n make_glut()",
"def mainloop(self):\n while self.running:\n self.updateview();\n self.handlekey(self.scr.getch());",
"def mainloop() -> None:\n handle_key_down()\n\n if SS.on_start_screen:\n return\n\n if MAIN not in characters:\n return\n\n control_main()\n increment_time()\n\n if time_passed[1] <= 100:\n register_objects()\n display_objects()\n else:\n display_highscores()\n\n CLOCK.tick(25)",
"def main_loop(self):\n # Start main loop thread (loop() handler)\n while True:\n if self._looping:\n # Call loop() handler\n self._berry.loop_client()",
"def run(self):\n while not glfw.window_should_close(self.win):\n # clear draw buffer and depth buffer (<-TP2)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n winsize = glfw.get_window_size(self.win)\n view = self.trackball.view_matrix()\n # print(view)\n # view = obj_centered_camera_pose(96., 20., 32.)\n \n projection = self.trackball.projection_matrix(winsize)\n # projection = identity()\n # draw our scene objects\n for drawable in self.drawables:\n drawable.draw(projection, view, identity(), win=self.win,\n color_shader=self.lighting_shader)\n \n # flush render commands, and swap draw buffers\n glfw.swap_buffers(self.win)\n\n # Poll for and process events\n glfw.poll_events()",
"def start(self):\n self.__getLastPos()\n self.window.mainloop()",
"def main():\n # set up the screen display\n screen = pygame.display.set_mode((screen_width, screen_height))\n pygame.display.set_caption(\"Interactive Drum Machine\")\n\n # initialize done to false\n done = False\n\n # create objects\n view = Display()\n sounds = SoundObjects()\n controller = Controller()\n\n # display the screen background\n view.display_background(screen)\n\n while not done:\n # play metronome continuously\n sounds.play_sound(sounds.metronome)\n\n # process events\n done = controller.process_events(sounds.notes, screen)\n\n # exit the window\n pygame.quit()",
"def pre_mainloop(self):\n self.init_pygame()\n self.init_graphics()",
"def main():\n start()",
"def on_init(self):\n self._display = pygame.display.set_mode((self.width, self.height), HWSURFACE | DOUBLEBUF)\n pygame.display.set_caption('Path Finding Visualiser')\n \n self.draw_maze()\n \n self._running = True\n pygame.init()",
"def __init__(self):\n self.mainloop = g_main_loop_new(g_main_context_default(), True)\n MainLoop.default = self",
"def main():\n app = WoJ()\n app.run()\n pygame.quit()",
"def mainLoop(self):\n\n while self.running:\n if self.state == \"START\":\n self.startLoop()\n elif self.state == \"GAME\":\n self.gameLoop()\n elif self.state == \"END\":\n self.endLoop()",
"def run(self):\r\n self.root.after(3000, self.__my_mainloop)\r\n self.root.mainloop()",
"def start_program():\r\n _game.invoke_when_program_starts_callbacks()\r\n _task_runner.loop.call_soon(_game.run)\r\n try:\r\n _task_runner.loop.run_forever()\r\n finally:\r\n _pygame.quit()",
"def main_loop():\n \n ConsoleViewController.isMainLoopRunning = True\n \n while (ConsoleViewController.isMainLoopRunning):\n continue",
"def initGL(self):\t\t\n\n\t\tpass",
"def start(self):\n #self.state = self.RUNNING\n self.root.after(self.updateTime, self.update)\n self.root.mainloop()",
"def main():\n field = Field(1080, 800)\n game = Game(field)\n ui = UserInterface(game)\n ui.main_loop()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a newly created session if an external user is needed, otherwise a session from the pool.
|
def get_session(self, obj_dict):
return session_pool.create_session(
users.current_user(), is_external=True) if (
self.is_external_user_needed(obj_dict)) else (
session_pool.get_session(users.current_user()))
|
[
"def get_or_create_session (self):\n\n session_name = self.session_class.name_prefix + '_session'\n session = cherrypy.session.get (session_name, self.session_class ())\n cherrypy.session[session_name] = session\n return session",
"def get_session(self, session_id='', create_if_null=False):\r\n\r\n # Is it an existing session?\r\n session = self.session_pool.get(session_id)\r\n\r\n # Otherwise let the client choose their session_id, if\r\n # this transport direction allows\r\n if create_if_null and session is None:\r\n session = self.session_backend(self, session_id=session_id)\r\n self.session_pool.add(session)\r\n elif session:\r\n session.incr_hits()\r\n\r\n return session",
"def _login(self):\n self._session = uuidutils.generate_uuid()\n session = DataObject()\n session.key = self._session\n session.userName = 'sessionUserName'\n _db_content['session'][self._session] = session\n return session",
"def _new_session(self, session: aiohttp_session.Session, user: User) -> UserSession:\n if not self._user_has_session(user):\n # create new session with random id.\n new_id = f\"{random.randint(1000, 9999)}-{random.randint(1000, 9999)}\"\n self._sessions[new_id] = UserSession(user)\n session[SESSION_KEY] = new_id\n\n _LOGGER.debug(f\"New user session: {new_id}\")\n\n return new_id\n else:\n for k in self._sessions:\n if self._sessions[k].user == user and not SESSION_KEY in session:\n session[SESSION_KEY] = k",
"def makeSession(self):\n uid = self._mkuid()\n s = SBSession(self, uid)\n s.expiryTimeout = self.cb.personalRegistryValue('sessionTimeout')\n session = self.sessions[uid] = s\n reactor.callLater(s.expiryTimeout, s.checkExpired)\n \n return session",
"def fresh_session():\n VirtualTN.query.delete()\n ProxySession.query.delete()\n new_tn = VirtualTN('1234567897')\n db_session.add(new_tn)\n db_session.commit()\n new_session = ProxySession(\n new_tn.value, '12223334444', '12223335555', expiry_window=1)\n new_tn.session_id = new_session.id\n db_session.add(new_tn)\n db_session.add(new_session)\n db_session.commit()\n return new_tn, new_session",
"def __sessionmaker():\n\tsession = requests.ClientSession()\n\treturn session",
"def create_session(cls, db, data, client_ip, effective_hours):\n # Try 3 different keys\n for trial in range(3):\n session_key = str_generator.unique_id(40)\n\n # Try to create session instance in database\n session_model = Session.create_session(\n db,\n session_key,\n data,\n client_ip,\n effective_hours\n )\n\n if session_model is not None:\n break\n\n # If still not created, return None\n if session_model is None:\n return None\n else:\n return DatabaseSession(db, session_model)",
"def get_oci_api_session():\n session_cache = getattr(get_oci_api_session, \"_session\", None)\n if session_cache:\n return session_cache\n\n sess = None\n\n try:\n _logger.debug('Creating session')\n sess = oci_utils.oci_api.OCISession()\n # it seems that having a client is not enough, we may not be able to query anything on it\n # workaround :\n # try a dummy call to be sure that we can use this session\n if not bool(sess.this_instance()):\n _logger.debug('Returning None session')\n return None\n setattr(get_oci_api_session, \"_session\", sess)\n except Exception as e:\n _logger.error(\"Failed to access OCI services: %s\", str(e))\n _logger.debug('Returning session')\n return sess",
"def _get_session(self):\n session = Session.object_session(self)\n if not session:\n session = sessionmaker(bind=self.engine)()\n return session",
"def create_session(self):\n\n self.session = self.opentok.create_session(\n media_mode=MediaModes.routed\n )\n return self.session.session_id",
"def get_session(self):\n self._lock.acquire()\n try:\n if not self._session_id or not self._time_created:\n return None\n\n expiry_age = self.get_expiry_age()\n if not expiry_age:\n self._session_id, self._time_created = None, None\n return None\n if expiry_age < self._read_timeout:\n self._session_id, self._time_created = None, None\n return None\n finally:\n self._lock.release()\n return self._session_id",
"def create_scoped_session():\n return scoped_session(create_session())",
"def test_create_session(self):\n _meta = SessionMeta.new(app_secret=self.manager.secret)\n\n session1 = self.manager.get_session(meta=_meta, new=True)\n session1['foo'] = 'bar'\n session1.commit()\n\n # read back session\n session2 = self.manager.get_session(meta=_meta, new=False)\n self.assertEqual(session2['foo'], session1['foo'])",
"def get_session():\n return DatabaseService.connector.get_session()",
"def establish_a_session():\n new_session = requests.Session()\n\n jar = requests.cookies.RequestsCookieJar()\n jar.set('view_mature', 'true' if named_args.adult else 'false')\n jar.set('d_browse_bookshelf', '2') # grid-like view\n\n new_session.cookies = jar\n return new_session",
"def start_session(self) -> Tuple[\"Session\", List[Dict[str, Any]]]:\n\n session_info = login(\n self.gateway.api_root,\n self.access_token,\n self.gateway.country,\n self.gateway.language,\n )\n session_id = session_info[\"jsessionId\"]\n return Session(self, session_id), get_list(session_info, \"item\")",
"def test_create_pool_missing_session_pers_field(self):\n new_pool = self._prepare_and_create_pool()\n pool_initial = self._show_pool(new_pool.get('id'))\n sess = pool_initial.get('session_persistence')\n self.assertIsNone(sess)",
"def create_session(self):\n\n return scoped_session(self.session_factory)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks if endpoint is external.
|
def is_endpoint_external(self):
return self.endpoint in objects.EXTERNAL_END_POINTS
|
[
"def IsExternal(self):\n return self.location is not None",
"def is_external(url):\r\n return url.startswith(('//', 'http://', 'https://'))",
"def uses_webhook(self):\n url = self.config.server.base_url\n return 'https' in url and not ('localhost' in url or '127.0.0.1' in url)",
"def validate_external_endpoints(endpoints=ENDPOINTS):\n data = load_json_file()\n externals_ips = map(itemgetter(\"ip\"), data[\"external_endpoints\"])\n region_2_eps = filter(lambda ep: ep[\"region\"] == 2, endpoints)\n are_shared = map(lambda ep: ep[\"ipv6\"][0] in externals_ips or ep[\"ipv4\"][0] in externals_ips, region_2_eps)\n return all(are_shared)",
"def NeedsExternalCalls(self):\n if self.wifi:\n return self.wifi.NeedsExternalCalls()\n elif self.wired:\n return self.wired.NeedsExternalCalls()\n else:\n return True",
"def is_endpoint_subscribed_to(\n self, remote_endpoint: str, event_type: Type[BaseEvent]\n ) -> bool:\n ...",
"def is_reachable(self):\n try:\n r = requests.head(self.endpoint)\n return r.status_code in [200, 304]\n except requests.exceptions.RequestException:\n return False",
"def IsExternal(self):\n return isinstance(self.definition, XMLExternalID)",
"def is_any_endpoint_subscribed_to(self, event_type: Type[BaseEvent]) -> bool:\n ...",
"def isAPIOnline(cls):\r\n return NextBusHandler.isOnline()",
"def is_hosted(self):\n return self.hosted_by is not None",
"def internal(self):\n return self.remote == site_config.params.INTERNAL_REMOTE",
"def check_if_indirect(self):\n try:\n if not self.callingEA:\n self.logger.error(\"Error: could not locate the calling ea for function %s\", self.function.funcName)\n return False\n\n return is_indirect(self.callingEA)\n\n except Exception as ex:\n self.logger.error(\"Failed while checking for indirect call: %s\", ex)\n return False",
"def is_local():\n return 'sinwoo' in socket.gethostname()",
"def _is_worth_logging(self, endpoint_url: str) -> bool:\n if endpoint_url.endswith(('/queue/status', '/devices/v/1', '/Jobs/status',\n '/.../properties', '/.../defaults')):\n return False\n if endpoint_url.startswith(('/users', '/version')):\n return False\n if endpoint_url == '/Network':\n return False\n if 'objectstorage' in endpoint_url:\n return False\n if 'bookings' in endpoint_url:\n return False\n\n return True",
"def _no_host(url):\n return not url.startswith('localhost') or not '.' in url",
"def url_allowed(self, url):\n return get_netloc(url) in self.root_hosts",
"def is_hostonly(self):\n return self.domain is None",
"def isEndPointExistNotUsedButAGoodReference( self, epname):\n\t\t#######################################################\n\t\t#\tCheck to see if the given EndPoint exists.\n\t\t#######################################################\n\t\tmyargs\t\t\t= array( ['specialEndpoints'], java.lang.String )\n\t\tendpointAttrs\t= self.configService.getAttributes( self.configService.session, self.rootObjectName, myargs, False )\n\t\t#endpointAttrs\t= self.configService.getAttributes( self.configService.session, self.rootObjectName, None, False )\n\t\t#self.debug( __name__ + \".isEndPointExist(): endpointAttrs=\" + str( endpointAttrs ) + \"\\n\" )\n\t\tself.debug( __name__ + \".isEndPointExist(): endpointAttrs type=\" + str( type( endpointAttrs ) ) + \"\\n\" )\n\t\tfor endpointAttr in endpointAttrs:\n\t\t\t#self.debug( __name__ + \".isEndPointExist(): endpointAttr=\" + str( endpointAttr ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): endpointAttr type=\" + str( type( endpointAttr ) ) + \"\\n\" )\n\t\t\tattrName = endpointAttr.getName()\n\t\t\tspecialEndPointAttrs= endpointAttr.getValue()\n\t\t\tself.debug( __name__ + \".isEndPointExist(): attrName=\" + str( attrName ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): attrName type=\" + str( type( attrName ) ) + \"\\n\" )\n\t\t\t#self.debug( __name__ + \".isEndPointExist(): specialEndPointAttrs=\" + str( specialEndPointAttrs ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): specialEndPointAttrs type=\" + str( type( specialEndPointAttrs ) ) + \"\\n\" )\n\t\t\tif isinstance( specialEndPointAttrs, java.util.ArrayList ):\n\t\t\t\tfor namedEndPoint in specialEndPointAttrs:\n\t\t\t\t\t#self.debug( __name__ + \".isEndPointExist(): namedEndPoint=\" + str( namedEndPoint ) + \"\\n\" )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): namedEndPoint type=\" + str( type( namedEndPoint ) ) + \"\\n\" )\n\t\t\t\t\tepArgs = array( ['endPointName'], java.lang.String )\n\t\t\t\t\tnameAttrs\t= self.configService.getAttributes( self.configService.session, namedEndPoint, epArgs, False )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): nameAttrs=\" + str( nameAttrs ) + \"\\n\" )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): nameAttrs type=\" + str( type( nameAttrs ) ) + \"\\n\" )\n\t\t\t\t\tepName = self.configService.configServiceHelper.getAttributeValue( nameAttrs, 'endPointName' )\n\t\t\t\t\tif epName == epname:\n\t\t\t\t\t\treturn True\n\t\t\t\t#Endfor\n\t\t\t#Endif\n\t\t#Endfor\n\t\treturn False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if source or destination object's type is external.
|
def is_relationship_types_external(self, obj_dict):
return (self.endpoint == objects.get_singular(objects.RELATIONSHIPS) and
(any(x for x in objects.SINGULAR_DISABLED_OBJS
if x.title() in (obj_dict["source"]["type"],
obj_dict["destination"]["type"]))))
|
[
"def IsExternal(self):\n return self.location is not None",
"def IsExternal(self):\n return isinstance(self.definition, XMLExternalID)",
"def check_source(self):\r\n source_bool = False\r\n for a in G.nodes():\r\n if a in self.source:\r\n source_bool = True\r\n if source_bool == False:\r\n raise Exception('Source is not in Network')\r\n \r\n return source_bool",
"def check_external_mission(self, cr, uid, ids, context=None):\n for h in self.browse(cr, uid, ids, context=context):\n if h.type_mission == 'external':\n return True\n else:\n return False",
"def is_external(url):\r\n return url.startswith(('//', 'http://', 'https://'))",
"def customer_has_source(obj):\n return obj.customer.default_source is not None",
"def is_endpoint_external(self):\n return self.endpoint in objects.EXTERNAL_END_POINTS",
"def is_source_obj(self, obj):\n try:\n if os.path.basename(inspect.getfile(obj)).startswith(FILE_PREFIX):\n return True\n except TypeError:\n pass\n\n return False",
"def _is_point_source(slit, exp_type):\n result = False\n\n # Get the source type value set by the source_type step (if any)\n if slit.source_type is not None:\n src_type = slit.source_type\n elif slit.meta.target.source_type is not None:\n src_type = slit.meta.target.source_type\n else:\n src_type = None\n\n if src_type is not None and src_type.upper() in ['POINT', 'EXTENDED']:\n # Use the supplied value\n log.info(f'Detected a {src_type} source type in slit {slit.name}')\n if src_type.strip().upper() == 'POINT':\n result = True\n else:\n result = False\n else:\n log.info(\"Unknown source type\")\n\n return result",
"def is_source(self) -> bool:\n return self.kind is SlotType.SOURCE",
"def isDatatypeLoaded(self, objType, autoFieldName):\r\n retVal = False\r\n if autoFieldName in self.cacheStruct:\r\n # cacheStruct['id']['object type'] needs to exist, and also needs\r\n # to have something other than empty dicts in it for both src and dest.\r\n if (\r\n (objType in self.cacheStruct[autoFieldName])\r\n and self.cacheStruct[autoFieldName][objType][constants.DATA_SOURCE.SRC]\r\n and self.cacheStruct[autoFieldName][objType][constants.DATA_SOURCE.DEST]\r\n ):\r\n retVal = True\r\n return retVal",
"def is_matching_type(source, target):\n\n if cmds.objectType(source) == cmds.objectType(target):\n return True\n else:\n return False",
"def is_external_ctype(ctype, includes):\n # Get the base type\n while issubclass(ctype, ctypes._Pointer):\n ctype = ctype._type_\n\n if issubclass(ctype, ctypes._SimpleCData):\n return False\n\n for k, v in known_ctypes.items():\n if ctype in v:\n return True\n\n return False",
"def test_otherTypesUnequal(self):\n u = URL.fromText('http://localhost/')\n self.assertTrue(u != 42, \"URL must differ from a number.\")\n self.assertTrue(u != object(), \"URL must be differ from an object.\")",
"def _check_sources(self):\n for source_name, source in self.sources.items():\n if \"data\" not in source or \"ref_column\" not in source:\n raise ValueError(\n \"Each source needs to have a `data` and a `ref_column` property\"\n )\n if not isinstance(source[\"data\"], pd.DataFrame):\n raise ValueError(\n \"The `data` property of each source must contain a DatFrame\"\n )\n if not isinstance(source[\"data\"].index, pd.DatetimeIndex):\n raise ValueError(\n \"The `data` DataFrame must have a pd.DatetimeIndex for each source\"\n )\n if source[\"data\"].index.duplicated().any():\n raise ValueError(\n \"The input dataframe must not have duplicate index values, \"\n \"convert the data into a normalized wide format\"\n )\n if (\n not isinstance(source[\"ref_column\"], str)\n or source[\"ref_column\"] not in source[\"data\"].columns\n ):\n raise ValueError(\n \"Each source must have a string specifying the reference column, and the reference\"\n \"column must be available in the source's DataFrame\"\n )\n if self.ref_source_name not in self.sources.keys():\n raise ValueError(\n \"The reference source name must be available in the source dict\"\n )",
"def graph_object_is_unmanaged_asset(graph_obj: Dict) -> bool:\r\n return graph_obj.get(\"type\") == \"vm\" and graph_obj.get(\"id\", '').startswith(\"ip:\")",
"def is_type_inference_file_object(proxy_obj):\n if hasattr(proxy_obj.__class__, '__module__'):\n if \"stypy.sgmc.sgmc_cache\" in proxy_obj.__class__.__module__:\n return True\n\n return False",
"def is_source_op(self, op):\n op_handler = self._op_handler_dict[op.type]\n return op_handler.is_source_op",
"def has_correct_data_sources(pipeline: Pipeline):\n\n is_data_source_in_names_conds = ['data_source' in str(n) for n in pipeline.nodes if isinstance(n, PrimaryNode)]\n\n if any(is_data_source_in_names_conds) and not all(is_data_source_in_names_conds):\n raise ValueError(f'{ERROR_PREFIX} Data sources are mixed with other primary nodes')\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if custom attribute is external.
|
def is_ca_external(self, obj_dict):
return (self.endpoint == objects.get_singular(
objects.CUSTOM_ATTRIBUTES) and
obj_dict["definition_type"] in objects.ALL_SINGULAR_DISABLED_OBJS)
|
[
"def IsExternal(self):\n return self.location is not None",
"def IsExternal(self):\n return isinstance(self.definition, XMLExternalID)",
"def is_not_known_attribute(cls, attr):\r\n return attr not in cls.known_attributes",
"def _check_rule_has_attribute(self, data_sources, conditions):\n return hasattr(data_sources['asset'], conditions['attribute']) and \\\n getattr(data_sources['asset'], conditions['attribute']) is not None",
"def _has_private_attribute(self):\n return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])",
"def is_external(url):\r\n return url.startswith(('//', 'http://', 'https://'))",
"def has_attribute(self, atributo):\r\n return atributo in self.__atributos",
"def _attr_exists(self, attr):\n\n if self.metadata and attr not in self.metadata:\n self._warn(\"Attribute [{attr}] does not exist. \" +\n \"Check for a typo or disable validation \" +\n \"by .set_validation(False) \".format(attr=attr))\n\n # Return True if attribute validation is disabled\n return False == self.attribute_validation\n\n return True",
"def attribute_check(obj, attribute):\n\n check_node(obj)\n\n dep_node = get_depend_node(obj)\n dep_fn = maya.api.OpenMaya.MFnDependencyNode()\n dep_fn.setObject(dep_node)\n return dep_fn.hasAttribute(attribute)",
"def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False",
"def hasAttribute(*args, **kwargs):\n \n pass",
"def check_external_mission(self, cr, uid, ids, context=None):\n for h in self.browse(cr, uid, ids, context=context):\n if h.type_mission == 'external':\n return True\n else:\n return False",
"def _inspect_descriptor(descriptor):\n # TODO memoize to cache these results\n data_keys = descriptor.data_keys\n is_external = defaultdict(lambda: False)\n for data_key, data_key_dict in data_keys.items():\n if (data_key_dict and 'external' in data_key_dict):\n is_external[data_key] = bool(data_key_dict['external'])\n return is_external",
"def is_endpoint_external(self):\n return self.endpoint in objects.EXTERNAL_END_POINTS",
"def _contains_required_fields(self):\n return LinkFile.FIELD_URL in self._data",
"def is_third_party(self) -> bool:\n return any(\n self.source.startswith(third_party_import_string)\n for third_party_import_string in self.third_party_import_strings\n )",
"def is_relationship_types_external(self, obj_dict):\n return (self.endpoint == objects.get_singular(objects.RELATIONSHIPS) and\n (any(x for x in objects.SINGULAR_DISABLED_OBJS\n if x.title() in (obj_dict[\"source\"][\"type\"],\n obj_dict[\"destination\"][\"type\"]))))",
"def startsWithAttribute(self, line):\r\n return line.startswith('@{0}'.format(self.attribute))",
"def attribute_has(nodeName, attributeName):\r\n\r\n # valider si le noeud possède l'attribut\r\n if maya.cmds.objExists(\"%s.%s\" % (nodeName, attributeName)):\r\n return True\r\n else:\r\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Log error and exit when use_gpu=true is set in the paddlepaddle cpu version.
|
def check_gpu(use_gpu):
err = "Config use_gpu cannot be set as true while you are " \
"using paddlepaddle cpu version ! \nPlease try: \n" \
"\t1. Install paddlepaddle-gpu to run model on GPU \n" \
"\t2. Set use_gpu as false in config file to run " \
"model on CPU"
try:
if use_gpu and not fluid.is_compiled_with_cuda():
logger.error(err)
sys.exit(1)
except Exception as e:
pass
|
[
"def check_gpu(use_gpu):\n err = \"Config use_gpu cannot be set as true while you are \" \\\n \"using paddlepaddle cpu version ! \\nPlease try: \\n\" \\\n \"\\t1. Install paddlepaddle-gpu to run model on GPU \\n\" \\\n \"\\t2. Set use_gpu as false in config file to run \" \\\n \"model on CPU\"\n if use_gpu:\n try:\n if not paddle.is_compiled_with_cuda():\n print(err)\n sys.exit(1)\n except:\n print(\"Fail to check gpu state.\")\n sys.exit(1)",
"def gpu_enable():\n return msg(\"GPU: enable\")",
"def dbg_basic():\n return not tf.test.is_gpu_available()",
"def test_failure_with_cpu_device_and_gpu_buffer():\n device = hoomd.device.CPU()\n snap = _make_two_particle_snapshot(device)\n sim = hoomd.Simulation(device)\n sim.create_state_from_snapshot(snap)\n custom_force = MyForce('gpu_local_force_arrays')\n thermostat = hoomd.md.methods.thermostats.MTTK(kT=1.0, tau=1.0)\n npt = md.methods.ConstantPressure(hoomd.filter.All(),\n thermostat=thermostat,\n S=1,\n tauS=1,\n couple=\"none\")\n integrator = md.Integrator(dt=0.005, forces=[custom_force], methods=[npt])\n sim.operations.integrator = integrator\n with pytest.raises(RuntimeError):\n sim.run(1)",
"def gpu_disable():\n return msg(\"GPU: disable\")",
"def environment_test():\n print(\"Tensorflow Version: %s\" % tf.__version__)\n print(\"GPU test: \" + str(tf.test.is_gpu_available()))",
"def cmd_error(self):\n self.log.setLevel(logging.ERROR)\n self.log.error('Switching to ERROR threshold')",
"def error(update, context): #в случае вознекновения ошибки она выводится в logger (здесь в поток вывода)\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)",
"def confirm_gpu_availability():\n a = th.FloatTensor(1).cuda()\n # Just make sure a is not somehow removed by any smart compiling,\n # probably not necessary.\n return a is not None",
"def main():\n print(\"CPU temp: \", str(get_cpu_temp()))\n print(\"GPU temp: \", str(get_gpu_temp()))",
"def error(update, context):\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)",
"def set_up_logging():\n tf.logging.set_verbosity(tf.logging.INFO)\n logging.getLogger('tensorflow').propagate = False",
"def test_cuda(digit_neural: DigitNeural, tests: Iterator[Tuple[Label, ChanneledImage]]) -> None:\n cuda_device = torch.device(\"cuda\")\n digit_neural.to(cuda_device)\n print(\"Model moved to CUDA!\")\n correct = wrong = 0\n tot_error = 0.0\n errs: List[Tuple[int, int, int, int, str, str]] = []\n with torch.no_grad(), open(ALL_LOG_PATH, \"w\") as file_obj_all:\n for i, (label, image) in enumerate(tests, 1):\n if not i % 1000:\n print(f\" Test {i}\")\n # Preparations\n image_tensor: torch.Tensor = torch.tensor([image])\n image_tensor = image_tensor.to(cuda_device)\n # Run net (forward)\n output: List[float] = digit_neural(image_tensor)[0].tolist()\n # Compute loss\n error = mse(output, label)\n tot_error += error\n # Check success\n success = max(output) == output[label]\n if success:\n correct += 1\n else:\n wrong += 1\n thought = output.index(max(output))\n path = os.path.join(ERR_DIR, f\"{label}-{thought}-{i}.png\")\n generate_img(image[0], path, MAG_RATIO)\n errs.append((label, thought, i, int(success),\n f\"{error:.6f}\", fl2s(output)))\n print(\n label,\n int(success),\n f\"{error:.6f}\",\n fl2s(output),\n file=file_obj_all\n )\n with open(ERR_LOG_PATH, \"w\") as file_obj_err:\n for err in sorted(errs):\n print(*err, file=file_obj_err)\n print(\"Errors written!\")\n total = correct + wrong\n print(f\"Correct: {correct}/{total} ({correct/total*100:.2f}%)\")\n print(f\"Wrong : {wrong}/{total} ({wrong/total*100:.2f}%)\")\n print(f\"Tot err: {tot_error}\")\n print(f\"Avg err: {tot_error/total}\")",
"def log_error(e):\r\n\tprint(e)",
"def main():\n\n @dppy.kernel\n def atomic_add(a):\n dppy.atomic.add(a, 0, 1)\n\n global_size = 100\n a = np.array([0])\n\n try:\n d = dpctl.select_gpu_device()\n with dpctl.device_context(d):\n print(\"Offloading to ...\")\n d.print_device_info()\n atomic_add[global_size, dppy.DEFAULT_LOCAL_SIZE](a)\n # Expected 100, because global_size = 100\n print(a)\n except ValueError:\n print(\"No SYCL GPU found.\")",
"def cpu_loss(self):\r\n\t\tif self.loss_fn is not None:\r\n\t\t\tself.loss_fn.cuda()",
"def memlog(gpu=None, msg=\"\"):\n return\n import torch.cuda\n import inspect\n usage = int(torch.cuda.memory_allocated(gpu) * 1e-6)\n prev = inspect.currentframe().f_back\n fname, lineno, fun, lines, index = inspect.getframeinfo(prev)",
"def ConfigurationError(msg):\n\n if not helpMode:\n print(msg)\n Exit(1)",
"def on_build_failure():\n\n message_title = f\"FireSim Xilinx Alveo {self.build_config.PLATFORM} FPGA Build Failed\"\n\n message_body = \"Your FPGA build failed for quintuplet: \" + self.build_config.get_chisel_quintuplet()\n\n rootLogger.info(message_title)\n rootLogger.info(message_body)\n\n build_farm.release_build_host(self.build_config)",
"def cli_on_gpu(\n usage_help: str = \"Run the model on GPU.\",\n *,\n default: bool = False,\n) -> callable:\n return click.option(\n \"--on-gpu\",\n type=bool,\n default=default,\n help=add_default_to_usage_help(usage_help, default),\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Log error and exit when the installed version of paddlepaddle does not satisfy the requirement.
|
def check_version():
    err = "PaddlePaddle version 1.6 or higher is required, " \
        "or a suitable develop version is satisfied as well. \n" \
        "Please make sure the version is good with your code."
try:
fluid.require_version('1.7.0')
except Exception as e:
logger.error(err)
sys.exit(1)
|
[
"def validate_python() -> None:\n if sys.version_info[:3] < REQUIRED_PYTHON_VER:\n print(\n \"ninewatt Device requires at least Python {}.{}.{}\".format(\n *REQUIRED_PYTHON_VER\n )\n )\n sys.exit(1)",
"def on_build_failure():\n\n message_title = f\"FireSim Xilinx Alveo {self.build_config.PLATFORM} FPGA Build Failed\"\n\n message_body = \"Your FPGA build failed for quintuplet: \" + self.build_config.get_chisel_quintuplet()\n\n rootLogger.info(message_title)\n rootLogger.info(message_body)\n\n build_farm.release_build_host(self.build_config)",
"def check_blender_version(op: bpy.types.Operator):\n if bpy.app.version[0] != 3 or bpy.app.version[1] != 6:\n op.report({'INFO'}, 'For Armory to work correctly, you need Blender 3.6 LTS.')",
"def test_GDPR_app_is_reachable(self):\n from gdpr.version import get_version\n get_version()\n assert_true(True)",
"def on_build_failure():\n\n message_title = \"FireSim Vitis FPGA Build Failed\"\n\n message_body = \"Your FPGA build failed for quintuplet: \" + self.build_config.get_chisel_quintuplet()\n\n rootLogger.info(message_title)\n rootLogger.info(message_body)\n\n build_farm.release_build_host(self.build_config)",
"def check_python_version():\n if sys.version < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {0}+ is required.\".format(MINIMUM_PYTHON_VERSION))",
"def check_python_version():",
"def check_python_version():\n\n if version_info.major < 3:\n print(\"Error: Python version not supported. Must use Python 3.x\")\n quit()",
"def dependencies_check():\n # enforce Python minimum version\n vsys_py = sys.version_info[:3] # 4th element is a string\n if (vsys_py < PYTHON_MIN):\n vmin_py_str = \".\".join((\"%d\" % i) for i in PYTHON_MIN)\n vsys_py_str = \".\".join((\"%d\" % i) for i in vsys_py)\n depfails.append((\"bad\", (\"need Python %s but running under %s: %s\"\n % (vmin_py_str, vsys_py_str, sys.executable))))\n # report problems & exit\n for (p, v) in depfails:\n ERROR(\"%s dependency: %s\" % (p, v))\n if (len(depfails) > 0):\n sys.exit(1)",
"def _raise_failure(self, output):\n lines = [\"Version could not be determined for %s:\" % (self.name,)]\n lines.append(\"\")\n lines.extend(self._describe_call())\n lines.append(\"\")\n\n # Raised if the JRE is too old compared to the JAR\n if \"UnsupportedClassVersionError\" in output:\n lines.extend([\n \"The version of the Java Runtime Environment on this\",\n \"system is too old; please check the the requirement\",\n \"for the program and upgrade your version of Java.\",\n \"\",\n \"See the documentation for more information.\",\n ])\n else:\n lines.append(\"Program may be broken or a version not supported by the\")\n lines.append(\"pipeline; please refer to the PALEOMIX documentation.\\n\")\n lines.append(\" Required: %s\" % (self.checks,))\n lines.append(\" Search string: %r\\n\" % (self._rege.pattern))\n lines.append(\"%s Command output %s\" % (\"-\" * 22, \"-\" * 22))\n lines.append(output)\n\n raise VersionRequirementError(\"\\n\".join(lines))",
"def verify_fail(self):\n raise MissingDependencyError(self, self.installed_version)",
"def test_h_python_command_pocket_version(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport sys\n\t\t\tif sys.__name__ is None:\n\t\t\t\traise ImportError(\"Failed to import system. WTF?!!\")\n\t\t\tfrom .context import piaplib as piaplib\n\t\t\tif piaplib.__version__ is not None:\n\t\t\t\ttheResult = False\n\t\t\tthepython = getPythonCommand()\n\t\t\tif (thepython is not None):\n\t\t\t\ttry:\n\t\t\t\t\ttheOutputtext = checkPythonCommand([\n\t\t\t\t\t\tstr(thepython),\n\t\t\t\t\t\tstr(\"-m\"),\n\t\t\t\t\t\tstr(\"piaplib.pocket\"),\n\t\t\t\t\t\tstr(\"--version\")\n\t\t\t\t\t], stderr=subprocess.STDOUT)\n\t\t\t\t\tif (str(piaplib.__version__) in str(theOutputtext)):\n\t\t\t\t\t\ttheResult = True\n\t\t\t\t\telse:\n\t\t\t\t\t\ttheResult = False\n\t\t\t\texcept Exception as othererr:\n\t\t\t\t\tprint(str(\"\"))\n\t\t\t\t\tprint(str(type(othererr)))\n\t\t\t\t\tprint(str(othererr))\n\t\t\t\t\tprint(str((othererr.args)))\n\t\t\t\t\tprint(str(\"\"))\n\t\t\t\t\tothererr = None\n\t\t\t\t\tdel othererr\n\t\t\t\t\ttheResult = False\n\t\texcept Exception as err:\n\t\t\tprint(str(\"\"))\n\t\t\tprint(str(type(err)))\n\t\t\tprint(str(err))\n\t\t\tprint(str((err.args)))\n\t\t\tprint(str(\"\"))\n\t\t\tothererr = None\n\t\t\tdel othererr\n\t\t\ttheResult = False\n\t\tassert theResult",
"def minver_error(pkg_name):\n print(\n 'ERROR: specify minimal version of \"{0}\" using '\n '\">=\" or \"==\"'.format(pkg_name),\n file=sys.stderr\n )\n sys.exit(1)",
"def warnask(app):\n # Warn about divergence\n warn = '\\nLocal service has diverged from remote or is inaccessible.'\n click.echo(colors.amber(warn), err=True)\n\n # Ask to upgrade\n upgrade = colors.teal('Attempt to upgrade before continuing?')\n if click.confirm(upgrade):\n # Upgrade\n app.repo.pull()\n click.echo(err=True)\n else:\n override = colors.teal('Continue without upgrading?')\n if not click.confirm(override):\n goodbye = 'Please resolve these changes before re-attempting.\\n'\n click.echo(goodbye, err=True)\n raise SystemExit(1)",
"def fail_on_npm_install():\n return 1",
"def error(update, context):\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)",
"def test_hook_startup_fail(self):\n # Parse log file for expected exception.\n self.assert_impalad_log_contains(\"INFO\",\n \"Exception during onImpalaStartup from \"\n \"QueryEventHook class {0}\"\n .format(self.FAILING_HOOK), expected_count=-1)",
"def _check_python_component(self):\n\n if self._target_py_version_nr is None:\n self._missing_component('python')",
"def perl_deps_missing():\n global REASON\n try:\n perl.PerlCheck(misc.Options(verbosity=1))\n except SkipOptionalCheck as e:\n REASON = str(e)\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The datetime that a reminder should be sent at
|
def send_reminder_at(self):
previous_meeting = self.study_group.meeting_set.active().filter(
models.Q(meeting_date__lt=self.meeting_date)
| models.Q(meeting_date=self.meeting_date, meeting_time__lt=self.meeting_time)
).order_by('-meeting_date', '-meeting_time').first()
two_days_before = self.meeting_datetime() - datetime.timedelta(days=2)
# ensure send_at is always after previous meeting finished
tz = pytz.timezone(self.study_group.timezone)
# subtract 5 seconds from now so that a past date technically stays in the past and can be sent
now = timezone.now().astimezone(tz) - datetime.timedelta(seconds=5)
if previous_meeting:
return max(now, max(two_days_before, previous_meeting.meeting_datetime() + datetime.timedelta(minutes=self.study_group.duration)))
return max(now, two_days_before)
|
[
"def datetime():\n return _get_rtc().datetime()",
"def get_followup_time(self):\n\n contact_time = arrow.get(self.time)\n reminder_time = contact_time.replace(minutes=+self.delta)\n return reminder_time",
"def get_alarm(self):\n return self.alarm_time",
"def received_time(self) -> datetime:\n pywintime = str(self._email.ReceivedTime)\n time_regex = re.compile(r'(\\d{4})-' # Year\n r'(\\d{2})-' # Month\n r'(\\d{2}) ' # Day\n r'(\\d{2}):' # Hours\n r'(\\d{2}):' # Minutes\n r'(\\d{2}).*'# Seconds\n )\n match = time_regex.match(pywintime)\n return datetime(*map(int, match.groups()))",
"def _workflow_time(self):\n return self.__time",
"def broadcasting_date(self):\n return self.__broadcasting_date",
"def datetime(self):\n return self.date_published.strftime('%Y-%m-%d %H:%M:%S')",
"def snap_time(self):\n\n if self.alert.is_daily():\n # Set delivery to next day.\n self.start_date = datetime(self.start_date.year, self.start_date.month, self.start_date.day)\n self.end_date = self.start_date + timedelta(days=1)\n elif self.alert.is_hourly():\n # Set delivery to next hour.\n self.start_date = datetime(self.start_date.year, self.start_date.month, self.start_date.day,\n self.start_date.hour)\n self.end_date = self.start_date + timedelta(hours=1)",
"def get_modifitication_date_time(self):\n return self._root[\"ModificationDateTime\"]",
"def timestamp(self) -> datetime:\n return self.context['embryo'].get('timestamp')",
"def getNowDatetime():\n return datetime.now()",
"def get_time(self):\n return self.event_time",
"def schedule_reminder(self):\n\n # Calculate the correct time to send this reminder\n appointment_time = arrow.get(self.start)\n reminder_time = appointment_time.shift(minutes=-5)\n now = arrow.now()\n milli_to_wait = int(\n (reminder_time - now).total_seconds()) * 1000\n\n # Schedule the Dramatiq task\n from .tasks import send_sms_reminder\n result = send_sms_reminder.send_with_options(\n args=(self.pk,),\n delay=milli_to_wait)\n\n return result.options['redis_message_id']",
"def determine_initial_sending_time(self):\n now = dj_tz.now()\n\n if self.start_type == 'immediate':\n initial_sending_time = now\n\n elif self.start_type == 'countdown':\n countdown_duration_as_time = dj_tz.datetime(1900, 1, 1, 0,\n self.start_time.minute,\n self.start_time.second)\n delta = countdown_duration_as_time - dj_tz.datetime(1900, 1, 1)\n initial_sending_time = now + delta\n\n elif self.start_type == 'specific_date':\n naive_sending_time = dj_tz.datetime(\n year=self.start_date.year,\n month=self.start_date.month,\n day=self.start_date.day,\n hour=self.start_time.hour,\n minute=self.start_time.minute,\n second=self.start_time.second\n )\n server_timezone = dj_tz.get_default_timezone()\n initial_sending_time = server_timezone.localize(naive_sending_time)\n\n if initial_sending_time < now:\n logger.info('[ ! ] Attempted to schedule for a datetime in the'\n ' past: {}'.format(str(initial_sending_time)))\n return None\n\n return initial_sending_time",
"def _get_timestamp(self):\n return datetime.datetime.now()",
"def calc_date(self):\n minutes = self.time * 5 # time_passed is in 5-minute units\n days_passed = minutes // 1440\n time_leftover = minutes % 1440\n hours_passed = time_leftover // 60\n min_passed = time_leftover % 60\n self.date = f\"{days_passed} days, {hours_passed} hours, {min_passed} min\"\n return self.date",
"def get_date_scraped(self):\n return datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")",
"def creation_datetime(self):\n return super()._to_datetime(self.creation_time)",
"def reminder_date(self, reminder_date):\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n reminder_date = self._utils.format_datetime(reminder_date, date_format='%Y-%m-%dT%H:%M:%SZ')\n self._data['reminderDate'] = reminder_date\n request = {'reminderDate': reminder_date}\n return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns weight of the connection.
|
def get_weight(self):
return self.weight
|
[
"def _get_connection_weight(self, weight):\n\n return weight if weight else uniform(-1, 1)",
"def get_sum_connections_weight(self):\n total = 0.0\n for conn in self.connections:\n total += conn.weight\n return total",
"def getWeight(self):\n return self.vertexWeight",
"def get_weight(self) -> float:\n return 0",
"def get_node_weight(self, key):\n return self._get(key, \"weight\")",
"def get_con_cost(self):\n\n\t\ttotal_weights = 0.0\n\t\tfor c in self.connections:\n\t\t\ttotal_weights += abs(c.getWeight())\n\n\t\treturn total_weights",
"def get_weight(self):\n return self.graph_weights.reshape(self.size_graph_rows, self.size_graph_cols)",
"def get_con_cost(self):\n\t\t\n\t\tweight_total = 0.0\n\t\tfor c in self.connections:\n\t\t\tweight_total += abs(c.getWeight())\n\t\t\n\t\treturn weight_total",
"def total_weight(self):\n return self.weight_fun(self.graph, self.path)",
"def getConnectionsWeights(self):\n return tuple(tuple([x[0].getOutputNeuron().getBias()] + [y.getWeight() for y in x]) for x in self.connectionsMatrix)",
"def operation_weight(self):\n ret = self._get_attr(\"operationWeight\")\n return ret",
"def get_node_weighted_connectivity(self,node):\n return float(sum(self.get_node_weights(node))) /\\\n ( float(sum(self.weights.values()))/2.0 )",
"def path_weight(self, path):\n # make sure the path exists\n if not self.is_path_valid(path):\n return math.inf\n w = 0 # init weight\n # add the weight of each connection between sequential vertices in path\n # from start to finish\n for i in range(0, len(path) - 1):\n w += self.vertices[path[i]].connections[path[i+1]]\n # return\n return w",
"def get_path_weight(self) -> float:\n raise NotImplementedError",
"def edge_weight(self, u, v):\n # make sure they're connected in the first place\n if not self.are_connected(u, v):\n return math.inf\n else:\n # return the value in vertex dictionary member connections at key v\n return self.vertices[u].connections[v]",
"def get_weight_for(username):\n with sql.connect(database_user) as cur:\n res = cur.execute(f\"\"\"\n SELECT weight \n From UserDatabase \n WHERE username='{username}';\n \"\"\")\n _weight = res.fetchone()[0]\n return int(_weight)",
"def get_weights(self):\r\n return self.weights # returning the weight matrix\r",
"def getWeightedValue():\n\t\tweight*value",
"def getWeight(self):\n return np.concatenate([self.weight.ravel()] * 4)",
"def get_edge_weight(self, start, end):\n return self.tree.edges[(start.prehash(), end.prehash())]['weight']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mutates the weight (assigns from a uniform distribution).
|
def mutate_weight(self):
self.weight += np.random.uniform(low = -2.0, high = 2.0)
return
|
[
"def shift_weight(self):\n # Shift a randomly selected weight\n gene: Gene = choice(self.__genes)\n rand = uniform(0, 1)\n if rand <= 0.2:\n # Perturb\n gene.weight += 0.1 * choice([-1, 1])\n elif 0.2 < rand <= 0.5:\n # New random value\n gene.weight = uniform(-1, 1)\n else:\n # Reflect\n gene.weight *= -1\n\n # Keep within [-1.0, 1.0]\n if gene.weight < 0:\n gene.weight = max(-1.0, gene.weight)\n else:\n gene.weight = min(1.0, gene.weight)",
"def weight_new(weights,stepsize,unused):\n\tnew_weights = np.zeros(len(weights))\n\tfor kk in range(0,len(weights)):\n\t\tif not kk in unused:\n\t\t\tnew_weights[kk] = weights[kk]+stepsize*np.random.normal()\n\t\tif new_weights[kk]<0:\n\t\t\tnew_weights[kk] = 0\n\tnew_weights/=np.sum(new_weights)\n\treturn new_weights",
"def add_weight(self, weight):\r\n\t\t\tself.weight += weight",
"def update_weight(self, learn_rate):\n pass",
"def set_weight(self, temp, weight):\n self.temp_dict[temp]['weight'] = weight",
"def adjust_weight(self, new_weight):\n self.weight = new_weight",
"def mutate(self, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < rate:\n weights[j] = np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < rate:\n self.biases[i][j] = np.random.normal(0, 1)",
"def mutate(weights, mutation_rate=0.01):\n for i in range(len(weights)):\n for j in range(len(weights[i])):\n if random.random() < mutation_rate:\n weights[i][j] += weights[i][j] * (random.random() - 0.5) * 3 + (random.random() - 0.5)\n # weights[i][j] = np.random.uniform(-1, 1)\n return weights",
"def update(self, update_value):\n self.weight += update_value\n # Make sure that weight is min 0\n if (self.weight<0):\n self.weight = 0",
"def update(self, index, weight, grad, state):\n weight[:] += grad * self.rescale_grad\n state[:] = weight",
"def weight(self, weight, persister=None):\n assert(weight > 0.0)\n persister.exec_stmt(MySQLServer.UPDATE_SERVER_WEIGHT,\n {\"params\":(weight, str(self.uuid))})\n self.__weight = weight",
"def _lockup_weights_one(self):\n for i in self._lockup_weights_ind:\n self.reward_weights[i] = 1.0",
"def update(self):\n self.weight_mom[self.index] = self.sub_weight_mom\n self.weight[self.index] = self.sub_weight",
"def reinit_weights(self):\n self.w = 0.01 * np.random.randn(self.prev_layer.get_shape()[0], self.nodes)",
"def add_new_weight(self):\n self.weight = add_new_features(self.weight)",
"def set_weights(self, weights):\n self.weights = copy.deepcopy(weights)",
"def getWeightedValue():\n\t\tweight*value",
"def animal_weight_with_age(self):\n self.age += 1\n self.weight -= self.parameters['eta'] * self.weight",
"def normalize_weight(w):\n return w.numpy() / np.linalg.norm(w.numpy())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
loglike, grad = pylag.mlfit.MLFit.log_likelihood(params, eval_gradient=True) Evaluate log(marginal likelihood), as well as its gradient, for the covariance matrix defined by some set of input parameters, applied to the data points we have. Based on Algorithm 2.1 of Rasmussen & Williams “Gaussian Processes for Machine Learning”, the MIT Press, 2006, and implemented in the scikit-learn GaussianProcessRegressor.
|
def log_likelihood(self, params, eval_gradient=True):
c = self.cov_matrix(params)
# add white noise along the leading diagonal
# this should be the Poisson noise term when calculating a PSD
if self.noise is not None:
c += np.diag(self.noise)
try:
L = cho_factor(c, lower=True, check_finite=False)[0]
except np.linalg.LinAlgError:
try:
# try doubling the noise first
L = cho_factor(c + np.diag(self.noise), lower=True, check_finite=False)[0]
except np.linalg.LinAlgError:
#printmsg(2, "WARNING: Couldn't invert covariance matrix with parameters " + param2array(params))
return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6) if eval_gradient else -1e6
except ValueError:
return (np.inf, np.zeros(len([p for p in params if params[p].vary]))) if eval_gradient else -np.inf
alpha = cho_solve((L, True), self.data, check_finite=False)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", self.data, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= c.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1)
if eval_gradient:
c_gradient = self.cov_matrix_deriv(params)
tmp = np.einsum("ik,jk->ijk", alpha, alpha)
tmp -= cho_solve((L, True), np.eye(c.shape[0]))[:, :, np.newaxis]
gradient_dims = 0.5 * np.einsum("ijl,ijk->kl", tmp, c_gradient)
gradient = gradient_dims.sum(-1)
    # note: the sign flip to -log(likelihood) for minimisation is applied in the objective wrapper inside _dofit()
return (log_likelihood, gradient) if eval_gradient else log_likelihood
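
For reference, the quantity computed above is the standard Gaussian-process log(marginal likelihood) and its gradient with respect to the hyperparameters (cf. Rasmussen & Williams 2006, eqs. 2.30 and 5.9), written here for the covariance matrix K = c + diag(noise), its Cholesky factor L and alpha = K^{-1} y; the trailing einsum index simply sums the expression over the columns of the data array when several data vectors share one covariance matrix:

\[
\log p(\mathbf{y}\,|\,\theta) \;=\; -\tfrac{1}{2}\,\mathbf{y}^{\mathsf{T}} K^{-1} \mathbf{y} \;-\; \sum_i \log L_{ii} \;-\; \tfrac{n}{2}\log 2\pi,
\qquad \sum_i \log L_{ii} = \tfrac{1}{2}\log\lvert K \rvert
\]

\[
\frac{\partial}{\partial \theta_j}\,\log p(\mathbf{y}\,|\,\theta)
\;=\; \tfrac{1}{2}\,\operatorname{tr}\!\left[\left(\boldsymbol{\alpha}\boldsymbol{\alpha}^{\mathsf{T}} - K^{-1}\right)\frac{\partial K}{\partial \theta_j}\right],
\qquad \boldsymbol{\alpha} = K^{-1}\mathbf{y}
\]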
|
[
"def log_likelihood(self, params, eval_gradient=True):\n if eval_gradient:\n segment_loglike = [c.log_likelihood(params, eval_gradient) for c in self.mlcross_spec]\n # separate and sum the likelihoods and the gradients\n like = np.array([l[0] for l in segment_loglike])\n grad = np.array([l[1] for l in segment_loglike])\n if np.all(np.isfinite(like)):\n return np.sum(like), grad.sum(axis=0)\n else:\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6)\n else:\n return np.sum([c.log_likelihood(params, eval_gradient) for c in self.mlcross_spec])",
"def compute_log_likelihood(X, params):\n m, n, _ = X.shape\n likelihood = 0.\n for i in range(m):\n p_y_0 = p_y(0, params)\n p_y_1 = p_y(1, params)\n for j in range(n):\n x = X[i,j]\n p_y_0 += log_sum_exp(p_x_z(x,0,params) + p_z_y(0,0,params), p_x_z(x,1,params) + p_z_y(1,0,params))\n p_y_1 += log_sum_exp(p_x_z(x,0,params) + p_z_y(0,1,params), p_x_z(x,1,params) + p_z_y(1,1,params))\n likelihood += log_sum_exp(p_y_0, p_y_1)\n\n return likelihood",
"def grad_log_likelihood(kc, cb, eval_request, eval_result, model_params):\n if eval_request.type != KN_RC_EVALGA:\n print(\"*** grad_log_likelihood incorrectly called with eval type %d\" %\n eval_request.type)\n return -1\n params = eval_request.x\n\n np.savetxt(\"current_pars_k.txt\", params)\n\n mus_and_maybe_grad = model_params.mus_and_maybe_grad\n bases_surplus = model_params.bases_surplus\n observed_matching = model_params.observed_matching\n\n ncat_men, ncat_women = bases_surplus.shape[:-1]\n n_prod_categories = ncat_men * ncat_women\n\n mus, _, dmus = mus_and_maybe_grad(params, model_params, gr=True)\n\n grad_loglik = grad_loglik_all_mus(observed_matching, mus)\n\n gradN = grad_loglik[-1]\n gradxy = grad_loglik[:n_prod_categories].reshape(\n (ncat_men, ncat_women)) + gradN\n gradx0 = grad_loglik[n_prod_categories:(\n n_prod_categories + ncat_men)] + gradN\n grad0y = grad_loglik[(n_prod_categories + ncat_men):-1] + gradN\n\n der_muxy = np.einsum('ij,ijk->k', gradxy, dmus.muxy)\n der_mux0 = np.einsum('i,ik->k', gradx0, dmus.mux0)\n der_mu0y = np.einsum('i,ik->k', grad0y, dmus.mu0y)\n\n eval_result.objGrad = -(der_muxy + der_mux0 + der_mu0y)\n\n return 0",
"def GSM_log_likelihood(X, model):\n sum = 0\n for i in range(len(model.mix)):\n sum += logsumexp(np.log(model.mix[i]) + multivariate_normal.logpdf(X.T, cov=model.cov[i]))\n return sum",
"def logL(self, X, Xerr):\n if self.V is None or self.mu is None or self.weights is None:\n raise Exception(\"Model parameters not set.\")\n \n return self.GMM.logL(X,Xerr)",
"def log_marginal_likelihood(self, X):\n pass",
"def log_likelihood_grad(X, Y, w, C=0.1):\n \n # d is dimensionality of a sample.\n d = len(w)\n # N is #training samples.\n N = len(X)\n s = np.zeros(d)\n \n for i in range(N):\n s += Y[i] * X[i] * logistic(-Y[i] * np.dot(X[i], w))\n \n s -= C*w\n return s",
"def log_likelihood_function(self, instance):\r\n\r\n \"\"\"\r\n In the previous tutorial the instance was a single `Gaussian` profile, meaning we could create the model data \r\n using the line:\r\n\r\n model_data = instance.gaussian.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n In this tutorial our instance is comprised of multiple 1D Gaussians, because we will use a `Collection` to\r\n compose the model:\r\n\r\n model = Collection(gaussian_0=Gaussian, gaussian_1=Gaussian).\r\n\r\n By using a Collection, this means the instance parameter input into the fit function is a\r\n dictionary where individual profiles (and their parameters) can be accessed as followed:\r\n\r\n print(instance.gaussian_0)\r\n print(instance.gaussian_1)\r\n print(instance.gaussian_0.centre)\r\n\r\n In this tutorial, the `model_data` is therefore the summed `model_data` of all individual Gaussians in the \r\n model. The function `model_data_from_instance` performs this summation. \r\n \"\"\"\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n chi_squared = sum(chi_squared_map)\r\n noise_normalization = np.sum(np.log(2 * np.pi * noise_map**2.0))\r\n log_likelihood = -0.5 * (chi_squared + noise_normalization)\r\n\r\n return log_likelihood",
"def gradFunc(param):\n\n return np.array(\n GeneralizedExtremeValueDistribution.computeNegLogLikelihoodGrad(\n param[0], param[1], param[2], data\n ))",
"def grad_loglik_all_mus(observed_matching, simulated_matching):\n\n muxy_sim, mux0_sim, mu0y_sim = simulated_matching.unpack()\n n_households_sim = np.sum(muxy_sim) + np.sum(mux0_sim) + np.sum(mu0y_sim)\n\n muxy_obs, mux0_obs, mu0y_obs = observed_matching.unpack()\n n_households_obs = np.sum(muxy_obs) + np.sum(mux0_obs) + np.sum(mu0y_obs)\n\n der_x0 = mux0_obs * der_nplog(mux0_sim)\n der_0y = mu0y_obs * der_nplog(mu0y_sim)\n der_xy = muxy_obs * der_nplog(muxy_sim)\n n_prod_categories, ncat_men, ncat_women = \\\n muxy_obs.size, mux0_obs.size, mu0y_obs.size\n grad_loglik = np.zeros(n_prod_categories + ncat_men + ncat_women + 1)\n grad_loglik[:n_prod_categories] = der_xy.reshape(n_prod_categories)\n grad_loglik[n_prod_categories:(n_prod_categories + ncat_men)] \\\n = der_x0\n grad_loglik[(n_prod_categories + ncat_men):-1] \\\n = der_0y\n grad_loglik[-1] = \\\n -n_households_obs * der_bslog(n_households_sim)\n\n return grad_loglik",
"def log_marginal_likelihood(self, X):\n theta, beta = self.get_model()\n N,_ = X.shape \n logmarg_lk = 0.0\n for i in range(N):\n logmarg_lk += self.util_logmarginal_ind(X[i], theta, beta)\n return logmarg_lk\n # raise NotImplementedError",
"def evaluate(self, params: np.ndarray) -> float:\n kl = 0\n for sample in self.data:\n kl += np.log(self.vgbs.prob_sample(params, sample))\n return -kl / self.nr_samples",
"def objective(par_arr):\n fit_params = copy.copy(params)\n for par, value in zip([p for p in params if params[p].vary], par_arr):\n fit_params[par].value = value\n return self.log_likelihood(fit_params, eval_gradient=False)",
"def MVN_log_likelihood(X, model):\n\n return logsumexp(multivariate_normal.logpdf(X.T, mean=model.mean, cov=model.cov))",
"def d_log_likelihood_lam(self, lam):\n\n result = np.sum(self.x[:, np.newaxis, :] * np.exp(np.dot(self.x, self.lam.T))[:, :, np.newaxis] \\\n * (digamma(np.sum(np.exp(np.dot(self.x, self.lam.T)), axis=1))[:,np.newaxis,np.newaxis]\\\n - digamma(np.sum(self.n_td+np.exp(np.dot(self.x, self.lam.T)), axis=1))[:,np.newaxis,np.newaxis]\\\n + digamma(self.n_td+np.exp(np.dot(self.x, self.lam.T)))[:,:,np.newaxis]\\\n - digamma(np.exp(np.dot(self.x, self.lam.T)))[:,:,np.newaxis]), axis=0)\\\n - lam / (self.sigma ** 2)\n result = -result\n return result",
"def MVN_log_likelihood(X, model):\n return np.sum(multivariate_normal.logpdf(X.T, model.mean, model.cov))",
"def log_likelihood(self, *args, context=None):\n\n if self.owner is None:\n raise ValueError(\n \"Cannot compute a log-likelihood without being assigned as the function of an \"\n \"OptimizationControlMechanism. See the documentation for the \"\n \"ParameterEstimationControlMechanism for more information.\"\n )\n\n # Make sure we have instantiated the log-likelihood function.\n if self._ll_func is None:\n self._ll_func = self._make_objective_func(context=context)\n\n context.execution_phase = ContextFlags.PROCESSING\n ll, sim_data = self._ll_func(*args)\n context.remove_flag(ContextFlags.PROCESSING)\n\n return ll, sim_data",
"def logprob_a(self, X, Xerr):\n if self.V is None or self.mu is None or self.weights is None:\n raise Exception(\"Model parameters not set.\")\n \n return self.GMM.logprob_a(X,Xerr)",
"def ln_likelihood(par, per_obs=False):\n # if we are outside the allowable parameter ranges, return 0\n # likelihood.\n #import pdb; pdb.set_trace()\n for i,p in enumerate(par):\n if not (P['min'][i] < p < P['max'][i]):\n return -np.inf\n # force the cloud thickness to be < 1 Mpc\n coord = par[IND_PAR['NHI']], par[IND_PAR['nH']], par[IND_PAR['Z']], \\\n par[IND_PAR['aUV']]\n logNH = Ncloudy['NH'](coord) \n if (logNH - par[IND_PAR['nH']]) > log10_cm_per_Mpc:\n return -np.inf\n \n \n Nmodel = model(par)\n\n lnprobtot = np.zeros(np.asarray(par[0]).shape)\n\n for pname in priors:\n if pname.startswith('min ') or pname.startswith('max '):\n continue\n # only deals with two-sided gaussian priors at the moment\n pval, siglo, sighi = priors[pname]\n p = ln_pdf_siglohi(par[IND_PAR[pname]], pval, siglo, sighi)\n lnprobtot += p\n\n allprob = []\n for i,tr in enumerate(trans):\n Nobs, siglo, sighi = obs[tr]\n if siglo == 0:\n #print(tr, 'lower limit')\n p = ln_pdf_lolim(Nmodel[i], Nobs, SIG_LIMIT)\n lnprobtot += p\n if per_obs:\n allprob.append(p)\n elif sighi == 0:\n #print(tr, 'upper limit')\n p = ln_pdf_uplim(Nmodel[i], Nobs, SIG_LIMIT)\n lnprobtot += p\n if per_obs:\n allprob.append(p)\n else:\n #print(tr)\n siglo = max(siglo, MIN_SIG)\n sighi = max(sighi, MIN_SIG)\n p = ln_pdf_siglohi(Nmodel[i], Nobs, siglo, sighi)\n lnprobtot += p\n if per_obs:\n allprob.append(p)\n\n if per_obs:\n return lnprobtot, allprob\n else:\n return lnprobtot"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pylag.mlfit.MLFit.set_param(param, value, min, max) Set the value, lower and upper bounds for a parameter. If any of value, min or max are None, these values will not be altered.
|
def set_param(self, param, value=None, min=None, max=None, vary=None):
if value is not None:
self.params[param].value = value
if min is not None:
self.params[param].min = min
if max is not None:
self.params[param].max = max
if vary is not None:
self.params[param].vary = vary
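
A minimal usage sketch of this setter, assuming fit is an MLFit instance and using a hypothetical parameter name ('ln_variance' stands in for any key present in fit.params):

fit.set_param('ln_variance', value=-2.0, min=-10.0, max=10.0)  # update the value and both bounds
fit.set_param('ln_variance', vary=False)                       # freeze the parameter; value and bounds are left unchanged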
|
[
"def setparam(self, param, value):\n\t\treturn self.__command(\"param.set %s %s\" % (param, value))",
"def set_parameter_guess_and_limits(parameter_dictionary, param_name, guess_min_max):\n if 'parameter_info' not in parameter_dictionary:\n print(\"Provided parameter dictionary did not have 'parameter_info' as a key.\")\n return\n param_info = parameter_dictionary['parameter_info']\n list_of_paramnames = []\n which_parameter = -1\n for i,param in enumerate(param_info):\n list_of_paramnames.append(param['parname'])\n if param_name==param['parname']:\n which_parameter = i\n if param_name in list_of_paramnames:\n parameter_dictionary['parameter_info'][which_parameter]['value'] = float(guess_min_max[0])\n #print(f\"Parameter '{param_name}' set to {guess_min_max[0]}.\")\n parameter_dictionary['parameter_info'][which_parameter]['limits'][0] = float(guess_min_max[1])\n #print(f\"Parameter '{param_name}' lower limit set to {guess_min_max[1]}.\")\n parameter_dictionary['parameter_info'][which_parameter]['limits'][1] = float(guess_min_max[2])\n #print(f\"Parameter '{param_name}' upper limit set to {guess_min_max[2]}.\")\n else:\n print(f\"Parameter '{param_name}' not found in model 'parameter_info'. Check for typos.\")\n #turn_parameters_into_dataframe_for_display(parameter_dictionary['parameter_info'])",
"def _set_parameter(self, par, val):\n self._parchk(par)\n setattr(self, par, float(val))",
"def set_varied(self, section, name, lower, upper):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (lower,upper)\n self.reset_fixed_varied_parameters()",
"def set_param(self, name, value):\n param = self._find_param(name)\n if param is not None:\n try:\n param.value = value\n except:\n pass\n else:\n return True\n return False",
"def set_fixed(self, section, name, value):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (value, value)\n self.reset_fixed_varied_parameters()",
"def bounded_parameter(low, high, param):\n affine = tfb.AffineScalar(shift=tf.cast(low, tf.float64),\n scale=tf.cast(high - low, tf.float64))\n sigmoid = tfb.Sigmoid()\n logistic = tfb.Chain([affine, sigmoid])\n parameter = gpf.Parameter(param, transform=logistic, dtype=tf.float64)\n return parameter",
"def _set_param(self, param: \"Param\") -> None:\n if param.namespaces(0):\n param.ns_param.push(param, -1)\n else:\n for name in param.names:\n self.params[name] = param",
"def setParam(self, layer, numParam, blob) -> None:\n ...",
"def clamp(value, parameter):\n value = parameter.min if value < parameter.min else value\n value = parameter.max if value > parameter.max else value\n return value",
"def clim_set(self, parameter, value):\n if isinstance(value, tuple):\n cmin, cmax = value\n self._run_code(f'self.img_2D.setLevels(({cmin}, {cmax}))')",
"def set_parameter(self, param, value, location=3):\n self.reb.set_parameter(param, value, self.stripe, location)\n logging.info(\"Set REB parameter %s to %s at location %d\" % (param, repr(value), location))",
"def set_params_range(self):\n pass",
"def _set_pmax(self, name: str, value: float) -> None:\n self.parameters.loc[name, \"pmax\"] = value",
"def set_boundary(self, parameter, new_boundaries):\n\n obj = self._model if parameter in self._model.fittingParameters \\\n else self._observed\n name, latex, fget, fset, mode, to_fit, bounds = \\\n obj.fittingParameters[parameter]\n\n bounds = new_boundaries\n\n obj.fittingParameters[parameter] = (\n name, latex, fget, fset, mode, to_fit, bounds)",
"def setActiveParameter(self, value, parameter, leg=0):\n if parameter in self.active[__tp__][leg]:\n self.active[__tp__][leg][parameter] = value\n else:\n raise ValueError",
"def set_parameter(cls, param_name, config):\n if config == None:\n if param_name in cls.parameters:\n del cls.parameters[param_name]\n else:\n cls.parameters[param_name] = config",
"def param(paramname, minreps=None, maxreps=None, pattern=None, default=None, doc=None):\n import validation\n def deco(fn):\n fn, props = _decorate_once(fn)\n request_filters = props.setdefault('request_filters', [])\n if validation.check_no_params in request_filters:\n raise RuntimeError(\"Can't decorate with param and noparams\")\n if validation.check_valid_params not in request_filters:\n request_filters.append(validation.check_valid_params)\n constraints = props.setdefault('valid_params', {})\n if paramname in entities:\n warnings.warn('Parameter name %s is also an HTML entity name. '\n 'This may lead to problems if resulting URLs are '\n 'not correctly escaped when copied into HTML. It '\n 'may be better to use a different parameter name.'\n % paramname, UserWarning)\n if paramname in constraints:\n raise RuntimeError(\"Already set validation constraints for \"\n \"parameter '%s'\" % paramname)\n compiled_pattern = None\n if pattern is not None:\n compiled_pattern = re.compile(pattern, re.UNICODE)\n constraints[paramname] = (minreps, maxreps, pattern,\n compiled_pattern, default, doc)\n return fn\n return deco",
"def set_default(self, param, defval):\n if param not in self._rpars:\n raise ClineError(\n 'set_default: parameter = \"' + param +\n '\" has not been registered.'\n )\n\n if self._rpars[param]['g_or_l'] == Cline.GLOBAL:\n self._gpars[param] = defval\n else:\n self._lpars[param] = defval"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
result = pylag.mlfit.MLFit._dofit(init_params, method='L-BFGS-B', **kwargs) Function to actually perform the minimisation of -log(likelihood). This method is not normally called on its own; rather, it is called by the fit() or steppar() method.
|
def _dofit(self, init_params, method='L-BFGS-B', **kwargs):
initial_par_arr = np.array([init_params[p].value for p in init_params if init_params[p].vary])
bounds = [(init_params[p].min, init_params[p].max) for p in init_params if init_params[p].vary] if method == 'L-BFGS-B' else None
def objective(par_arr):
"""
wrapper around log_likelihood method to evaluate for an array of just the variable parameters,
which can be used directly with scipy.optimise methods.
"""
fit_params = copy.copy(init_params)
for par, value in zip([p for p in init_params if init_params[p].vary], par_arr):
fit_params[par].value = value
l, g = self.log_likelihood(fit_params, eval_gradient=True)
print("\r-log(L) = %6.3g" % l + " for parameters: " + ' '.join(['%6.3g' % p for p in param2array(fit_params)]), end="")
return -l, -g
result = minimize(objective, initial_par_arr, method=method, jac=True, bounds=bounds, **kwargs)
print("\r-log(L) = %6.3g" % result.fun + " for parameters: " + " ".join(['%6.3g' % p for p in param2array(array2param(result.x, init_params))]))
return result
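
The optimiser call above follows the usual scipy.optimize.minimize pattern: with jac=True the objective is expected to return a (value, gradient) pair, and method='L-BFGS-B' accepts one (min, max) bound per free parameter. A self-contained toy sketch of that pattern, with a simple quadratic standing in for -log(L) (illustrative only, not pylag code):

import numpy as np
from scipy.optimize import minimize

def objective(x):
    # toy stand-in for -log(L): a quadratic bowl centred on (1, -2), returned with its analytic gradient
    target = np.array([1.0, -2.0])
    value = 0.5 * np.sum((x - target) ** 2)
    grad = x - target
    return value, grad

result = minimize(objective, x0=np.zeros(2), method='L-BFGS-B',
                  jac=True, bounds=[(-10.0, 10.0), (-10.0, 10.0)])
print(result.fun, result.x)  # converges to ~0 at approximately [1., -2.]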
|
[
"def fit(self):\n ln_l_all_array = [] #array of all log likelihoods\n ln_l_max = float(\"-inf\") #keep track of the maximum likelihood\n cp_parameter_array = None #parameters with the maximum likelihood\n #for multiple initial values\n for i in range(self.n_initial):\n print(\"initial value\", i)\n print(\"gradient descent\")\n super().fit() #regular gradient descent\n #copy the log likelihood\n for ln_l in self.ln_l_array:\n ln_l_all_array.append(ln_l)\n #check for convergence in the log likelihood\n ln_l = ln_l_all_array[len(ln_l_all_array)-1]\n if ln_l > ln_l_max:\n #the log likelihood is bigger, copy the parmeters\n ln_l_max = ln_l\n self.ln_l_max_index = len(ln_l_all_array)-1\n cp_parameter_array = self.copy_parameter()\n #do stochastic gradient descent to get a different initial value\n if i < self.n_initial-1:\n print(\"stochastic gradient descent\")\n #track when stochastic gradient descent was done for this entry\n #of ln_l_array\n self.ln_l_stochastic_index.append(len(ln_l_all_array))\n for j in range(self.n_stochastic_step):\n print(\"step\", j)\n self.m_stochastic_step()\n self.update_all_cp_parameters()\n ln_l_all_array.append(self.get_em_objective())\n #track when gradient descent was done\n #the E step right after this in super().fit() is considered part\n #of stochastic gradient descent\n self.ln_l_stochastic_index.append(len(ln_l_all_array)+1)\n else:\n self.ln_l_stochastic_index.append(len(ln_l_all_array))\n #copy results to the member variable\n self.ln_l_array = ln_l_all_array\n self.set_parameter(cp_parameter_array)\n self.e_step()",
"def test_funMotifs_logit(self):\r\n mtcars = sm.datasets.get_rdataset(\"mtcars\", \"datasets\", cache=True).data\r\n df = pd.DataFrame(mtcars)\r\n outcome_col = 'vs'\r\n col_weight = ['disp', 'wt']\r\n x = WeightFeatures.funMotifs_logit(df[outcome_col], df[col_weight])\r\n\r\n # get values from R results (fastLR() from RcppNumerical package)\r\n wt_coeff = 1.626353\r\n disp_coeff = -0.0344337\r\n\r\n # get values from R result sm.Logit(method=bfgs)\r\n py_wt_coeff = x.params['wt']\r\n py_disp_coeff = x.params['disp']\r\n\r\n\r\n # check that results are essentially the same\r\n assert abs(wt_coeff - py_wt_coeff) < 0.001\r\n assert abs(disp_coeff - py_disp_coeff) < 0.001\r\n\r\n return x",
"def log_likelihood(self, *args, context=None):\n\n if self.owner is None:\n raise ValueError(\n \"Cannot compute a log-likelihood without being assigned as the function of an \"\n \"OptimizationControlMechanism. See the documentation for the \"\n \"ParameterEstimationControlMechanism for more information.\"\n )\n\n # Make sure we have instantiated the log-likelihood function.\n if self._ll_func is None:\n self._ll_func = self._make_objective_func(context=context)\n\n context.execution_phase = ContextFlags.PROCESSING\n ll, sim_data = self._ll_func(*args)\n context.remove_flag(ContextFlags.PROCESSING)\n\n return ll, sim_data",
"def fit_lfp_likelihood(ripple_band_power, is_replay,\n model=GaussianMixture,\n model_kwargs=dict(n_components=3)):\n\n not_nan = np.all(~np.isnan(ripple_band_power), axis=1)\n replay_model = model(**model_kwargs).fit(\n np.log(ripple_band_power[is_replay & not_nan] + np.spacing(1)))\n no_replay_model = model(**model_kwargs).fit(\n np.log(ripple_band_power[~is_replay & not_nan] + np.spacing(1)))\n\n return partial(lfp_likelihood, replay_model=replay_model,\n no_replay_model=no_replay_model)",
"def lmfit(self, star, logger=None):\n import lmfit\n logger = galsim.config.LoggerWrapper(logger)\n params = self._lmfit_params(star)\n results = self._lmfit_minimize(params, star, logger=logger)\n if logger:\n logger.debug(lmfit.fit_report(results))\n flux, du, dv, scale, g1, g2 = results.params.valuesdict().values()\n if not results.success:\n raise RuntimeError(\"Error fitting with lmfit.\")\n\n try:\n params_var = np.diag(results.covar)\n except (ValueError, AttributeError) as e:\n logger.warning(\"Failed to get params_var\")\n logger.warning(\" -- Caught exception: %s\",e)\n # results.covar is either None or does not exist\n params_var = np.zeros(6)\n\n return flux, du, dv, scale, g1, g2, params_var",
"def log_likelihood_function(self, instance):\r\n\r\n \"\"\"\r\n In the previous tutorial the instance was a single `Gaussian` profile, meaning we could create the model data \r\n using the line:\r\n\r\n model_data = instance.gaussian.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n In this tutorial our instance is comprised of multiple 1D Gaussians, because we will use a `Collection` to\r\n compose the model:\r\n\r\n model = Collection(gaussian_0=Gaussian, gaussian_1=Gaussian).\r\n\r\n By using a Collection, this means the instance parameter input into the fit function is a\r\n dictionary where individual profiles (and their parameters) can be accessed as followed:\r\n\r\n print(instance.gaussian_0)\r\n print(instance.gaussian_1)\r\n print(instance.gaussian_0.centre)\r\n\r\n In this tutorial, the `model_data` is therefore the summed `model_data` of all individual Gaussians in the \r\n model. The function `model_data_from_instance` performs this summation. \r\n \"\"\"\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n chi_squared = sum(chi_squared_map)\r\n noise_normalization = np.sum(np.log(2 * np.pi * noise_map**2.0))\r\n log_likelihood = -0.5 * (chi_squared + noise_normalization)\r\n\r\n return log_likelihood",
"def logL(self, X, Xerr):\n if self.V is None or self.mu is None or self.weights is None:\n raise Exception(\"Model parameters not set.\")\n \n return self.GMM.logL(X,Xerr)",
"def powerlaw_cutoff_loglikelihood(params, x, xmin=1.):\n\n alpha, lamb = params[0], params[1]\n a = x.size * np.log(lamb ** (1 - alpha) / float(mpmath.gammainc(1 - alpha, lamb * xmin)))\n result = a - alpha * np.log(x).sum() - lamb * x.sum()\n\n return -result",
"def fit(self, init_params=None, update_params=True, **kwargs):\n if init_params is None:\n init_params = self.params\n\n self.fit_result = self._dofit(init_params, **kwargs)\n print(self.fit_result)\n\n if True or self.fit_result.success and update_params:\n for par, value in zip([p for p in init_params if init_params[p].vary], self.fit_result.x):\n self.params[par].value = value\n\n hess = self.fit_result.hess_inv(self.fit_result.x) if callable(self.fit_result.hess_inv) else np.diag(self.fit_result.hess_inv)\n\n # make sure we only get the finite parameter errors\n self.param_error = np.zeros(len(self.params))\n self.param_error[hess>0] = hess[hess>0] ** 0.5\n\n self.process_fit_results(self.fit_result, self.params)",
"def fit(pdf, prior, parameters, observations, iter=1000, lr=0.1):\n\n for i in range(iter):\n # Define objective function (log-likelihood) to maximize\n prior_ = torch.log(prior(parameters))\n posterior = torch.mean(torch.log(pdf(observations))) + prior_\n\n if np.isnan(posterior.data[0]) or np.isnan(prior_.data[0]):\n return\n\n # Determine gradients\n posterior.backward()\n\n # Update parameters with gradient descent\n for param in parameters:\n param.data.add_(lr * param.grad.data)\n param.grad.data.zero_()",
"def minimize_lbfgs(func, x_0, jac=None, history_size=20, maxiter=None, norm=mnp.inf, gtol=1e-5, line_search_maxiter=10):\n if jac is None:\n jac = grad(func)\n\n if maxiter is None:\n maxiter = ops.size(x_0) * 200\n\n state = AlgorithmLbfgs(func, jac, history_size).construct(x_0, maxiter, norm, gtol, line_search_maxiter)\n results = GradientOptimizationResults(converged=_to_scalar(state.get(\"converged\")),\n failed=_to_scalar(state.get(\"failed\")),\n k=_to_scalar(state.get(\"k\")),\n nfev=_to_scalar(state.get(\"nfev\")),\n ngev=_to_scalar(state.get(\"ngev\")),\n nhev=_to_scalar(state.get(\"nhev\")),\n x_k=state.get(\"x_k\"),\n f_k=_to_scalar(state.get(\"f_k\")),\n g_k=state.get(\"g_k\"),\n H_k=state.get(\"H_k\"),\n old_old_fval=_to_scalar(state.get(\"old_old_fval\")),\n status=_to_scalar(state.get(\"status\")),\n line_search_status=_to_scalar(state.get(\"line_search_status\")))\n\n return results",
"def forward(log_emlik, log_startprob, log_transmat):",
"def MLfit_GLM(gg, Stim, optimArgs=None):\n\n # Set optimization parameters\n algopts=getFminOptsForVersion(version)\n if nargin > 2:\n opts=optimset(algopts[:],optimArgs[:])\n else:\n opts=optimset(algopts[:])\n\n # --- Create design matrix extract initial params from gg ----------------\n prs0,Xstruct=setupfitting_GLM(gg,Stim,nargout=2)\n\n # --- Set loss function --------------------------------------------------\n #if isequal(Xstruct.nlfun,expfun) or isequal(Xstruct.nlfun,exp):\n lfunc=lambda prs=None: Loss_GLM_logli_exp(prs,Xstruct)\n #else:\n # lfunc=lambda prs=None: Loss_GLM_logli(prs,Xstruct)\n\n # --- minimize negative log likelihood --------------------\n prsML,neglogli=fminunc(lfunc,prs0,opts)\n\n # Compute Hessian if desired\n # if nargout > 2:\n # neglogli,__,H=Loss_GLM_logli(prsML,Xstruct)\n\n # Put returned vals back into param structure ------\n gg=reinsertFitPrs_GLM(gg,prsML,Xstruct)\n # #----------------------------------------------------",
"def _lmfit_minimize(self, params, star, logger=None):\n import lmfit\n import time\n logger = galsim.config.LoggerWrapper(logger)\n t0 = time.time()\n logger.debug(\"Start lmfit minimize.\")\n\n results = lmfit.minimize(self._lmfit_resid, params, args=(star,))\n flux, du, dv, scale, g1, g2 = results.params.valuesdict().values()\n\n logger.debug(\"End lmfit minimize. Elapsed time: {0}\".format(time.time() - t0))\n return results",
"def log_posterior(f):\n return self._log_likelihood(np.hstack((0.0, f))) + self._log_prior_laplace(np.hstack((0.0, f)))",
"def _set_lgb_parameters(\n X: np.ndarray,\n y: np.ndarray,\n objective: str,\n rf: bool,\n silent: bool,\n n_jobs: int = 0,\n lgbm_params: dict = None,\n) -> dict:\n\n n_feat = X.shape[1]\n\n params = lgbm_params if lgbm_params is not None else {}\n\n params[\"objective\"] = objective\n params[\"verbosity\"] = -1\n if objective == \"softmax\":\n params[\"num_class\"] = len(np.unique(y))\n\n if rf:\n feat_frac = (\n np.sqrt(n_feat) / n_feat\n if objective in [\"softmax\", \"binary\"]\n else n_feat / (3 * n_feat)\n )\n params.update(\n {\n \"boosting_type\": \"rf\",\n \"bagging_fraction\": 0.7,\n \"feature_fraction\": feat_frac,\n \"bagging_freq\": 1,\n }\n )\n\n clf_losses = [\n \"binary\",\n \"softmax\",\n \"multi_logloss\",\n \"multiclassova\",\n \"multiclass\",\n \"multiclass_ova\",\n \"ova\",\n \"ovr\",\n \"binary_logloss\",\n ]\n if objective in clf_losses:\n y = y.astype(int)\n y_freq_table = pd.Series(y.fillna(0)).value_counts(normalize=True)\n n_classes = y_freq_table.size\n if n_classes > 2 and objective != \"softmax\":\n params[\"objective\"] = \"softmax\"\n params[\"num_class\"] = len(np.unique(y))\n if not silent:\n print(\"Multi-class task, setting objective to softmax\")\n main_class = y_freq_table[0]\n if not silent:\n print(\"GrootCV: classification with unbalance classes\")\n if main_class > 0.8:\n params.update({\"is_unbalance\": True})\n\n params.update({\"num_threads\": n_jobs})\n\n # we are using early_stopping\n # we prevent the overridding of it by popping the n_iterations\n keys_to_pop = [\n \"num_iterations\",\n \"num_iteration\",\n \"n_iter\",\n \"num_tree\",\n \"num_trees\",\n \"num_round\",\n \"num_rounds\",\n \"nrounds\",\n \"num_boost_round\",\n \"n_estimators\",\n \"max_iter\",\n ]\n for key in keys_to_pop:\n params.pop(key, None)\n\n return params",
"def optimize_lambda(self):\n\n def ll(lam):\n lam = np.reshape(lam, (self.T, self.F + 1))\n res = self.log_likelihood_lam(lam)\n return res\n\n def dll(lam):\n lam = np.reshape(lam, (self.T, self.F + 1))\n res = self.d_log_likelihood_lam(lam)\n res = res.reshape((self.T * (self.F + 1)))\n return res\n\n random_starting_point = np.random.rand(self.lam.shape[0], self.lam.shape[1])\n newlam, val, convergence = optimize.fmin_l_bfgs_b(ll, random_starting_point, dll)[0], optimize.fmin_l_bfgs_b(ll, random_starting_point, dll)[1], optimize.fmin_l_bfgs_b(ll, random_starting_point, dll)[2]['warnflag']\n newlam = newlam.reshape((self.T, (self.F + 1)))\n self.sigma = np.var(newlam.T[1])\n self.mu = np.mean(newlam.T[1])\n self.lam = newlam\n self.__calculate_alpha()\n print('optimize lambda')\n return convergence, val",
"def log_p_m_x(log_Bs, myTheta):\n print(\"TODO\")",
"def MCMC2D_Log_General(LogProbabilityDistrib, N, sigG, initialGuess, args=()):\n # get number of free parameters\n freeParams = len(initialGuess)\n \n # make acceptance counter and acceptance rate calculator\n acceptanceCounter = 0\n totalNumberPoints = 0\n values = np.zeros([int(N), freeParams])\n ##\n # step 1: draw initial xi\n currentVals = initialGuess\n ##\n # for x in range(0,int(N)):\n while totalNumberPoints < int(N):\n # step 2: take step to xi+1 = xi+epsilon\n epsilons = np.random.normal(scale=sigG, size=freeParams)\n newVals = currentVals+epsilons\n ##\n # step 3: calc R = P(xi+1)/P(xi)\n R = LogProbabilityDistrib(*newVals, *args)-LogProbabilityDistrib(*currentVals, *args)\n ##\n if R < 1:\n p = np.log(np.random.uniform(low=0., high=1., size=1) [0])\n if p > R:\n currentVals= currentVals\n values[totalNumberPoints] = deepcopy(currentVals)\n totalNumberPoints += 1\n else:\n currentVals = newVals\n values[totalNumberPoints] = deepcopy(currentVals)\n acceptanceCounter += 1\n totalNumberPoints += 1\n else:\n currentVals = newVals\n values[totalNumberPoints] = deepcopy(currentVals)\n acceptanceCounter += 1\n totalNumberPoints += 1\n ##\n acceptanceRate = acceptanceCounter/totalNumberPoints\n print('\\nAcceptance Rate = {}\\n'.format(acceptanceRate))\n ##\n return values, acceptanceRate"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pylag.mlfit.MLFit._dofit(init_params, method='L-BFGS-B', **kwargs) Fit the model covariance matrix to the data by minimising -log(likelihood). Once the fit is complete, the parameters stored in the member variable params will be updated, the uncertainties will be estimated from the Hessian matrix, and the process_fit_results() method will be called from the derived class to calculate the power and lag spectra from the best-fitting parameter values. The actual minimisation is done by the _dofit() method.
|
def fit(self, init_params=None, update_params=True, **kwargs):
if init_params is None:
init_params = self.params
self.fit_result = self._dofit(init_params, **kwargs)
print(self.fit_result)
    if True or self.fit_result.success and update_params:  # note: the leading 'True or' short-circuits this check, so the parameters are updated regardless of fit success
for par, value in zip([p for p in init_params if init_params[p].vary], self.fit_result.x):
self.params[par].value = value
hess = self.fit_result.hess_inv(self.fit_result.x) if callable(self.fit_result.hess_inv) else np.diag(self.fit_result.hess_inv)
# make sure we only get the finite parameter errors
self.param_error = np.zeros(len(self.params))
self.param_error[hess>0] = hess[hess>0] ** 0.5
self.process_fit_results(self.fit_result, self.params)
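As an aside, the Hessian-based error estimate described above can be illustrated with a minimal, self-contained sketch (not pylag code): minimise a toy negative log-likelihood with scipy's L-BFGS-B and take square roots of the positive diagonal entries of the approximate inverse Hessian. The toy objective, the known SIGMAS and the todense() conversion are assumptions for illustration only; fit() above instead handles the callable/array forms of hess_inv directly.

import numpy as np
from scipy.optimize import minimize

# Toy negative log-likelihood: independent Gaussian terms with known
# standard deviations, so the recovered errors can be sanity-checked.
SIGMAS = np.array([0.5, 2.0])

def neg_logl(x):
    return 0.5 * np.sum((x / SIGMAS) ** 2)

result = minimize(neg_logl, x0=np.array([1.0, -3.0]), method='L-BFGS-B')

# L-BFGS-B returns hess_inv as a LbfgsInvHessProduct (a LinearOperator);
# convert it to a dense array to read off the diagonal. Other methods,
# e.g. BFGS, return a plain ndarray directly.
hess_inv = result.hess_inv.todense() if hasattr(result.hess_inv, 'todense') else result.hess_inv

# One-sigma errors are the square roots of the (positive) diagonal entries.
variances = np.diag(hess_inv)
param_error = np.sqrt(np.clip(variances, 0, None))
print(result.x, param_error)  # errors should come out close to SIGMAS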
|
[
"def _dofit(self, init_params, method='L-BFGS-B', **kwargs):\n initial_par_arr = np.array([init_params[p].value for p in init_params if init_params[p].vary])\n bounds = [(init_params[p].min, init_params[p].max) for p in init_params if init_params[p].vary] if method == 'L-BFGS-B' else None\n\n def objective(par_arr):\n \"\"\"\n wrapper around log_likelihood method to evaluate for an array of just the variable parameters,\n which can be used directly with scipy.optimise methods.\n \"\"\"\n fit_params = copy.copy(init_params)\n for par, value in zip([p for p in init_params if init_params[p].vary], par_arr):\n fit_params[par].value = value\n l, g = self.log_likelihood(fit_params, eval_gradient=True)\n print(\"\\r-log(L) = %6.3g\" % l + \" for parameters: \" + ' '.join(['%6.3g' % p for p in param2array(fit_params)]), end=\"\")\n return -l, -g\n\n result = minimize(objective, initial_par_arr, method=method, jac=True, bounds=bounds, **kwargs)\n print(\"\\r-log(L) = %6.3g\" % result.fun + \" for parameters: \" + \" \".join(['%6.3g' % p for p in param2array(array2param(result.x, init_params))]))\n return result",
"def fit(self):\n ln_l_all_array = [] #array of all log likelihoods\n ln_l_max = float(\"-inf\") #keep track of the maximum likelihood\n cp_parameter_array = None #parameters with the maximum likelihood\n #for multiple initial values\n for i in range(self.n_initial):\n print(\"initial value\", i)\n print(\"gradient descent\")\n super().fit() #regular gradient descent\n #copy the log likelihood\n for ln_l in self.ln_l_array:\n ln_l_all_array.append(ln_l)\n #check for convergence in the log likelihood\n ln_l = ln_l_all_array[len(ln_l_all_array)-1]\n if ln_l > ln_l_max:\n #the log likelihood is bigger, copy the parmeters\n ln_l_max = ln_l\n self.ln_l_max_index = len(ln_l_all_array)-1\n cp_parameter_array = self.copy_parameter()\n #do stochastic gradient descent to get a different initial value\n if i < self.n_initial-1:\n print(\"stochastic gradient descent\")\n #track when stochastic gradient descent was done for this entry\n #of ln_l_array\n self.ln_l_stochastic_index.append(len(ln_l_all_array))\n for j in range(self.n_stochastic_step):\n print(\"step\", j)\n self.m_stochastic_step()\n self.update_all_cp_parameters()\n ln_l_all_array.append(self.get_em_objective())\n #track when gradient descent was done\n #the E step right after this in super().fit() is considered part\n #of stochastic gradient descent\n self.ln_l_stochastic_index.append(len(ln_l_all_array)+1)\n else:\n self.ln_l_stochastic_index.append(len(ln_l_all_array))\n #copy results to the member variable\n self.ln_l_array = ln_l_all_array\n self.set_parameter(cp_parameter_array)\n self.e_step()",
"def fit(self, X, Xerr):\n \n if type(X) == pd.core.frame.DataFrame:\n if type(X.columns) == pd.indexes.base.Index:\n self.labels = np.array(X.columns)\n X = X.values\n \n if self.method=='astroML':\n self.GMM.n_components=self.n_components\n self.GMM.n_iter=self.n_iter\n self.GMM.fit(X, Xerr)\n \n self.V=self.GMM.V\n self.mu=self.GMM.mu\n self.weights=self.GMM.alpha\n \n if self.method=='Bovy':\n \"\"\"\n Bovy extreme_deconvolution only imports if the method is\n 'Bovy' (this is because installation is somewhat more\n complicated than astroML, and we don't want it to be\n required)\n \n As with the astroML method, initialize with a few steps of\n the scikit-learn GMM\n \"\"\"\n from extreme_deconvolution import extreme_deconvolution\\\n as bovyXD\n \n tmp_gmm = skl_GMM(self.n_components, max_iter=10,\n covariance_type='full',\n random_state=self.random_state)\n tmp_gmm.fit(X)\n self.mu = tmp_gmm.means_\n self.weights = tmp_gmm.weights_\n self.V = tmp_gmm.covariances_\n \n logl=bovyXD(X,Xerr,self.weights,self.mu,self.V,\n tol=self.tol,maxiter=self.n_iter,w=self.w)\n self.GMM.V = self.V\n self.GMM.mu = self.mu\n self.GMM.alpha = self.weights\n \n return self",
"def fit_nls(\n df_data,\n md=None,\n out=None,\n var_fix=None,\n df_init=None,\n verbose=True,\n uq_method=None,\n **kwargs,\n):\n ## Check `out` invariants\n if out is None:\n out = md.out\n print(\"... fit_nls setting out = {}\".format(out))\n\n ## Check invariants\n if md is None:\n raise ValueError(\"Must provide model md\")\n\n ## Determine variables to be fixed\n if var_fix is None:\n var_fix = set()\n else:\n var_fix = set(var_fix)\n for var in md.var_det:\n wid = md.domain.get_width(var)\n if wid == 0:\n var_fix.add(var)\n var_fix = list(var_fix)\n\n ## Run eval_nls to fit model parameter values\n df_fit = eval_nls(\n md,\n out=out,\n df_data=df_data,\n var_fix=var_fix,\n df_init=df_init,\n append=True,\n verbose=verbose,\n **kwargs,\n )\n ## Select best-fit values\n df_best = df_fit.sort_values(by=\"mse\", axis=0).iloc[[0]].reset_index(drop=True)\n if verbose:\n print(df_fit.sort_values(by=\"mse\", axis=0))\n\n ## Determine variables that were fitted\n var_fitted = list(set(md.var).intersection(set(df_best.columns)))\n var_remain = list(set(md.var).difference(set(var_fitted)))\n\n if len(var_remain) == 0:\n raise ValueError(\"Resulting model is constant!\")\n\n ## Assemble and return fitted model\n if md.name is None:\n name = \"(Fitted Model)\"\n else:\n name = md.name + \" (Fitted)\"\n\n ## Calibrate parametric uncertainty, if requested\n if uq_method == \"linpool\":\n ## Precompute data\n df_nom = eval_nominal(md, df_det=\"nom\")\n df_base = tran_outer(\n df_data, concat((df_best[var_fitted], df_nom[var_fix]), axis=1)\n )\n df_pred = eval_df(md, df=df_base)\n df_grad = eval_grad_fd(md, df_base=df_base, var=var_fitted)\n\n ## Pool variance matrices\n n_obs = df_data.shape[0]\n n_fitted = len(var_fitted)\n Sigma_pooled = zeros((n_fitted, n_fitted))\n\n for output in out:\n ## Approximate sigma_sq\n sigma_sq = npsum(\n nppow(df_data[output].values - df_pred[output].values, 2)\n ) / (n_obs - n_fitted)\n ## Approximate (pseudo)-inverse hessian\n var_grad = list(map(lambda v: \"D\" + output + \"_D\" + v, var_fitted))\n Z = df_grad[var_grad].values\n Hinv = pinv(Z.T.dot(Z), hermitian=True)\n\n ## Add variance matrix to pooled Sigma\n Sigma_pooled = Sigma_pooled + sigma_sq * Hinv / n_fitted\n\n ## Check model for identifiability\n kappa_out = cond(Sigma_pooled)\n if kappa_out > 1e10:\n warn(\n \"Model is locally unidentifiable as measured by the \"\n + \"condition number of the pooled covariance matrix; \"\n + \"kappa = {}\".format(kappa_out),\n RuntimeWarning,\n )\n\n ## Convert to std deviations and correlation\n sigma_comp = npsqrt(diag(Sigma_pooled))\n corr_mat = Sigma_pooled / (atleast_2d(sigma_comp).T.dot(atleast_2d(sigma_comp)))\n corr_data = []\n I, J = triu_indices(n_fitted, k=1)\n for ind in range(len(I)):\n i = I[ind]\n j = J[ind]\n corr_data.append([var_fitted[i], var_fitted[j], corr_mat[i, j]])\n df_corr = DataFrame(data=corr_data, columns=[\"var1\", \"var2\", \"corr\"])\n\n ## Assemble marginals\n marginals = {}\n for ind, var_ in enumerate(var_fitted):\n marginals[var_] = {\n \"dist\": \"norm\",\n \"loc\": df_best[var_].values[0],\n \"scale\": sigma_comp[ind],\n }\n\n ## Construct model with Gaussian copula\n if len(var_fix) > 0:\n md_res = (\n Model(name)\n >> cp_function(\n lambda x: df_nom[var_fix].values,\n var=list(set(var_remain).difference(var_fix)),\n out=var_fix,\n name=\"Fix variable levels\",\n )\n >> cp_md_det(md=md)\n >> cp_marginals(**marginals)\n >> cp_copula_gaussian(df_corr=df_corr)\n )\n else:\n md_res = (\n Model(name)\n >> cp_md_det(md=md)\n >> 
cp_marginals(**marginals)\n >> cp_copula_gaussian(df_corr=df_corr)\n )\n\n ## Return deterministic model\n elif uq_method is None:\n md_res = (\n Model(name)\n >> cp_function(\n lambda x: df_best[var_fitted].values,\n var=var_remain,\n out=var_fitted,\n name=\"Fix variable levels\",\n )\n >> cp_md_det(md=md)\n )\n\n else:\n raise ValueError(\"uq_method option {} not recognized\".format(uq_method))\n\n return md_res",
"def lmfit(self, star, logger=None):\n import lmfit\n logger = galsim.config.LoggerWrapper(logger)\n params = self._lmfit_params(star)\n results = self._lmfit_minimize(params, star, logger=logger)\n if logger:\n logger.debug(lmfit.fit_report(results))\n flux, du, dv, scale, g1, g2 = results.params.valuesdict().values()\n if not results.success:\n raise RuntimeError(\"Error fitting with lmfit.\")\n\n try:\n params_var = np.diag(results.covar)\n except (ValueError, AttributeError) as e:\n logger.warning(\"Failed to get params_var\")\n logger.warning(\" -- Caught exception: %s\",e)\n # results.covar is either None or does not exist\n params_var = np.zeros(6)\n\n return flux, du, dv, scale, g1, g2, params_var",
"def fit(self, star):\n star1 = self.chisq(star) # Get chisq Taylor expansion for linearized model\n ### Check for non-pos-def\n ###S = np.linalg.svd(star1.fit.alpha,compute_uv=False)\n ###print(\" .in fit(), min SV:\",np.min(S))###\n ###U,S,Vt = np.linalg.svd(star1.fit.alpha,compute_uv=True)\n ###print(\" ..in fit(), min SV:\",np.min(S))###\n\n # star1 has marginalized over flux (& center, if free), and updated these\n # for best linearized fit at the input parameter values.\n if self._degenerate:\n # Do SVD and retain\n # input values for degenerate parameter combinations\n # U,S,Vt = np.linalg.svd(star1.fit.alpha)\n S,U = np.linalg.eigh(star1.fit.alpha)\n # Invert, while zeroing small elements of S.\n # \"Small\" will be taken to be causing a small chisq change\n # when corresponding PSF component changes by the full flux of PSF\n small = 0.2 * self.pixel_area * self.pixel_area\n if np.any(S < -small):\n print(\"negative: \",np.min(S),\"small:\",small)###\n raise ValueError(\"Negative singular value in alpha matrix\")\n # Leave values that are close to zero equal to zero in inverse.\n nonzero = np.abs(S) > small\n invs = np.zeros_like(S)\n invs[nonzero] = 1./S[nonzero]\n\n ###print('S/zero:',S.shape,np.count_nonzero(np.abs(S)<=small),'small=',small) ###\n ###print(' ',np.max(S[np.abs(S)<=small]),np.min(S[np.abs(S)>small])) ##\n # answer = V * S^{-1} * U^T * beta\n # dparam = np.dot(Vt.T, invs * np.dot(U.T,star1.fit.beta))\n dparam = np.dot(U, invs * np.dot(U.T,star1.fit.beta))\n else:\n # If it is known there are no degeneracies, we can skip SVD\n dparam = np.linalg.solve(star1.fit.alpha, star1.fit.beta)\n # ??? dparam = scipy.linalg.solve(alpha, beta, sym_pos=True) would be faster\n # Create new StarFit, update the chisq value. Note no beta is returned as\n # the quadratic Taylor expansion was about the old parameters, not these.\n starfit2 = StarFit(star1.fit.params + dparam,\n flux = star1.fit.flux,\n center = star1.fit.center,\n alpha = star1.fit.alpha, # Inverse covariance matrix\n chisq = star1.fit.chisq \\\n + np.dot(dparam, np.dot(star1.fit.alpha, dparam)) \\\n - 2 * np.dot(star1.fit.beta, dparam))\n return Star(star1.data, starfit2)",
"def em(self, x, options, returnFlog=False):\n\n # Check that inputs are consistent\n errstring = self.consist('gmm', x)\n if errstring != None:\n raise Exception(errstring)\n \n\n ndata, xdim = x.shape\n\n # Sort out the options\n if options[13]:\n niters = options[13]\n else:\n niters = 100\n\n display = options[0]\n store = False\n if returnFlog:\n store = True\t# Store the error values to return them\n errlog = np.zeros(niters)\n test = False\n if options[2] > 0.0:\n test = True\t# Test log likelihood for termination\n\n check_covars = 0\n if options[4] >= 1:\n if display >= 0:\n print 'check_covars is on'\n check_covars = True\t# Ensure that covariances don't collapse\n MIN_COVAR = eps()\t# Minimum singular value of covariance matrix\n init_covars = self.covars\n\n # Main loop of algorithm\n for n in range(niters):\n \n # Calculate posteriors based on old parameters\n post, act = self.post(x)\n \n # Calculate error value if needed\n if display or store or test:\n prob = np.dot(act, self.priors)\n # Error value is negative log likelihood of data\n e = - np.sum(np.log(prob))\n if store:\n errlog[n] = e\n if display > 0:\n print 'Cycle ', n, ' Error ', e\n if test:\n if n > 0 and abs(e - eold) < options[2]:\n options[7] = e\n if returnFlog:\n return errlog\n else:\n return\n else:\n eold = e\n \n \n \n \n # Adjust the new estimates for the parameters\n new_pr = np.sum(post, 0)\n new_c = np.dot(post.T,x)\n \n # Now move new estimates to old parameter vectors\n self.priors = new_pr/ndata\n \n self.centres = new_c/new_pr.reshape(self.ncentres, 1)\n \n if self.covar_type == 'spherical':\n v = np.zeros(self.ncentres)\n n2 = dist2(x, self.centres)\n for j in range(self.ncentres):\n v[j] = np.dot(post[:,j].T, n2[:,j])\n self.covars = ((v/new_pr))/self.nin;\n if check_covars:\n # Ensure that no covariance is too small\n for j in range(self.ncentres):\n if self.covars[j] < MIN_COVAR:\n self.covars[j] = init_covars[j]\n elif self.covar_type == 'diag':\n for j in range(self.ncentres):\n diffs = x - self.centres[j,:]\n self.covars[j,:] = np.sum(np.multiply(np.multiply(diffs, diffs), post[:,j:j+1]), 0)/new_pr[j]\n if check_covars:\n # Ensure that no covariance is too small\n for j in range(self.ncentres):\n if np.min(self.covars[j,:]) < MIN_COVAR:\n self.covars[j,:] = init_covars[j,:]\n elif self.covar_type == 'full':\n for j in range(self.ncentres):\n diffs = x - self.centres[j,:];\n diffs = np.multiply(diffs, np.sqrt(post[:,j:j+1]))\n self.covars[:,:,j] = np.dot(diffs.T,diffs)/new_pr[j]\n if check_covars:\n # Ensure that no covariance is too small\n for j in range(self.ncentres):\n if np.min(la.svd(self.covars[:,:,j], compute_uv=False)) < MIN_COVAR:\n self.covars[:,:,j] = init_covars[:,:,j]\n elif self.covar_type == 'ppca':\n for j in range(self.ncentres):\n diffs = x - self.centres[j,:]\n diffs = np.multiply(diffs,np.sqrt(post[:,j:j+1]))\n tempcovars, tempU, templambda = ppca(np.dot(diffs.T,diffs)/new_pr[j], self.ppca_dim)\n if len(templambda) != self.ppca_dim:\n raise Exception('Unable to extract enough components')\n else: \n self.covars[j] = tempcovars\n self.U[:, :, j] = tempU\n self.lambd[j, :] = templambda\n \n if check_covars:\n if self.covars[j] < MIN_COVAR:\n self.covars[j] = init_covars[j]\n else:\n raise Exception('Unknown covariance type ' + self.covar_type)\n\n options[7] = -np.sum(np.log(self.prob(x)))\n if display >= 0:\n print maxitmess()\n if returnFlog:\n return errlog\n else:\n return",
"def fit(self):\n self.procedure_id = uuid4().hex\n self.procedure_date = str(datetime.today())\n t = perf_counter()\n self.__check_data()\n if self.error_free:\n max_iter = self.parameters[\"max iterations\"]\n conv_criteria = self.parameters[\"convergence level\"]\n\n if self.matrix.is_omx():\n self.output = AequilibraeMatrix()\n self.output.create_from_omx(self.output.random_name(), self.matrix.file_path,\n cores=self.matrix.view_names)\n self.output.computational_view()\n else:\n self.output = self.matrix.copy(self.output_name)\n if self.nan_as_zero:\n self.output.matrix_view[:, :] = np.nan_to_num(self.output.matrix_view)[:, :]\n\n rows = self.rows.data[self.row_field]\n columns = self.columns.data[self.column_field]\n tot_matrix = np.nansum(self.output.matrix_view[:, :])\n\n # Reporting\n self.report.append(\"Target convergence criteria: \" + str(conv_criteria))\n self.report.append(\"Maximum iterations: \" + str(max_iter))\n self.report.append(\"\")\n self.report.append(\"Rows:\" + str(self.rows.entries))\n self.report.append(\"Columns: \" + str(self.columns.entries))\n\n self.report.append(\"Total of seed matrix: \" + \"{:28,.4f}\".format(float(tot_matrix)))\n self.report.append(\"Total of target vectors: \" + \"{:25,.4f}\".format(float(np.nansum(rows))))\n self.report.append(\"\")\n self.report.append(\"Iteration, Convergence\")\n self.gap = conv_criteria + 1\n\n iter = 0\n while self.gap > conv_criteria and iter < max_iter:\n iter += 1\n # computes factors for zones\n marg_rows = self.__tot_rows(self.output.matrix_view[:, :])\n row_factor = self.__factor(marg_rows, rows)\n # applies factor\n self.output.matrix_view[:, :] = np.transpose(\n np.transpose(self.output.matrix_view[:, :]) * np.transpose(row_factor)\n )[:, :]\n\n # computes factors for columns\n marg_cols = self.__tot_columns(self.output.matrix_view[:, :])\n column_factor = self.__factor(marg_cols, columns)\n\n # applies factor\n self.output.matrix_view[:, :] = self.output.matrix_view[:, :] * column_factor\n\n # increments iterarions and computes errors\n self.gap = max(\n abs(1 - np.min(row_factor)),\n abs(np.max(row_factor) - 1),\n abs(1 - np.min(column_factor)),\n abs(np.max(column_factor) - 1),\n )\n\n self.report.append(str(iter) + \" , \" + str(\"{:4,.10f}\".format(float(np.nansum(self.gap)))))\n\n self.report.append(\"\")\n self.report.append(\"Running time: \" + str(\"{:4,.3f}\".format(perf_counter() - t)) + \"s\")",
"def fit(self, data, fit='quantiles'):\n if fit == 'MLE':\n self.setParamsMLE(data)\n self.setDistObj()\n isConverged = True # assume stats.fit will always return a distribution\n else:\n dataMoments = np.array([np.mean(data), np.std(data, ddof=1), moment(data, 3)])\n\n def objFunc(X):\n [self.shape, self.loc, self.scale] = X\n if self.fixedAtZero:\n self.loc = 0\n self.setDistObj()\n if fit == 'quantiles':\n obj = probPlotSqrErr(data, self, self.type, showPlots=False)[0]\n elif fit == 'MOM':\n distMoments = self.moments()\n weights = [1, 1,\n 0.1] # scale the influence of each moment # set last entry to remove skewness from the assessment\n # scale each moment error relative to the data moment value, but replace the data moment with a constant if it is close to zero\n obj = np.sum([abs(dataMoments[i] - distMoments[i]) / max(dataMoments[i], 1E-6) * weights[i] for i in\n range(\n self.nParams)]) # only use the number of moments needed to specify the distribution to match the data # np.sum((distMoments-dataMoments)**2) # np.sum([abs( (dataMoments[i]-distMoments[i])**(1/(i+1)) ) for i in range(3)]) #np.sum((dist.moments()-dataMoments)**2)\n return obj\n\n X = [self.shape, self.loc, self.scale]\n\n res = minimize(objFunc, X, method='SLSQP', options={'disp': True, 'maxiter': 600,\n 'ftol': 1e-8}) # , bounds=bnds, constraints=cons, # options={'maxiter': 500, 'gtol': 1e-6, 'disp': True}\n # method='SLSQP' 'TNC' 'L-BFGS-B' 'COBYLA' #\n # seems to ignore the constraint if bounds not included with method='SLSQP'\n isConverged = res.success\n if isConverged:\n [self.shape, self.loc, self.scale] = res.x\n else:\n [self.shape, self.loc, self.scale] = X # revert to previous values\n\n if self.fixedAtZero:\n self.loc = 0\n\n self.setDistObj()\n return isConverged",
"def MLfit_GLM(gg, Stim, optimArgs=None):\n\n # Set optimization parameters\n algopts=getFminOptsForVersion(version)\n if nargin > 2:\n opts=optimset(algopts[:],optimArgs[:])\n else:\n opts=optimset(algopts[:])\n\n # --- Create design matrix extract initial params from gg ----------------\n prs0,Xstruct=setupfitting_GLM(gg,Stim,nargout=2)\n\n # --- Set loss function --------------------------------------------------\n #if isequal(Xstruct.nlfun,expfun) or isequal(Xstruct.nlfun,exp):\n lfunc=lambda prs=None: Loss_GLM_logli_exp(prs,Xstruct)\n #else:\n # lfunc=lambda prs=None: Loss_GLM_logli(prs,Xstruct)\n\n # --- minimize negative log likelihood --------------------\n prsML,neglogli=fminunc(lfunc,prs0,opts)\n\n # Compute Hessian if desired\n # if nargout > 2:\n # neglogli,__,H=Loss_GLM_logli(prsML,Xstruct)\n\n # Put returned vals back into param structure ------\n gg=reinsertFitPrs_GLM(gg,prsML,Xstruct)\n # #----------------------------------------------------",
"def fit(pdf, prior, parameters, observations, iter=1000, lr=0.1):\n\n for i in range(iter):\n # Define objective function (log-likelihood) to maximize\n prior_ = torch.log(prior(parameters))\n posterior = torch.mean(torch.log(pdf(observations))) + prior_\n\n if np.isnan(posterior.data[0]) or np.isnan(prior_.data[0]):\n return\n\n # Determine gradients\n posterior.backward()\n\n # Update parameters with gradient descent\n for param in parameters:\n param.data.add_(lr * param.grad.data)\n param.grad.data.zero_()",
"def setParamsMLE(self, data):\n if self.type == 'normal':\n args = stats.norm.fit(data) # uses MLE\n\n self.shape = 0 # not used; included for optimiser in fitDistribution\n self.loc = args[0]\n self.scale = args[1]\n\n elif self.type == 'lognormal':\n if not self.fixedAtZero:\n args = stats.lognorm.fit(data) # uses MLE\n else:\n args = stats.lognorm.fit(data, floc=0) # uses MLE\n\n self.shape = args[0]\n self.loc = args[1]\n self.scale = args[2]\n\n elif self.type == 'weibull':\n if not self.fixedAtZero:\n args = stats.weibull_min.fit(data) # uses MLE\n else:\n args = stats.weibull_min.fit(data, floc=0) # uses MLE\n\n self.shape = args[0]\n self.loc = args[1]\n self.scale = args[2]\n\n elif self.type == 'exponential':\n if not self.fixedAtZero:\n args = stats.expon.fit(data) # uses MLE\n else:\n args = stats.expon.fit(data, floc=0) # uses MLE\n\n self.shape = 0 # not used; included for optimiser in fitDistribution\n self.loc = args[0]\n self.scale = args[1]\n\n elif self.type == 'triangular':\n args = stats.triang.fit(data) # uses MLE\n\n self.shape = args[0]\n self.loc = args[1]\n self.scale = args[2]\n\n elif self.type == 'uniform':\n args = stats.uniform.fit(data) # uses MLE\n\n self.shape = 0 # not used; included for optimiser in fitDistribution\n self.loc = args[0]\n self.scale = args[1]\n\n elif self.type == 'gamma':\n if not self.fixedAtZero:\n args = stats.gamma.fit(data) # uses MLE\n else:\n args = stats.gamma.fit(data, floc=0) # uses MLE\n\n self.shape = args[0]\n self.loc = args[1]\n self.scale = args[2]\n\n return",
"def initialize(self,input_size,n_classes):\n\n self.n_classes = n_classes\n self.input_size = input_size\n\n # Can't allocate space for the alpha/beta tables of\n # belief propagation (forward-backward), since their size\n # depends on the input sequence size, which will change from\n # one example to another.\n\n self.alpha = np.zeros((0,0))\n self.beta = np.zeros((0,0))\n \n ###########################################\n # Allocate space for the linear chain CRF #\n ###########################################\n # - self.weights[0] are the connections with the image at the current position\n # - self.weights[-1] are the connections with the image on the left of the current position\n # - self.weights[1] are the connections with the image on the right of the current position\n self.weights = [np.zeros((self.input_size,self.n_classes)),\n np.zeros((self.input_size,self.n_classes)),\n np.zeros((self.input_size,self.n_classes))]\n # - self.bias is the bias vector of the output at the current position\n self.bias = np.zeros((self.n_classes))\n\n # - self.lateral_weights are the linear chain connections between target at adjacent positions\n self.lateral_weights = np.zeros((self.n_classes,self.n_classes))\n \n self.grad_weights = [np.zeros((self.input_size,self.n_classes)),\n np.zeros((self.input_size,self.n_classes)),\n np.zeros((self.input_size,self.n_classes))]\n self.grad_bias = np.zeros((self.n_classes))\n self.grad_lateral_weights = np.zeros((self.n_classes,self.n_classes))\n \n #########################\n # Initialize parameters #\n #########################\n\n # Since the CRF log factors are linear in the parameters,\n # the optimization is convex and there's no need to use a random\n # initialization.\n\n self.n_updates = 0 # To keep track of the number of updates, to decrease the learning rate",
"def define_params_and_initial_state_distributions(self):\n\n label2ind = dict(zip(list(self.fitted_cov.columns), np.arange(len(self.fitted_cov.columns))))\n for i in self.fitted_params.index:\n r = self.fitted_params['id'][i] # region\n self._all_internal_params_distribs[r] = dict(b_fit=LogNormalDist(params=mv2musig(self.fitted_params['b1_mean'][i],\n self.fitted_cov['b1_pop'][label2ind['b1_pop']]),\n stochastic=self.stochastic),\n r_fit=NormalDist(params=[0.034, 0.034 * self.noise], stochastic=self.stochastic),\n N=DiracDist(params=self.fitted_params['popsize'][i], stochastic=self.stochastic),\n N_av=DiracDist(params=float(np.mean(self.fitted_params['popsize'])), stochastic=self.stochastic),\n Dq_fit=LogNormalDist(params=mv2musig(self.fitted_params['Dq_mean'][i],\n self.fitted_cov['Dq_pop'][label2ind['Dq_pop']]),\n stochastic=self.stochastic),\n De=NormalDist(params=[5.1, 5.1 * self.noise], stochastic=self.stochastic),\n Dh=NormalDist(params=[30, 30 * self.noise], stochastic=self.stochastic),\n Di=NormalDist(params=[2.3, 2.3 * self.noise], stochastic=self.stochastic),\n alpha=NormalDist(params=[0.55, 0.55 * self.noise], stochastic=self.stochastic),\n icu=DiracDist(params=self.fitted_params['ICUcapacity'][i], stochastic=self.stochastic),\n beta1=NormalDist(params=[self.fitted_params['betaw1_mean'][i],\n np.sqrt(self.fitted_cov['betaw1_pop'][label2ind['betaw1_pop']])],\n stochastic=self.stochastic),\n beta2=NormalDist(params=[self.fitted_params['betaw2_mean'][i],\n np.sqrt(self.fitted_cov['betaw2_pop'][label2ind['betaw2_pop']])],\n stochastic=self.stochastic),\n beta3=NormalDist(params=[self.fitted_params['betaw3_mean'][i],\n np.sqrt(self.fitted_cov['betaw3_pop'][label2ind['betaw3_pop']])],\n stochastic=self.stochastic),\n beta4=NormalDist(params=[self.fitted_params['betaw4_mean'][i],\n np.sqrt(self.fitted_cov['betaw4_pop'][label2ind['betaw4_pop']])],\n stochastic=self.stochastic),\n )\n self._all_initial_state_distribs[r] = dict(E0=LogNormalDist(params=mv2musig(self.fitted_params['initE_mean'][i], self.fitted_cov['initE_pop'][label2ind['initE_pop']]),\n stochastic=self.stochastic),\n I0=DiracDist(params=self.fitted_params['I0_kalman_mean'][i], stochastic=self.stochastic),\n R0=DiracDist(params=0, stochastic=self.stochastic),\n A0=DiracDist(params=1, stochastic=self.stochastic), # is updated below\n H0=DiracDist(params=self.fitted_params['H0_kalman_mean'][i], stochastic=self.stochastic)\n )",
"def My_Bootstrap(self):\n\ts = len(self.X_test) #200\n t = len(self.X_training) #400\n\tr = np.size(self.X_test,1) #1600\n # Ordinary Least Square method\n if self.method == 'OLS':\n m = np.zeros((self.B,s))\n c = np.zeros((self.B,r))\n for i in range(self.B):\n index = randint(0, t, t)\n X_resample = self.X_training[index]\n z_resample = self.z[index]\n lr = My_Linear_Regression(X_resample, self.X_test, z_resample, self.lambda_)\n lr.My_OLS()\n z_predict = lr.My_Predict(self.X_test, False)\n\t\tcoeff = lr.My_Beta()\n m[i,:] = z_predict\n c[i,:] = coeff\n\n # Ridge regression\n elif self.method == 'Ridge':\n m = np.zeros((self.B,s))\n c = np.zeros((self.B,r))\t\n for i in range(self.B):\n index = randint(0, t, t)\n X_resample = self.X_training[index]\n z_resample = self.z[index]\n lr = My_Linear_Regression(X_resample, self.X_test, z_resample, self.lambda_)\n lr.My_Ridge()\n z_predict = lr.My_Predict(self.X_test, False)\n\t\tcoeff = lr.My_Beta()\n m[i,:] = z_predict\n c[i,:] = coeff\n \n #Lasso regression\n elif self.method == 'Lasso':\n m = np.zeros((self.B,s))\n c = np.zeros((self.B,r))\t\n for i in range(self.B):\n index = randint(0, t, t)\n X_resample = self.X_training[index]\n z_resample = self.z[index]\n lr = My_Linear_Regression(X_resample, self.X_test, z_resample, self.lambda_)\n lr.My_Lasso()\n z_predict = lr.My_Predict(self.X_test, True)\n\t\tcoeff = lr.My_Beta()\n m[i,:] = z_predict\n c[i,:] = coeff\n \n else:\n print('You have forgotten to select method; OLS, Ridge or Lasso.')\n\n return m, c",
"def fitting(fitfunc, X, Y, start_parm, correlated=True, verbose=True):\n errfunc = lambda p, x, y, error: np.dot(error, (y-fitfunc(p,x)).T)\n \n # compute inverse, cholesky decomposed covariance matrix\n if not correlated:\n cov = np.diag(np.diagonal(np.cov(Y.T)))\n else:\n cov = np.cov(Y.T)\n cov = (np.linalg.cholesky(np.linalg.inv(cov))).T\n\n # degrees of freedom\n dof = float(Y.shape[1]-len(start_parm)) \n # create results arrays\n res = np.zeros((Y.shape[0], len(start_parm)))\n res_cov = np.zeros((len(start_parm), len(start_parm)))\n chisquare = np.zeros(Y.shape[0])\n # The FIT to the boostrap samples\n for b in range(0, Y.shape[0]):\n p,cov1,infodict,mesg,ier = leastsq(errfunc, start_parm, \n args=(X, Y[b,:], cov), full_output=1, factor=0.1)\n chisquare[b] = float(sum(infodict['fvec']**2.))\n res[b] = np.array(p)\n if b==0:\n # print(cov1)\n res_cov = cov1*chisquare[b]/dof\n # print(res_cov)\t\n # calculate mean and standard deviation\n res_mean, res_std = af.calc_error(res)\n # chi2 = np.median(chisquare)\n # p-value calculated\n pvals_originfit = 1. - scipy.stats.chi2.cdf(chisquare[0], dof)\n \n # The fit to the mean value\n y = np.mean(Y, axis=0)\n p,cov1,infodict,mesg,ier = leastsq(errfunc, start_parm, \\\n args=(X, y, cov), full_output=1)\n chisquare_meanfit = float(sum(infodict['fvec']**2.))\n pvals_meanfit = 1. - scipy.stats.chi2.cdf(chisquare_meanfit, dof)\n # writing results to screen\n if verbose:\n if correlated:\n print(\"fit results for a correlated fit:\")\n else:\n print(\"fit results for an uncorrelated fit:\")\n print(\"degrees of freedom: %f\\n\" % dof)\n \n print(\"bootstrap fit:\")\n for rm, rs in zip(res_mean, res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n #print(\"Chi^2/dof: %.6e +/- %.6e\\n\" % (chi2/dof, np.std(chisquare)/dof))\n\n print(\"mean value fit:\")\n for rm, rs in zip(p, res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n print(\" Chi^2/dof: %.6e \" % (chisquare_meanfit / dof))\n print(\" p-value: %lf\" % pvals_meanfit) \n\n print(\"original data fit:\")\n for rm, rs in zip(res[0], res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n print(\" Chi^2/dof: %.6e \" % (chisquare[0]/dof))\n print(\" p-value: %lf\" % pvals_originfit) \n return res, res_cov.flatten(), chisquare[0]/dof, pvals_originfit",
"def fit(self) -> None:\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `sigma_ols`.\n self.sigma_ols = self._compute_sigma_ols()\n\n mu_prior = np.zeros((self.m, self.N))\n for i in range(self.m):\n mu_prior[i, self.p * i] = 1\n mu_prior = mu_prior.flatten()\n\n v_prior = self._construct_v_prior()\n\n Z_sig_Z_sum = 0\n Z_sig_y_sum = 0\n\n for t in range(self.p, self.T):\n Z_t = self._construct_Zt(\n self.X, self.Y, t\n ) # shape: m x [m * (m * p + r + 1)]\n\n z_sum_term = (\n Z_t.T @ inv(self.sigma_ols)\n ) @ Z_t # shape: [m * (m * p + r + 1)] x [m * (m * p + r + 1)]\n y_sum_term = (Z_t.T @ inv(self.sigma_ols)) @ self.Y[\n :, t\n ] # shape: [m * (m * p + r + 1)] x 1\n\n assert (\n self.num_mu_coefficients,\n self.num_mu_coefficients,\n ) == z_sum_term.shape, f\"Expected {(self.num_mu_coefficients, self.num_mu_coefficients)}, got {z_sum_term.shape}\"\n assert (\n self.num_mu_coefficients,\n ) == y_sum_term.shape, (\n f\"Expected {(self.num_mu_coefficients,)}, got {y_sum_term.shape}\"\n )\n\n Z_sig_Z_sum += z_sum_term\n Z_sig_y_sum += y_sum_term\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `v_posterior`.\n self.v_posterior = inv(\n inv(v_prior) + Z_sig_Z_sum\n ) # shape: [m * (m * p + r + 1)] x [m * (m * p + r + 1)]\n assert (\n self.num_mu_coefficients,\n self.num_mu_coefficients,\n ) == self.v_posterior.shape, f\"Expected {(self.num_mu_coefficients, self.num_mu_coefficients)}, got {self.v_posterior.shape}\"\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `mu_posterior`.\n self.mu_posterior = self.v_posterior @ (\n inv(v_prior) @ mu_prior + Z_sig_y_sum\n ) # shape: [m * (m * p + r + 1)] x 1\n assert (\n self.num_mu_coefficients,\n ) == self.mu_posterior.shape, (\n f\"Expected {(self.num_mu_coefficients,)}, got {self.mu_posterior.shape}\"\n )\n # pyre-fixme[16]: `BayesianVAR` has no attribute `resid`.\n self.resid = self._get_training_residuals()\n self.fitted = True",
"def _set_lgb_parameters(\n X: np.ndarray,\n y: np.ndarray,\n objective: str,\n rf: bool,\n silent: bool,\n n_jobs: int = 0,\n lgbm_params: dict = None,\n) -> dict:\n\n n_feat = X.shape[1]\n\n params = lgbm_params if lgbm_params is not None else {}\n\n params[\"objective\"] = objective\n params[\"verbosity\"] = -1\n if objective == \"softmax\":\n params[\"num_class\"] = len(np.unique(y))\n\n if rf:\n feat_frac = (\n np.sqrt(n_feat) / n_feat\n if objective in [\"softmax\", \"binary\"]\n else n_feat / (3 * n_feat)\n )\n params.update(\n {\n \"boosting_type\": \"rf\",\n \"bagging_fraction\": 0.7,\n \"feature_fraction\": feat_frac,\n \"bagging_freq\": 1,\n }\n )\n\n clf_losses = [\n \"binary\",\n \"softmax\",\n \"multi_logloss\",\n \"multiclassova\",\n \"multiclass\",\n \"multiclass_ova\",\n \"ova\",\n \"ovr\",\n \"binary_logloss\",\n ]\n if objective in clf_losses:\n y = y.astype(int)\n y_freq_table = pd.Series(y.fillna(0)).value_counts(normalize=True)\n n_classes = y_freq_table.size\n if n_classes > 2 and objective != \"softmax\":\n params[\"objective\"] = \"softmax\"\n params[\"num_class\"] = len(np.unique(y))\n if not silent:\n print(\"Multi-class task, setting objective to softmax\")\n main_class = y_freq_table[0]\n if not silent:\n print(\"GrootCV: classification with unbalance classes\")\n if main_class > 0.8:\n params.update({\"is_unbalance\": True})\n\n params.update({\"num_threads\": n_jobs})\n\n # we are using early_stopping\n # we prevent the overridding of it by popping the n_iterations\n keys_to_pop = [\n \"num_iterations\",\n \"num_iteration\",\n \"n_iter\",\n \"num_tree\",\n \"num_trees\",\n \"num_round\",\n \"num_rounds\",\n \"nrounds\",\n \"num_boost_round\",\n \"n_estimators\",\n \"max_iter\",\n ]\n for key in keys_to_pop:\n params.pop(key, None)\n\n return params",
"def log_likelihood_function(self, instance):\r\n\r\n \"\"\"\r\n In the previous tutorial the instance was a single `Gaussian` profile, meaning we could create the model data \r\n using the line:\r\n\r\n model_data = instance.gaussian.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n In this tutorial our instance is comprised of multiple 1D Gaussians, because we will use a `Collection` to\r\n compose the model:\r\n\r\n model = Collection(gaussian_0=Gaussian, gaussian_1=Gaussian).\r\n\r\n By using a Collection, this means the instance parameter input into the fit function is a\r\n dictionary where individual profiles (and their parameters) can be accessed as followed:\r\n\r\n print(instance.gaussian_0)\r\n print(instance.gaussian_1)\r\n print(instance.gaussian_0.centre)\r\n\r\n In this tutorial, the `model_data` is therefore the summed `model_data` of all individual Gaussians in the \r\n model. The function `model_data_from_instance` performs this summation. \r\n \"\"\"\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n chi_squared = sum(chi_squared_map)\r\n noise_normalization = np.sum(np.log(2 * np.pi * noise_map**2.0))\r\n log_likelihood = -0.5 * (chi_squared + noise_normalization)\r\n\r\n return log_likelihood"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pylag.mlfit.MLFit.run_mcmc(init_params=None, burn=300, steps=1000, thin=1, walkers=50, **kwargs) Run MCMC to obtain the posterior distributions of the model parameters. The MCMC calculation is run using the Goodman-Weare algorithm, via emcee, called through lmfit's Minimizer class.
|
def run_mcmc(self, init_params=None, burn=300, steps=1000, thin=1, walkers=50, **kwargs):
if init_params is None:
init_params = self.params
    # we initialise a Minimizer object, but only if there isn't one already, so we can re-use it if run_mcmc is called again
if self.mcmc_minimizer is None:
self.mcmc_minimizer = lmfit.Minimizer(lambda p: self.log_likelihood(p, eval_gradient=False), params=init_params,
nan_policy='propagate')
self.mcmc_result = self.mcmc_minimizer.emcee(init_params, burn=burn, steps=steps, thin=thin, nwalkers=walkers, **kwargs)
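For orientation, here is a minimal, hypothetical usage sketch of the lmfit Minimizer.emcee interface that run_mcmc wraps (not pylag code): the objective returns a scalar log-posterior, which is lmfit's default interpretation (float_behavior='posterior'), and the flattened chain is summarised with percentiles. The toy Gaussian likelihood, the parameter name 'mu' and the chain settings are illustrative assumptions; running it requires the emcee package.

import numpy as np
import lmfit

# Assumed toy data: samples from a unit-variance Gaussian whose mean 'mu'
# is the single free parameter, standing in for MLFit.log_likelihood().
rng = np.random.default_rng(0)
data = rng.normal(loc=2.0, scale=1.0, size=100)

def log_posterior(params):
    mu = params['mu'].value
    # Gaussian log-likelihood with unit variance; the bounds on 'mu' act as a flat prior.
    return -0.5 * np.sum((data - mu) ** 2)

params = lmfit.Parameters()
params.add('mu', value=0.0, min=-10, max=10)

mini = lmfit.Minimizer(log_posterior, params, nan_policy='propagate')
res = mini.emcee(params=params, burn=300, steps=1000, thin=1, nwalkers=50)

# res.flatchain is a pandas DataFrame of the burned, thinned, flattened chain;
# the 16th/50th/84th percentiles give a median and a one-sigma credible interval.
print(res.flatchain['mu'].quantile([0.16, 0.5, 0.84]))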
|
[
"def run_mcmc(self):\n if self.sampler is not None:\n instance = None\n prev_samples = self.sampler.iterations\n else:\n instance = self._setup_instance()\n if self.fit_type == \"both\":\n optres = self.run_downhill(instance)\n if optres.success:\n self.guess = list(optres.x)\n prev_samples = 0\n\n self._write_log_pre()\n\n fitter = fit.MCMC(\n priors=self.priors,\n data=self.y,\n quantity=self.quantity,\n constraints=self.constraints,\n sigma=self.sigma,\n guess=self.guess,\n blobs=self.blobs,\n verbose=self.verbose,\n relax=self.relax,\n )\n\n start = time.time()\n if self.chunks == 0:\n self.chunks = self.nsamples - prev_samples\n nchunks = np.ceil((self.nsamples - prev_samples) / float(self.chunks))\n for i, s in enumerate(\n fitter.fit(\n self.sampler,\n instance,\n self.nwalkers,\n self.nsamples - prev_samples,\n self.burnin,\n self.nthreads,\n self.chunks,\n )\n ):\n # Write out files\n self.write_iter_pickle(s)\n print(\n (\n \"Done {0}%. Time per sample: {1}\".format(\n 100 * float(i + 1) / nchunks,\n (time.time() - start) / ((i + 1) * self.chunks * self.nwalkers),\n )\n )\n )\n\n total_time = time.time() - start\n\n self._write_log_post(s, total_time)\n self._write_data(s)",
"def run_emcee(pos,ndim,nwalkers,run_dir,lnprob_args,init_params,param_names,\n\t\t\t auto_stop,conv_type,min_samp,ncor_times,autocorr_tol,write_iter,write_thresh,burn_in,min_iter,max_iter,threads,\n\t\t\t print_output=True):\n\t# Keep original burn_in and max_iter to reset convergence if jumps out of convergence\n\torig_burn_in = burn_in\n\torig_max_iter = max_iter\n\t# Sorted parameter names\n\tparam_names = np.array(param_names)\n\ti_sort = np.argsort(param_names) # this array gives the ordered indices of parameter names (alphabetical)\n\t# Create MCMC_chain.csv if it doesn't exist\n\tif os.path.exists(run_dir+'log/MCMC_chain.csv')==False:\n\t\tf = open(run_dir+'log/MCMC_chain.csv','w')\n\t\tparam_string = ', '.join(str(e) for e in param_names)\n\t\tf.write('# iter, ' + param_string) # Write initial parameters\n\t\tbest_str = ', '.join(str(e) for e in init_params)\n\t\tf.write('\\n 0, '+best_str)\n\t\tf.close()\n\n\n\t# initialize the sampler\n\tsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=lnprob_args,threads=threads)\n\n\tstart_time = time.time() # start timer\n\n\twrite_log((ndim,nwalkers,auto_stop,conv_type,burn_in,write_iter,write_thresh,min_iter,max_iter,threads),'emcee_options',run_dir)\n\n\t# Initialize stuff for autocorrelation analysis\n\tif (auto_stop==True):\n\t\tautocorr_times_all = [] # storage array for autocorrelation times\n\t\tautocorr_tols_all = [] # storage array for autocorrelation tolerances\n\t\told_tau = np.full(len(param_names),np.inf)\n\t\tmin_samp\t = min_samp # minimum iterations to use past convergence\n\t\tncor_times = ncor_times # multiplicative tolerance; number of correlation times before which we stop sampling\t\n\t\tautocorr_tol = autocorr_tol\t\n\t\tstop_iter\t= max_iter # stopping iteration; changes once convergence is reached\n\t\tconverged = False\n\t\t# write_log((min_samp,autocorr_tol,ncor_times,conv_type),'autocorr_options',run_dir)\n\n\t# If one provides a list of parameters for autocorrelation, it needs to be in the \n\t# form of a tuple. If one only provides one paraemeter, it needs to be converted to a tuple:\n\tif (auto_stop==True) and (conv_type != 'all') and (conv_type != 'mean') and (conv_type != 'median'):\n\t\tif not isinstance(conv_type, tuple):\n\t\t\tconv_type = (conv_type,) #\n\n\t# Check auto_stop convergence type:\n\tif (auto_stop==True) and (isinstance(conv_type,tuple)==True) :\n\t\tif all(elem in param_names for elem in conv_type)==True:\n\t\t\tif (print_output):\n\t\t\t\tprint('\\n Only considering convergence of following parameters: ')\n\t\t\t\tfor c in conv_type:\t\n\t\t\t\t\tprint('\t\t %s' % c)\n\t\t\t\tpass\n\t\t# check to see that all param_names are in conv_type, if not, remove them \n\t\t# from conv_type\n\t\telse:\n\t\t\ttry:\n\t\t\t\tconv_type_list = list(conv_type)\n\t\t\t\tfor c in conv_type:\n\t\t\t\t\tif c not in param_names:\n\t\t\t\t\t\tconv_type_list.remove(c)\n\t\t\t\tconv_type = tuple(conv_type_list)\n\t\t\t\tif all(elem in conv_type for elem in param_names)==True:\n\t\t\t\t\tif (print_output):\n\t\t\t\t\t\tprint('\\n Only considering convergence of following parameters: ')\n\t\t\t\t\t\tfor c in conv_type:\t\n\t\t\t\t\t\t\tprint('\t\t %s' % c)\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif (print_output):\n\t\t\t\t\t\t\tprint('\\n One of more parameters in conv_type is not a valid parameter. Defaulting to median convergence type../.\\n')\n\t\t\t\t\t\tconv_type='median'\n\n\t\t\texcept:\n\t\t\t\tprint('\\n One of more parameters in conv_type is not a valid parameter. 
Defaulting to median convergence type../.\\n')\n\t\t\t\tconv_type='median'\n\n\tif (auto_stop==True):\n\t\twrite_log((min_samp,autocorr_tol,ncor_times,conv_type),'autocorr_options',run_dir)\n\t# Run emcee\n\tfor k, result in enumerate(sampler.sample(pos, iterations=max_iter)):\n\t\t\n\t\tbest = [] # For storing current chain positions (median of parameter values at write_iter iterations)\n\t\tif ((k+1) % write_iter == 0) and ((k+1)>=write_thresh): # Write every [write_iter] iteration\n\t\t\t# Chain location for each parameter\n\t\t\t# Median of last 100 positions for each walker.\n\t\t\tnwalkers = np.shape(sampler.chain)[0]\n\t\t\tnpar = np.shape(sampler.chain)[2]\n\t\t\t\n\t\t\tsampler_chain = sampler.chain[:,:k+1,:]\n\t\t\tnew_sampler_chain = []\n\t\t\tfor i in range(0,np.shape(sampler_chain)[2],1):\n\t\t\t\tpflat = sampler_chain[:,:,i] # flattened along parameter\n\t\t\t\tflat = np.concatenate(np.stack(pflat,axis=1),axis=0)\n\t\t\t\tnew_sampler_chain.append(flat)\n\t\t\t# best = []\n\t\t\tfor pp in range(0,npar,1):\n\t\t\t\tdata = new_sampler_chain[pp][-int(nwalkers*write_iter):]\n\t\t\t\tmed = np.median(data)\n\t\t\t\tbest.append(med)\n\t\t\t# write to file\n\t\t\tf = open(run_dir+'log/MCMC_chain.csv','a')\n\t\t\tbest_str = ', '.join(str(e) for e in best)\n\t\t\tf.write('\\n'+str(k+1)+', '+best_str)\n\t\t\tf.close()\n\t\t# Checking autocorrelation times for convergence\n\t\tif ((k+1) % write_iter == 0) and ((k+1)>=min_iter) and ((k+1)>=write_thresh) and (auto_stop==True):\n\t\t\t# Autocorrelation analysis of chain to determine convergence; the minimum autocorrelation time is 1.0, which results when a time cannot be accurately calculated.\n\t\t\ttau = autocorr_convergence(sampler.chain,param_names,plot=False) # Calculate autocorrelation times for each parameter\n\t\t\tautocorr_times_all.append(tau) # append tau to storage array\n\t\t\t# Calculate tolerances\n\t\t\ttol = (np.abs(tau-old_tau)/old_tau) * 100.0\n\t\t\tautocorr_tols_all.append(tol) # append tol to storage array\n\t\t\t# If convergence for mean autocorrelation time \n\t\t\tif (auto_stop==True) & (conv_type == 'mean'):\n\t\t\t\tpar_conv = [] # converged parameter indices\n\t\t\t\tpar_not_conv = [] # non-converged parameter indices\n\t\t\t\tfor x in range(0,len(param_names),1):\n\t\t\t\t\tif (round(tau[x],1)>1.0):# & (0.0<round(tol[x],1)<autocorr_tol):\n\t\t\t\t\t\tpar_conv.append(x) # Append index of parameter for which an autocorrelation time can be calculated; we use these to calculate the mean\n\t\t\t\t\telse: par_not_conv.append(x)\n\t\t\t\t# Calculate mean of parameters for which an autocorrelation time could be calculated\n\t\t\t\tpar_conv = np.array(par_conv) # Explicitly convert to array\n\t\t\t\tpar_not_conv = np.array(par_not_conv) # Explicitly convert to array\n\n\t\t\t\tif (par_conv.size == 0) and (stop_iter == orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\nIteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Not enough iterations for any autocorrelation times!')\n\t\t\t\telif ( (par_conv.size > 0) and (k+1)>(np.mean(tau[par_conv]) * ncor_times) and (np.mean(tol[par_conv])<autocorr_tol) and (stop_iter == max_iter) ):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\n ---------------------------------------------')\n\t\t\t\t\t\tprint(' | Converged at %d iterations.\t\t\t | ' % (k+1))\n\t\t\t\t\t\tprint(' | Performing %d iterations of sampling... 
| ' % min_samp )\n\t\t\t\t\t\tprint(' | Sampling will finish at %d iterations. | ' % ((k+1)+min_samp) )\n\t\t\t\t\t\tprint(' ---------------------------------------------')\n\t\t\t\t\tburn_in = (k+1)\n\t\t\t\t\tstop_iter = (k+1)+min_samp\n\t\t\t\t\tconv_tau = tau\n\t\t\t\t\tconverged = True\n\t\t\t\telif ((par_conv.size == 0) or ( (k+1)<(np.mean(tau[par_conv]) * ncor_times)) or (np.mean(tol[par_conv])>autocorr_tol)) and (stop_iter < orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\nIteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Jumped out of convergence! Resetting convergence criteria...')\n\t\t\t\t\t\t# Reset convergence criteria\n\t\t\t\t\t\tprint('- Resetting burn_in = %d' % orig_burn_in)\n\t\t\t\t\t\tprint('- Resetting max_iter = %d' % orig_max_iter)\n\t\t\t\t\tburn_in = orig_burn_in\n\t\t\t\t\tstop_iter = orig_max_iter\n\t\t\t\t\tconverged = False\n\n\t\t\t\tif (par_conv.size>0):\n\t\t\t\t\tpnames_sorted = param_names[i_sort]\n\t\t\t\t\ttau_sorted\t= tau[i_sort]\n\t\t\t\t\ttol_sorted\t= tol[i_sort]\n\t\t\t\t\tbest_sorted = np.array(best)[i_sort]\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('{0:<30}{1:<40}{2:<30}'.format('\\nIteration = %d' % (k+1),'%d x Mean Autocorr. Time = %0.2f' % (ncor_times,np.mean(tau[par_conv]) * ncor_times),'Mean Tolerance = %0.2f' % np.mean(tol[par_conv])))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('{0:<30}{1:<20}{2:<20}{3:<20}{4:<20}'.format('Parameter','Current Value','Autocorr. Time','Tolerance','Converged?'))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tfor i in range(0,len(pnames_sorted),1):\n\t\t\t\t\t\t\tif (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0) ):\n\t\t\t\t\t\t\t\tconv_bool = 'True'\n\t\t\t\t\t\t\telse: conv_bool = 'False'\n\t\t\t\t\t\t\tif (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):\n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20.4f}{3:<20.4f}{4:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tol_sorted[i],conv_bool))\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20}{3:<20}{4:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- '))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------')\n\n\t\t\t# If convergence for median autocorrelation time \n\t\t\tif (auto_stop==True) & (conv_type == 'median'):\n\t\t\t\tpar_conv = [] # converged parameter indices\n\t\t\t\tpar_not_conv = [] # non-converged parameter indices\n\t\t\t\tfor x in range(0,len(param_names),1):\n\t\t\t\t\tif (round(tau[x],1)>1.0):# & (tol[x]<autocorr_tol):\n\t\t\t\t\t\tpar_conv.append(x) # Append index of parameter for which an autocorrelation time can be calculated; we use these to calculate the mean\n\t\t\t\t\telse: par_not_conv.append(x)\n\t\t\t\t# Calculate mean of parameters for which an autocorrelation time could be calculated\n\t\t\t\tpar_conv = np.array(par_conv) # Explicitly convert to array\n\t\t\t\tpar_not_conv = np.array(par_not_conv) # Explicitly convert to array\n\n\t\t\t\tif (par_conv.size == 0) and (stop_iter == orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\nIteration = %d' % 
(k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Not enough iterations for any autocorrelation times!')\n\t\t\t\telif ( (par_conv.size > 0) and (k+1)>(np.median(tau[par_conv]) * ncor_times) and (np.median(tol[par_conv])<autocorr_tol) and (stop_iter == max_iter) ):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\n ---------------------------------------------')\n\t\t\t\t\t\tprint(' | Converged at %d iterations.\t\t\t |' % (k+1))\n\t\t\t\t\t\tprint(' | Performing %d iterations of sampling... |' % min_samp )\n\t\t\t\t\t\tprint(' | Sampling will finish at %d iterations. |' % ((k+1)+min_samp) )\n\t\t\t\t\t\tprint(' ---------------------------------------------')\n\t\t\t\t\tburn_in = (k+1)\n\t\t\t\t\tstop_iter = (k+1)+min_samp\n\t\t\t\t\tconv_tau = tau\n\t\t\t\t\tconverged = True\n\t\t\t\telif ((par_conv.size == 0) or ( (k+1)<(np.median(tau[par_conv]) * ncor_times)) or (np.median(tol[par_conv])>autocorr_tol)) and (stop_iter < orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\nIteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Jumped out of convergence! Resetting convergence criteria...')\n\t\t\t\t\t\t# Reset convergence criteria\n\t\t\t\t\t\tprint('- Resetting burn_in = %d' % orig_burn_in)\n\t\t\t\t\t\tprint('- Resetting max_iter = %d' % orig_max_iter)\n\t\t\t\t\tburn_in = orig_burn_in\n\t\t\t\t\tstop_iter = orig_max_iter\n\t\t\t\t\tconverged = False\n\n\t\t\t\tif (par_conv.size>0):\n\t\t\t\t\tpnames_sorted = param_names[i_sort]\n\t\t\t\t\ttau_sorted\t= tau[i_sort]\n\t\t\t\t\ttol_sorted\t= tol[i_sort]\n\t\t\t\t\tbest_sorted = np.array(best)[i_sort]\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('{0:<30}{1:<40}{2:<30}'.format('\\nIteration = %d' % (k+1),'%d x Median Autocorr. Time = %0.2f' % (ncor_times,np.median(tau[par_conv]) * ncor_times),'Med. Tolerance = %0.2f' % np.median(tol[par_conv])))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('{0:<30}{1:<20}{2:<20}{3:<20}{4:<20}'.format('Parameter','Current Value','Autocorr. 
Time','Tolerance','Converged?'))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tfor i in range(0,len(pnames_sorted),1):\n\t\t\t\t\t\t\tif (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0)):\n\t\t\t\t\t\t\t\tconv_bool = 'True'\n\t\t\t\t\t\t\telse: conv_bool = 'False'\n\t\t\t\t\t\t\tif (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):\t\n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20.4f}{3:<20.4f}{4:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tol_sorted[i],conv_bool))\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20}{3:<20}{4:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- '))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------')\n\t\t\t\t\n\t\t\t# If convergence for ALL autocorrelation times \n\t\t\tif (auto_stop==True) & (conv_type == 'all'):\n\t\t\t\tif ( all( (x==1.0) for x in tau) ) and (stop_iter == orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\nIteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Not enough iterations for any autocorrelation times!')\n\t\t\t\telif all( ((k+1)>(x * ncor_times)) for x in tau) and all( (x>1.0) for x in tau) and all(y<autocorr_tol for y in tol) and (stop_iter == max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\n ---------------------------------------------')\n\t\t\t\t\t\tprint(' | Converged at %d iterations.\t\t\t | ' % (k+1))\n\t\t\t\t\t\tprint(' | Performing %d iterations of sampling... | ' % min_samp )\n\t\t\t\t\t\tprint(' | Sampling will finish at %d iterations. | ' % ((k+1)+min_samp) )\n\t\t\t\t\t\tprint(' ---------------------------------------------')\n\t\t\t\t\tburn_in = (k+1)\n\t\t\t\t\tstop_iter = (k+1)+min_samp\n\t\t\t\t\tconv_tau = tau\n\t\t\t\t\tconverged = True\n\t\t\t\telif (any( ((k+1)<(x * ncor_times)) for x in tau) or any( (x==1.0) for x in tau) or any(y>autocorr_tol for y in tol)) and (stop_iter < orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\n Iteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Jumped out of convergence! Resetting convergence criteria...')\n\t\t\t\t\t\t# Reset convergence criteria\n\t\t\t\t\t\tprint('- Resetting burn_in = %d' % orig_burn_in)\n\t\t\t\t\t\tprint('- Resetting max_iter = %d' % orig_max_iter)\n\t\t\t\t\tburn_in = orig_burn_in\n\t\t\t\t\tstop_iter = orig_max_iter\n\t\t\t\t\tconverged = False\n\t\t\t\tif 1:\n\t\t\t\t\tpnames_sorted = param_names[i_sort]\n\t\t\t\t\ttau_sorted\t= tau[i_sort]\n\t\t\t\t\ttol_sorted\t= tol[i_sort]\n\t\t\t\t\tbest_sorted = np.array(best)[i_sort]\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('{0:<30}'.format('\\nIteration = %d' % (k+1)))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('{0:<30}{1:<20}{2:<20}{3:<25}{4:<20}{5:<20}'.format('Parameter','Current Value','Autocorr. Time','Target Autocorr. 
Time','Tolerance','Converged?'))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tfor i in range(0,len(pnames_sorted),1):\n\t\t\t\t\t\t\tif (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0) ):\n\t\t\t\t\t\t\t\tconv_bool = 'True'\n\t\t\t\t\t\t\telse: conv_bool = 'False'\n\t\t\t\t\t\t\tif (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):\n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20.4f}{3:<25.4f}{4:<20.4f}{5:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tau_sorted[i]*ncor_times,tol_sorted[i],str(conv_bool)))\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20}{3:<25}{4:<20}{5:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- ',' -------- '))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------------------------------------------')\n\n\t\t\t# If convergence for a specific set of parameters\n\t\t\tif (auto_stop==True) & (isinstance(conv_type,tuple)==True):\n\t\t\t\t# Get indices of parameters for which we want to converge; these will be the only ones we care about\n\t\t\t\tpar_ind = np.array([i for i, item in enumerate(param_names) if item in set(conv_type)])\n\t\t\t\t# Get list of parameters, autocorrelation times, and tolerances for the ones we care about\n\t\t\t\tparam_interest = param_names[par_ind]\n\t\t\t\ttau_interest = tau[par_ind]\n\t\t\t\ttol_interest = tol[par_ind]\n\t\t\t\tbest_interest = np.array(best)[par_ind]\n\t\t\t\t# New sort for selected parameters\n\t\t\t\ti_sort = np.argsort(param_interest) # this array gives the ordered indices of parameter names (alphabetical)\n\t\t\t\tif ( all( (x==1.0) for x in tau_interest) ) and (stop_iter == orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\nIteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Not enough iterations for any autocorrelation times!')\n\t\t\t\telif all( ((k+1)>(x * ncor_times)) for x in tau_interest) and all( (x>1.0) for x in tau_interest) and all(y<autocorr_tol for y in tol_interest) and (stop_iter == max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\n ---------------------------------------------')\n\t\t\t\t\t\tprint(' | Converged at %d iterations.\t\t\t | ' % (k+1))\n\t\t\t\t\t\tprint(' | Performing %d iterations of sampling... | ' % min_samp )\n\t\t\t\t\t\tprint(' | Sampling will finish at %d iterations. | ' % ((k+1)+min_samp) )\n\t\t\t\t\t\tprint(' ---------------------------------------------')\n\t\t\t\t\tburn_in = (k+1)\n\t\t\t\t\tstop_iter = (k+1)+min_samp\n\t\t\t\t\tconv_tau = tau\n\t\t\t\t\tconverged = True\n\t\t\t\telif (any( ((k+1)<(x * ncor_times)) for x in tau_interest) or any( (x==1.0) for x in tau_interest) or any(y>autocorr_tol for y in tol_interest)) and (stop_iter < orig_max_iter):\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('\\n Iteration = %d' % (k+1))\n\t\t\t\t\t\tprint('-------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('- Jumped out of convergence! 
Resetting convergence criteria...')\n\t\t\t\t\t\t# Reset convergence criteria\n\t\t\t\t\t\tprint('- Resetting burn_in = %d' % orig_burn_in)\n\t\t\t\t\t\tprint('- Resetting max_iter = %d' % orig_max_iter)\n\t\t\t\t\tburn_in = orig_burn_in\n\t\t\t\t\tstop_iter = orig_max_iter\n\t\t\t\t\tconverged = False\n\t\t\t\tif 1:\n\t\t\t\t\tpnames_sorted = param_interest[i_sort]\n\t\t\t\t\ttau_sorted\t= tau_interest[i_sort]\n\t\t\t\t\ttol_sorted\t= tol_interest[i_sort]\n\t\t\t\t\tbest_sorted = np.array(best_interest)[i_sort]\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint('{0:<30}'.format('\\nIteration = %d' % (k+1)))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tprint('{0:<30}{1:<20}{2:<20}{3:<25}{4:<20}{5:<20}'.format('Parameter','Current Value','Autocorr. Time','Target Autocorr. Time','Tolerance','Converged?'))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------------------------------------------')\n\t\t\t\t\t\tfor i in range(0,len(pnames_sorted),1):\n\t\t\t\t\t\t\tif (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0) ):\n\t\t\t\t\t\t\t\tconv_bool = 'True'\n\t\t\t\t\t\t\telse: conv_bool = 'False'\n\t\t\t\t\t\t\tif (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):\n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20.4f}{3:<25.4f}{4:<20.4f}{5:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tau_sorted[i]*ncor_times,tol_sorted[i],str(conv_bool)))\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}{2:<20}{3:<25}{4:<20}{5:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- ',' -------- '))\n\t\t\t\t\t\tprint('--------------------------------------------------------------------------------------------------------------------------------------------')\n\n\t\t\t# Stop\n\t\t\tif ((k+1) == stop_iter):\n\t\t\t\tbreak\n\n\t\t\told_tau = tau\t\n\n\t\t# If auto_stop=False, simply print out the parameters and their best values at that iteration\n\t\tif ((k+1) % write_iter == 0) and ((k+1)>=min_iter) and ((k+1)>=write_thresh) and (auto_stop==False):\n\t\t\tpnames_sorted = param_names[i_sort]\n\t\t\tbest_sorted = np.array(best)[i_sort]\n\t\t\tif print_output:\n\t\t\t\tprint('{0:<30}'.format('\\nIteration = %d' % (k+1)))\n\t\t\t\tprint('------------------------------------------------')\n\t\t\t\tprint('{0:<30}{1:<20}'.format('Parameter','Current Value'))\n\t\t\t\tprint('------------------------------------------------')\n\t\t\t\tfor i in range(0,len(pnames_sorted),1):\n\t\t\t\t\t\tprint('{0:<30}{1:<20.4f}'.format(pnames_sorted[i],best_sorted[i]))\n\t\t\t\tprint('------------------------------------------------')\n\n\telap_time = (time.time() - start_time)\t \n\trun_time = time_convert(elap_time)\n\tif print_output:\n\t\tprint(\"\\n emcee Runtime = %s. 
\\n\" % (run_time))\n\n\t# Write to log file\n\tif (auto_stop==True):\n\t\t# Write autocorrelation chain to log \n\t\t# np.save(run_dir+'/log/autocorr_times_all',autocorr_times_all)\n\t\t# np.save(run_dir+'/log/autocorr_tols_all',autocorr_tols_all)\n\t\t# Create a dictionary with parameter names as keys, and contains\n\t\t# the autocorrelation times and tolerances for each parameter\n\t\tautocorr_times_all = np.stack(autocorr_times_all,axis=1)\n\t\tautocorr_tols_all = np.stack(autocorr_tols_all,axis=1)\n\t\tautocorr_dict = {}\n\t\tfor k in range(0,len(param_names),1):\n\t\t\tif (np.shape(autocorr_times_all)[0] > 1):\n\t\t\t\tautocorr_dict[param_names[k]] = {'tau':autocorr_times_all[k],\n\t\t\t\t\t\t\t\t\t\t\t \t 'tol':autocorr_tols_all[k]} \n\t\tnp.save(run_dir+'/log/autocorr_dict.npy',autocorr_dict)\n\n\n\t\tif (converged == True):\n\t\t\twrite_log((burn_in,stop_iter,param_names,conv_tau,autocorr_tol,tol,ncor_times),'autocorr_results',run_dir)\n\t\telif (converged == False):\n\t\t\tunconv_tol = (np.abs((old_tau) - (tau)) / (tau))\n\t\t\twrite_log((burn_in,stop_iter,param_names,tau,autocorr_tol,unconv_tol,ncor_times),'autocorr_results',run_dir)\n\twrite_log(run_time,'emcee_time',run_dir) \n\n\t# Remove excess zeros from sampler chain if emcee converged on a solution\n\t# in fewer iterations than max_iter\n\t# Remove zeros from all chains\n\ta = [] # the zero-trimmed sampler.chain\n\tfor p in range(0,np.shape(sampler.chain)[2],1):\n\t\tc = sampler.chain[:,:,p]\n\t\tc_trimmed = [np.delete(c[i,:],np.argwhere(c[i,:]==0)) for i in range(np.shape(c)[0])] # delete any occurence of zero \n\t\ta.append(c_trimmed)\n\ta = np.swapaxes(a,1,0) \n\ta = np.swapaxes(a,2,1)\n\n\t# Collect garbage\n\tdel lnprob_args\n\tif (auto_stop==True):\n\t\tdel tau\n\t\tdel tol\n\tgc.collect()\n\n\treturn a, burn_in, sampler.blobs",
"def MCMC_setup():\n # set seed\n set_random_seed(parameter_dict[\"Seed\"])\n # create output folder if not existent\n if not os.path.isdir(parameter_dict[\"Output\"]):\n os.mkdir(parameter_dict[\"Output\"])\n # check if data file exists\n if not os.path.exists(parameter_dict[\"Data\"]):\n raise Exception(\"Data file not found \" + parameter_dict[\"Data\"])\n # write information about MCMC run\n write_settings_file(parameter_dict[\"Output\"] + \"settings.log\", time=now)\n\n # if the header in the data file is not the default one, use these names for the respective columns\n header = [parameter_dict[\"lang_col\"],\n parameter_dict[\"concept_col\"],\n parameter_dict[\"transcription_col\"],\n parameter_dict[\"cognate_class_col\"]]\n # process the data\n if parameter_dict[\"folder\"] is not None:\n pre_tree, sm_d, tr_d = read_state_from_file(parameter_dict[\"folder\"])\n else:\n pre_tree = None\n sm_d = None\n tr_d = None\n\n # process the data\n data = data_creation(data_file=parameter_dict[\"Data\"],\n header=header,\n sound_model=parameter_dict[\"Sound Model\"],\n cc_sample=parameter_dict[\"sample_cognates\"],\n ldn=parameter_dict[\"ldn\"],\n diag=parameter_dict[\"bottom-up\"],\n check_consistency=parameter_dict[\"consistency_checker\"],\n pre_tree=pre_tree)\n MCMC_mod = MCMC.MCMC.create_mcmc(data=data, parameters=parameter_dict, tr_params=tr_d, em_params=sm_d)\n # set up the MCMC\n\n return MCMC_mod",
"def do_mcmc(self, nsamp, do_propMH=True, prog=True, do_lockstep=True):\n # Initialize all models\n for model in self.model_list:\n model.params.lp.set_val(model.logPost())\n\n # MCMC loop\n for _ in tqdm(range(nsamp), desc='MCMC sampling', mininterval=0.5, disable=not(prog)):\n\n ### The usual: sample all non-hierarchical model parameters\n for model in self.model_list:\n model.mcmc_step(do_propMH)\n\n ### Sampling hierarchical parameters\n for hi in range(self.n_hier):\n theta_inds = self.hier_theta_inds[hi, :]\n n_models = theta_inds.shape[0]\n mu_param = self.hier_mu[hi]\n lam_param = self.hier_lambda[hi]\n self.mcmc_step_mulam(theta_inds, mu_param)\n self.mcmc_step_mulam(theta_inds, lam_param)\n\n if do_lockstep:\n ## Lockstep update\n arr_ind = np.unravel_index(0, self.hier_delta[hi].val_shape, order='F')\n delta_cand = self.hier_delta[hi].mcmc.draw_candidate(arr_ind, False)\n mu_cand = delta_cand + mu_param.val[0, 0].copy()\n # check in bounds for mu; check in bounds for theta (TODO other constraints...)\n inb = mu_param.prior.is_in_bounds(mu_cand)\n if inb:\n for mi in range(n_models):\n if theta_inds[mi] > -1:\n theta_param = self.model_list[mi].params.theta\n inb = inb and (mu_cand > theta_param.prior.bounds[0] and mu_cand < theta_param.prior.bounds[1])\n # Check if new thetas will be in bounds, too, and don't continue if not\n tv = theta_param.val[0, theta_inds[mi]]\n inb = inb and (tv + delta_cand > theta_param.prior.bounds[0] and tv + delta_cand < theta_param.prior.bounds[1])\n # If in bounds, evaluate draw to decide whether or not to accept\n if inb:\n # Store old/current log prior and prior params for thetas in case reject,\n # put modified mu cand into priors and thetas\n old_prior = []\n old_prior_params = []\n old_lik = []\n for mi in range(n_models):\n if theta_inds[mi] > -1:\n old_prior_params.append(copy.deepcopy(self.model_list[mi].params.theta.prior.params))\n old_lik.append(self.model_list[mi].logLik('theta'))\n #old_prior.append(self.model_list[mi].log_prior())\n self.model_list[mi].params.theta.prior.params[0][0, theta_inds[mi]] = mu_cand\n self.model_list[mi].params.theta.refVal[0, theta_inds[mi]] = self.model_list[mi].params.theta.val[0, theta_inds[mi]].copy()\n self.model_list[mi].params.theta.val[0, theta_inds[mi]] = self.model_list[mi].params.theta.val[0, theta_inds[mi]].copy() + delta_cand\n old_prior.append(self.hier_mu[hi].prior.compute_log_prior())\n # Put mu candidate into place\n mu_param.refVal = mu_param.val.copy()\n mu_param.val[0, 0] = mu_cand\n # Compute new prior/lik\n new_prior = []\n new_lik = []\n for mi in range(n_models):\n if theta_inds[mi] > -1:\n #new_prior.append(self.model_list[mi].log_prior())\n new_lik.append(self.model_list[mi].logLik('theta'))\n new_prior.append(self.hier_mu[hi].prior.compute_log_prior())\n # Calculate acceptance\n if np.log(np.random.uniform()) < (sum(new_prior) + sum(new_lik) - sum(old_prior) - sum(old_lik)):\n # Accept: most of work is done, update each model's logpost and update recorded mcmc draw\n for mi in range(n_models):\n if theta_inds[mi] > -1:\n # TODO: do we store loglik separately? 
(so don't need to do whole loglik again?)\n self.model_list[mi].params.lp.val = self.model_list[mi].logPost('theta')\n # Have to overwrite already recorded sample for theta\n self.model_list[mi].params.theta.mcmc.draws[_][0, theta_inds[mi]] = self.model_list[mi].params.theta.val[0, theta_inds[mi]].copy()\n else:\n # Reject: need to put things back\n mu_param.val = mu_param.refVal.copy()\n for mi in range(n_models):\n if theta_inds[mi] > -1:\n self.model_list[mi].params.theta.prior.params = old_prior_params[mi]\n self.model_list[mi].params.theta.val[0, theta_inds[mi]] = self.model_list[mi].params.theta.refVal[0, theta_inds[mi]].copy()\n\n # Record hierarchical model draws\n self.hier_mu[hi].mcmc.record()\n self.hier_lambda[hi].mcmc.record()\n self.hier_delta[hi].mcmc.record()\n # Recalculate and record logPost into each model\n for model in self.model_list:\n lp_tmp = model.logPost()\n model.params.lp.set_val(lp_tmp)\n model.params.lp.mcmc.draws[_] = lp_tmp",
"def _setup_mcmc(model, n_chains, *, init_position=None, seed=None, **pins):\n pinned_model = model.experimental_pin(**pins) if pins else model\n bijector, step_bijector = _get_flat_unconstraining_bijector(pinned_model)\n\n if init_position is None:\n raw_init_dist = initialization.init_near_unconstrained_zero(pinned_model)\n init_position = initialization.retry_init(\n raw_init_dist.sample,\n target_fn=pinned_model.unnormalized_log_prob,\n sample_shape=n_chains,\n seed=seed)\n\n initial_transformed_position = tf.nest.map_structure(\n tf.identity, bijector.forward(init_position))\n\n batch_shape = pinned_model.batch_shape\n if tf.nest.is_nested(batch_shape):\n batch_shape = functools.reduce(tf.broadcast_static_shape,\n tf.nest.flatten(batch_shape))\n\n if not tensorshape_util.is_fully_defined(batch_shape):\n batch_shape = pinned_model.batch_shape_tensor()\n if tf.nest.is_nested(batch_shape):\n batch_shape = functools.reduce(tf.broadcast_dynamic_shape,\n tf.nest.flatten(batch_shape))\n\n # This tf.function is not redundant with the ones on _fast_window\n # and _slow_window because the various kernels (like HMC) may invoke\n # `target_log_prob_fn` multiple times within one window.\n @tf.function(autograph=False)\n def target_log_prob_fn(*args):\n lp = pinned_model.unnormalized_log_prob(bijector.inverse(args))\n ldj = bijector.inverse_log_det_jacobian(\n args, event_ndims=[1 for _ in initial_transformed_position])\n return lp + ldj\n\n def step_broadcast(step_size):\n # Only apply the bijector to nested step sizes or non-scalar batches.\n if tf.nest.is_nested(step_size):\n return step_bijector(\n nest_util.broadcast_structure(pinned_model.event_shape_tensor(),\n step_size))\n else:\n return step_size\n\n shard_axis_names = pinned_model.experimental_shard_axis_names\n if any(tf.nest.flatten(shard_axis_names)):\n shard_axis_names = nest.flatten_up_to(\n initial_transformed_position,\n list(pinned_model._model_flatten(shard_axis_names))) # pylint: disable=protected-access\n\n else:\n # No active shard axis names\n shard_axis_names = None\n\n return (target_log_prob_fn,\n initial_transformed_position,\n bijector,\n step_broadcast,\n ps.convert_to_shape_tensor(batch_shape, name='batch_shape'),\n shard_axis_names)",
"def do_sampling(args, phot, sampler_type=\"ensemble\"):\n p0 = phot.model.get_params()\n print \"Starting params:\", p0\n nvars = len(p0)\n\n if sampler_type == \"ensemble\":\n p0 = emcee.utils.sample_ball(p0, np.ones_like(p0) * 0.1, args.nwalkers)\n sampler = emcee.EnsembleSampler(args.nwalkers,\n nvars,\n phot,\n threads=args.nthreads)\n elif sampler_type == \"parallel\":\n # p0 = np.random.uniform(low=1., high=5.0, size=(args.ntemps, args.nwalkers, nvars))\n p0 = walker_temp_ball(p0, 0.1, args.ntemps, args.nwalkers)\n phot_prior = PhotometryPrior()\n phot.use_prior = False\n sampler = emcee.PTSampler(args.ntemps,\n args.nwalkers,\n nvars, \n phot,\n phot_prior)\n else:\n raise KeyError(\"Unsupported sampler type\")\n\n nburn = max([1,args.nburn])\n logging.info(\"Burning with {:d} steps\".format(nburn))\n if sampler_type == \"ensemble\":\n pp, lnp, rstate = sampler.run_mcmc(p0, nburn)\n sampler.reset()\n pps = []\n lnps = []\n lnpriors = []\n logging.info(\"Running Ensemble sampler\")\n for i in range(args.nsamples):\n if np.mod(i+1, 10) == 0:\n print \"\\tStep {:d} / {:d}, lnp: {:5.4g}\".format(i+1, args.nsamples,\n np.mean(pp))\n pp, lnp, rstate = sampler.run_mcmc(pp, 1, lnprob0=lnp, rstate0=rstate)\n if not args.quiet:\n print i, np.mean(lnp)\n print np.mean(pp, axis=0)\n print np.std(pp, axis=0)\n lnprior = np.array([phot.lnprior(p) for p in pp])\n pps.append(np.column_stack((pp.copy(), lnprior)))\n lnps.append(lnp.copy())\n elif sampler_type == \"parallel\":\n # burn-in\n for p, lnprob, lnlike in sampler.sample(p0, iterations=nburn):\n pass\n sampler.reset()\n logging.info(\"Running Parallel Tempering sampler\")\n lnps = np.zeros((args.nsamples, args.nwalkers))\n isamp = 0\n for p, lnprob, lnlike in sampler.sample(p, \n lnprob0=lnprob,\n lnlike0=lnlike,\n iterations=args.nsamples, \n thin=1):\n lnps[isamp, :] = lnprob[0,...]\n isamp += 1\n logging.debug(\"Finished Parallel Tempering sampler\")\n print sampler.chain.shape\n # pps = sampler.chain[0, ...] ## Return only zero-temp samples\n pps = np.zeros((args.nsamples, args.nwalkers, nvars+1), dtype=np.float64)\n for isamp in xrange(args.nsamples):\n pps[isamp, :, 0:nvars] = sampler.chain[0, :, isamp, :]\n print \"lnps:\", np.array(lnps).shape\n else:\n raise KeyError(\"Unsupported sampler type\")\n return np.array(pps), np.array(lnps)",
"def mcmc(x0,loglikelihood,logprior,stepsize,nSim):\n X=np.zeros(int(nSim))\n\n logPrior=logprior(x0)\n logL=loglikelihood(x0)\n X[0]=x0\n sim = np.random.normal(0,1,nSim-1)\n\n for ii in range(1,nSim):\n x1=x0+sim[ii-1]*stepsize\n proposed_logprior=logprior(x1)\n proposed_logL=loglikelihood(x1)\n if np.log(np.random.uniform())<proposed_logprior-logPrior+proposed_logL-logL:\n x0=x1\n logL=proposed_logL\n logPrior=proposed_logprior\n X[ii]=x0\n\n return X",
"def test_posterior_fitting_univariate_mog(self):\n # set up conjugate Gamma prior\n gamma_prior = scipy.stats.gamma(a=2., scale=5.)\n # get data\n thetas, x = sample_poisson(gamma_prior, n_samples=100, sample_size=10)\n sx = calculate_stats_toy_examples(x)\n sx, norm = normalize(sx)\n\n # define a MoG model with n_params + 1 inputs: data dimensions plus model index\n model = UnivariateMogMDN()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n trainer = Trainer(model, optimizer, verbose=True)\n\n loss_trace = trainer.train(sx, thetas, n_epochs=10, n_minibatch=10)",
"def do_sampling(self, savefile, nwalkers=100, burnin=600, nsamples = 300, while_loop=True, maxsample=200):\n #Limits\n if self.cstring is not None:\n #Say Gmu ranges from exp(-45) - exp(-14), LIGO merger rate between 0 and 100\n #and IMRI rate from 0 to 1.\n pr = np.array([10, 100, 0.1, 2])\n #Priors are assumed to be in the middle.\n cent = np.array([-40, 55, 0.05, 1])\n elif self.phase is not None:\n pr = np.array([2, 100, 0.1, 2, 0.05])\n cent = np.array([13, 100, 0.1, 1, 0.1])\n p0 = [cent+2*pr/16.*np.random.rand(len(pr))-pr/16. for _ in range(nwalkers)]\n lnk0 = np.array([self.lnlikelihood(pp) for pp in p0])\n assert np.all(np.isfinite(lnk0))\n emcee_sampler = emcee.EnsembleSampler(nwalkers, np.size(pr), self.lnlikelihood)\n pos, _, _ = emcee_sampler.run_mcmc(p0, burnin)\n #Check things are reasonable\n #print(emcee_sampler.acceptance_fraction)\n assert np.all(emcee_sampler.acceptance_fraction > 0.01)\n emcee_sampler.reset()\n emcee_sampler = emcee.EnsembleSampler(nwalkers, np.size(pr), self.lnlikelihood)\n pos, _, _ = emcee_sampler.run_mcmc(p0, burnin)\n #print(emcee_sampler.acceptance_fraction)\n emcee_sampler.reset()\n self.cur_results = emcee_sampler\n gr = 10.\n count = 0\n while np.any(gr > 1.01) and count < maxsample:\n emcee_sampler.run_mcmc(pos, nsamples)\n gr = gelman_rubin(emcee_sampler.chain)\n print(\"Total samples:\",nsamples,\" Gelman-Rubin: \",gr)\n np.savetxt(savefile+\".tmp\", emcee_sampler.flatchain)\n shutil.move(savefile+\".tmp\", savefile)\n count += 1\n if while_loop is False:\n break\n self.flatchain = emcee_sampler.flatchain\n return emcee_sampler",
"def my_sampling(dim, log_posterior, nbr_walkers=configs['nbr_walkers'], nbr_warmup=configs['nbr_warmup'], nbr_samples=configs['nbr_samples']):\n\tinitial_positions = np.random.rand(nbr_walkers, dim)\n\tinitial_positions /= np.expand_dims(np.sum(initial_positions, axis=1), axis=1) + np.random.rand(nbr_walkers, 1) # make sure sum(p_full) = 1\n\tsampler = ensembleSampler(nbr_walkers, dim, log_posterior, pool=Pool())\n\n\tpos, tr, pr = sampler.run_mcmc(initial_positions, nbr_warmup)\n\tsampler.reset()\n\tsampler.run_mcmc(pos, nbr_samples);\n\t\t\n\treturn sampler.flatchain",
"def postprocess_mcmc_chain(kwargs_result, samples, kwargs_model, fixed_lens_kwargs, fixed_ps_kwargs, fixed_src_light_kwargs, fixed_special_kwargs, kwargs_constraints, kwargs_fixed_lens_light=None, verbose=False, forward_modeling=False):\n param = Param(kwargs_model, fixed_lens_kwargs, kwargs_fixed_ps=fixed_ps_kwargs, kwargs_fixed_source=fixed_src_light_kwargs, kwargs_fixed_special=fixed_special_kwargs, kwargs_fixed_lens_light=kwargs_fixed_lens_light, kwargs_lens_init=kwargs_result['kwargs_lens'], **kwargs_constraints)\n if verbose:\n param.print_setting()\n n_samples = len(samples)\n processed = []\n for i in range(n_samples):\n kwargs = {}\n kwargs_out = param.args2kwargs(samples[i])\n kwargs_lens_out, kwargs_special_out, kwargs_ps_out, kwargs_source_out, kwargs_lens_light_out = kwargs_out['kwargs_lens'], kwargs_out['kwargs_special'], kwargs_out['kwargs_ps'], kwargs_out['kwargs_source'], kwargs_out['kwargs_lens_light']\n for k, v in kwargs_lens_out[0].items():\n kwargs['lens_mass_{:s}'.format(k)] = v\n for k, v in kwargs_lens_out[1].items():\n kwargs['external_shear_{:s}'.format(k)] = v\n if forward_modeling:\n for k, v in kwargs_source_out[0].items():\n kwargs['src_light_{:s}'.format(k)] = v\n for k, v in kwargs_lens_light_out[0].items():\n kwargs['lens_light_{:s}'.format(k)] = v\n else:\n kwargs['src_light_R_sersic'] = kwargs_source_out[0]['R_sersic']\n if 'ra_source' in kwargs_ps_out[0]:\n kwargs['src_light_center_x'] = kwargs_ps_out[0]['ra_source'] - kwargs_lens_out[0]['center_x']\n kwargs['src_light_center_y'] = kwargs_ps_out[0]['dec_source'] - kwargs_lens_out[0]['center_y']\n for k, v in kwargs_special_out.items():\n kwargs[k] = v\n processed.append(kwargs)\n processed_df = pd.DataFrame(processed)\n processed_df = metadata_utils.add_qphi_columns(processed_df)\n processed_df = metadata_utils.add_gamma_psi_ext_columns(processed_df)\n return processed_df",
"def run_MC_EM(self,\n max_iter=1000,\n tol=1e-4,\n num_burnin=None,\n num_results=None,\n num_leapfrog_steps=None,\n eps_sq=None):\n\n if num_burnin == None:\n num_burnin = self.num_burnin\n if num_results == None:\n num_results = self.num_results\n if num_leapfrog_steps == None:\n num_leapfrog_steps = self.num_leapfrog_steps\n if eps_sq == None:\n eps_sq = self.eps_sq\n\n var_out_chain = self.E_step()\n self.W, self.H, self.g_T = self.M_step(var_out_chain)\n\n # costs = []\n\n # for n in range(max_iter):\n # var_out_r_ds = self.train_ds.map(self.single_E_step)\n # self.W, self.H, self.g = self.single_M_step(var_out_r_ds)\n\n # costs.append(cost_Q(self.W, self.H, self.g, var_out, x_sq))\n\n # if n > 0 and abs(costs[n]-costs[n+1]) < tol:\n # print(\"MC_EM converged after {} steps!\".format(n+1))\n # break",
"def emceehammer(challenge,procs=10,suffix=None,ndim=None,nwalkers=200,iters=100,limit=None,inject=False,resume=False,checkpoint=None):\n\n global resid_f,cgw,alphaab,times_f,gmat,meta,cpn,error_f\n\n meta,desi,times_f,resid_f,error_f = load(challenge,limit=limit)\n alphaab = alphamat(meta)\n\n if inject == 'raw':\n print \"Loading clean data from raw challenge files\"\n resid_f = loadraw(challenge,limit=None)\n\n if ndim is None and challenge in ['closed1','closed2','closed3']:\n ndim = 2*len(meta) + 2\n\n with timing(\"Initial setup\"):\n cgw = Cgw_100ns(alphaab,times_f,alpha=-2.0/3.0,fL=1.0/500)\n cpn = Cpn(error_f)\n\n if challenge == 'open3':\n cpn = cpn + Cred_100ns(alphaab,times_f,A=5.77e-22,alpha=1.7,fL=1.0/500)\n\n if inject == 'inject':\n print \"Injecting synthetic signals at dataset times\"\n resid_f = simulate(alphaab,times_f,cgw,cpn,A=5e-14,n=1)\n\n if desi is None:\n gmat = Gproj(times_f,len(meta))\n else:\n print \"Using tempo2 design matrix\"\n gmat = Gdesi2(desi,meta) # gmat = Gdesi(desi,len(meta))\n\n resid_f = N.dot(gmat.T,resid_f)\n\n if ndim == 1: # otherwise the multiplication is done in logL\n cgw = blockmul(cgw,gmat,meta) # cgw = N.dot(gmat.T,N.dot(cgw,gmat))\n cpn = blockmul(cpn,gmat,meta) # cpn = N.dot(gmat.T,N.dot(cpn,gmat))\n\n if N.any(N.isnan(cgw.flatten())) or N.any(N.isinf(cgw.flatten())):\n raise ArithmeticError\n\n # multiprocessing seems to work better if nwalkers >> procs\n # also keep in mind that the ensemble is split in two...\n\n trueA, truealpha = 5e-14, -2.0/3.0\n trueAred, truealphared = 5.77e-22, 1.7\n\n if ndim == 1:\n # initial walker positions - a list of numpy arrays\n p0 = [random.uniform(trueA*0.5,trueA*1.5) for i in range(nwalkers)]\n\n sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob,args=[],threads=int(procs))\n elif ndim == 2:\n p0 = [[random.uniform(trueA*0.5,trueA*1.5),\n random.uniform(alpha_min,alpha_max)] for i in range(nwalkers)]\n\n sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob2,args=[],threads=int(procs))\n elif ndim == 4:\n p0 = [[random.uniform(trueA*0.5,trueA*1.5),\n random.uniform(alpha_min,alpha_max),\n random.uniform(trueAred*0.1,trueAred*10),\n random.uniform(alphared_min,alphared_max)] for i in range(nwalkers)]\n\n sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob4,args=[],threads=int(procs))\n elif ndim == 2*len(meta) + 2:\n p0 = [[random.uniform(trueA*0.5,trueA*1.5),random.uniform(alpha_min,alpha_max)] +\n [value for pulsar in meta\n for value in [random.uniform(math.log10(trueAred*0.1),math.log10(trueAred*10)),random.uniform(alphared_min,alphared_max)]]\n# for value in [random.uniform(trueAred*0.1,trueAred*10),random.uniform(alphared_min,alphared_max)]]\n for i in range(nwalkers)]\n\n sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob22Nlog,args=[],threads=int(procs))\n # sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob22N,args=[],threads=int(procs))\n elif ndim == 3*len(meta) + 2:\n p0 = [[random.uniform(trueA*0.5,trueA*1.5),random.uniform(alpha_min,alpha_max)] +\n [value for pulsar in meta\n for value in [random.uniform(trueAred*0.1,trueAred*10),\n random.uniform(alphared_min,alphared_max),\n random.uniform(log10_efac_min,log10_efac_max)]]\n for i in range(nwalkers)]\n\n sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob23N,args=[],threads=int(procs))\n\n suffix = (suffix + '-' + str(ndim)) if suffix else str(ndim)\n\n resumefile = '../runs/resume-{0}-{1}.npy'.format(challenge,suffix)\n chainfile = '../runs/chain-{0}-{1}.npy'.format(challenge,suffix)\n lnprobfile = 
'../runs/lnprob-{0}-{1}.npy'.format(challenge,suffix)\n\n if resume:\n p0 = N.load(resumefile)\n print \"Resuming run from file\", resumefile\n\n if checkpoint:\n for subrun in range(int(iters/checkpoint)):\n with timing(\"{0} x {1} samples (subrun {2})\".format(checkpoint,nwalkers,subrun)):\n sampler.run_mcmc(p0,checkpoint)\n\n p0 = sampler.chain[:,-1,:]\n N.save(resumefile,p0)\n\n N.save(chainfile, sampler.chain)\n N.save(lnprobfile,sampler.lnprobability)\n else:\n with timing(\"{0} x {1} samples\".format(iters,nwalkers)):\n sampler.run_mcmc(p0,iters)\n\n N.save(resumefile,sampler.chain[:,-1,:])\n\n N.save(chainfile, sampler.chain)\n N.save(lnprobfile,sampler.lnprobability)\n\n print \"Done! Mean acceptance fraction:\", N.mean(sampler.acceptance_fraction)\n\n # TO DO: broken in the emcee revision I'm using?\n # print \"Autocorrelation time:\", sampler.acor",
"def fit(self, init_params=None, update_params=True, **kwargs):\n if init_params is None:\n init_params = self.params\n\n self.fit_result = self._dofit(init_params, **kwargs)\n print(self.fit_result)\n\n if True or self.fit_result.success and update_params:\n for par, value in zip([p for p in init_params if init_params[p].vary], self.fit_result.x):\n self.params[par].value = value\n\n hess = self.fit_result.hess_inv(self.fit_result.x) if callable(self.fit_result.hess_inv) else np.diag(self.fit_result.hess_inv)\n\n # make sure we only get the finite parameter errors\n self.param_error = np.zeros(len(self.params))\n self.param_error[hess>0] = hess[hess>0] ** 0.5\n\n self.process_fit_results(self.fit_result, self.params)",
"def init_mp(theta_space, table, data, random_seed, N_MCMC_SAMPLE=3000, MCMC_GAUSSIAN_STD=0.1):\n\n # randomly draw a template mondrian process\n # sample = draw_informed_Mondrian(theta_space, table)\n # log_p_sample = comp_log_p_sample(sample, pooled_data) + \\\n # comp_log_p_prior(sample, table, [1 for _ in range(table.shape[1])])\n\n # for idx in xrange(n_mcmc_sample):\n # new_sample = Mondrian_Gaussian_perturbation(theta_space,sample, mcmc_gaussian_std)\n # # perform accept-reject step\n # new_log_p_sample = comp_log_p_sample(new_sample, data) + \\\n # comp_log_p_prior(new_sample, table, [1 for _ in range(table.shape[1])])\n\n # if new_log_p_sample >= log_p_sample or \\\n # np.log(np.random.uniform(low=0, high=1.)) <= new_log_p_sample - log_p_sample:\n # sample = new_sample\n # log_p_sample = new_log_p_sample\n return MP_mcmc(data, theta_space, table, random_seed, N_MCMC_SAMPLE=3000, MCMC_GAUSSIAN_STD=0.1)[-1]",
"def setup(dm, key='%s', data_list=None):\n vars = {}\n\n\n param_type = 'all-cause_mortality'\n data = [d for d in data_list if d['data_type'] == 'all-cause mortality data']\n m_all_cause = dm.mortality(key % param_type, data)\n\n covariate_dict = dm.get_covariates()\n X_region, X_study = rate_model.regional_covariates(key, covariate_dict)\n est_mesh = dm.get_estimate_age_mesh()\n\n # update age_weights on non-incidence/prevalence data to reflect\n # prior prevalence distribution, if available\n prior_prev = dm.get_mcmc('emp_prior_mean', key % 'prevalence')\n if len(prior_prev) > 0:\n for d in data:\n if d['data_type'].startswith('incidence') or d['data_type'].startswith('prevalence'):\n continue\n age_indices = indices_for_range(est_mesh, d['age_start'], d['age_end'])\n d['age_weights'] = prior_prev[age_indices]\n d['age_weights'] /= sum(d['age_weights']) # age weights must sum to 1 (optimization of inner loop removed check on this)\n \n\n for param_type in ['incidence', 'remission', 'excess-mortality']:\n data = [d for d in data_list if d['data_type'] == '%s data' % param_type]\n\n lower_bound_data = []\n # TODO: include lower bound data when appropriate\n \n prior_dict = dm.get_empirical_prior(param_type)\n if prior_dict == {}:\n prior_dict.update(alpha=np.zeros(len(X_region)),\n beta=np.zeros(len(X_study)),\n gamma=-5*np.ones(len(est_mesh)),\n sigma_alpha=[1.],\n sigma_beta=[1.],\n sigma_gamma=[10.],\n # delta is filled in from the global prior dict in neg_binom setup\n )\n vars[key % param_type] = rate_model.setup(dm, key % param_type, data,\n emp_prior=prior_dict, lower_bound_data=lower_bound_data)\n\n i = vars[key % 'incidence']['rate_stoch']\n r = vars[key % 'remission']['rate_stoch']\n f = vars[key % 'excess-mortality']['rate_stoch']\n\n # Initial population with condition\n logit_C_0 = mc.Normal('logit_%s' % (key % 'C_0'), -5., 10.**-2, value=-5.)\n @mc.deterministic(name=key % 'C_0')\n def C_0(logit_C_0=logit_C_0):\n return mc.invlogit(logit_C_0)\n \n # Initial population without condition\n @mc.deterministic(name=key % 'S_0')\n def SC_0(C_0=C_0):\n return np.array([1. 
- C_0, C_0]).ravel()\n vars[key % 'bins'] = {'initial': [SC_0, C_0, logit_C_0]}\n \n \n # iterative solution to difference equations to obtain bin sizes for all ages\n import scipy.linalg\n @mc.deterministic(name=key % 'bins')\n def SCpm(SC_0=SC_0, i=i, r=r, f=f, m_all_cause=m_all_cause, age_mesh=dm.get_param_age_mesh()):\n SC = np.zeros([2, len(age_mesh)])\n p = np.zeros(len(age_mesh))\n m = np.zeros(len(age_mesh))\n \n SC[:,0] = SC_0\n p[0] = SC_0[1] / (SC_0[0] + SC_0[1])\n m[0] = trim(m_all_cause[age_mesh[0]] - f[age_mesh[0]] * p[0], .1*m_all_cause[age_mesh[0]], 1-NEARLY_ZERO)\n\n for ii, a in enumerate(age_mesh[:-1]):\n A = np.array([[-i[a]-m[ii], r[a] ],\n [ i[a] , -r[a]-m[ii]-f[a]]]) * (age_mesh[ii+1] - age_mesh[ii])\n\n SC[:,ii+1] = np.dot(scipy.linalg.expm(A), SC[:,ii])\n \n p[ii+1] = trim(SC[1,ii+1] / (SC[0,ii+1] + SC[1,ii+1]), NEARLY_ZERO, 1-NEARLY_ZERO)\n m[ii+1] = trim(m_all_cause[age_mesh[ii+1]] - f[age_mesh[ii+1]] * p[ii+1], .1*m_all_cause[age_mesh[ii+1]], 1-NEARLY_ZERO)\n\n SCpm = np.zeros([4, len(age_mesh)])\n SCpm[0:2,:] = SC\n SCpm[2,:] = p\n SCpm[3,:] = m\n return SCpm\n\n vars[key % 'bins']['age > 0'] = [SCpm]\n\n \n # prevalence = # with condition / (# with condition + # without)\n @mc.deterministic(name=key % 'p')\n def p(SCpm=SCpm, param_mesh=dm.get_param_age_mesh(), est_mesh=dm.get_estimate_age_mesh()):\n return dismod3.utils.interpolate(param_mesh, SCpm[2,:], est_mesh)\n data = [d for d in data_list if d['data_type'] == 'prevalence data']\n prior_dict = dm.get_empirical_prior('prevalence')\n if prior_dict == {}:\n prior_dict.update(alpha=np.zeros(len(X_region)),\n beta=np.zeros(len(X_study)),\n gamma=-5*np.ones(len(est_mesh)),\n sigma_alpha=[1.],\n sigma_beta=[1.],\n sigma_gamma=[10.],\n # delta is filled in from the global prior dict in neg_binom setup\n )\n \n vars[key % 'prevalence'] = rate_model.setup(dm, key % 'prevalence', data, p, emp_prior=prior_dict)\n p = vars[key % 'prevalence']['rate_stoch'] # replace perfectly consistent p with version including level-bound priors\n \n # make a blank prior dict, to avoid weirdness\n blank_prior_dict = dict(alpha=np.zeros(len(X_region)),\n beta=np.zeros(len(X_study)),\n gamma=-5*np.ones(len(est_mesh)),\n sigma_alpha=[1.],\n sigma_beta=[1.],\n sigma_gamma=[10.],\n delta=100.,\n sigma_delta=1.\n )\n # cause-specific-mortality is a lower bound on p*f\n @mc.deterministic(name=key % 'pf')\n def pf(p=p, f=f):\n return (p+NEARLY_ZERO)*f\n # TODO: add a 'with-condition population mortality rate date' type\n # data = [d for d in data_list if d['data_type'] == 'with-condition population mortality rate data']\n data = []\n lower_bound_data = [d for d in data_list if d['data_type'] == 'cause-specific mortality data']\n vars[key % 'prevalence_x_excess-mortality'] = rate_model.setup(dm, key % 'pf', rate_stoch=pf, data_list=data, lower_bound_data=lower_bound_data, emp_prior=blank_prior_dict)\n \n\n # m = m_all_cause - f * p\n @mc.deterministic(name=key % 'm')\n def m(SCpm=SCpm, param_mesh=dm.get_param_age_mesh(), est_mesh=dm.get_estimate_age_mesh()):\n return dismod3.utils.interpolate(param_mesh, SCpm[3,:], est_mesh)\n vars[key % 'm'] = m\n\n # m_with = m + f\n @mc.deterministic(name=key % 'm_with')\n def m_with(m=m, f=f):\n return m + f\n data = [d for d in data_list if d['data_type'] == 'mortality data']\n # TODO: test this\n #prior_dict = dm.get_empirical_prior('excess-mortality') # TODO: make separate prior for with-condition mortality\n vars[key % 'mortality'] = rate_model.setup(dm, key % 'm_with', data, m_with, 
emp_prior=blank_prior_dict)\n\n # mortality rate ratio = mortality with condition / mortality without\n @mc.deterministic(name=key % 'RR')\n def RR(m=m, m_with=m_with):\n return m_with / (m + .0001)\n data = [d for d in data_list if d['data_type'] == 'relative-risk data']\n vars[key % 'relative-risk'] = log_normal_model.setup(dm, key % 'relative-risk', data, RR)\n \n # standardized mortality rate ratio = mortality with condition / all-cause mortality\n @mc.deterministic(name=key % 'SMR')\n def SMR(m_with=m_with, m_all_cause=m_all_cause):\n return m_with / (m_all_cause + .0001)\n data = [d for d in data_list if d['data_type'] == 'smr data']\n vars[key % 'smr'] = log_normal_model.setup(dm, key % 'smr', data, SMR)\n\n # duration = E[time in bin C]\n @mc.deterministic(name=key % 'X')\n def X(r=r, m=m, f=f):\n hazard = r + m + f\n pr_not_exit = np.exp(-hazard)\n X = np.empty(len(hazard))\n X[-1] = 1 / hazard[-1]\n for i in reversed(range(len(X)-1)):\n X[i] = pr_not_exit[i] * (X[i+1] + 1) + 1 / hazard[i] * (1 - pr_not_exit[i]) - pr_not_exit[i]\n return X\n data = [d for d in data_list if d['data_type'] == 'duration data']\n vars[key % 'duration'] = normal_model.setup(dm, key % 'duration', data, X)\n\n # YLD[a] = disability weight * i[a] * X[a] * regional_population[a]\n @mc.deterministic(name=key % 'i*X')\n def iX(i=i, X=X, p=p, pop=rate_model.regional_population(key)):\n return i * X * (1-p) * pop \n vars[key % 'incidence_x_duration'] = {'rate_stoch': iX}\n\n return vars",
"def plot_sfh_mcmc(name_sampler_fits, nchains=200, del_t=0.05, lbacktime_max=None, yrange=None, factor=1.0, loc_legend=2, fontsize_tick=18, \n\tfontsize_label=25, fontsize_legend=26, logscale_x=False, logscale_y=False, name_plot=None):\n\t\n\thdu = fits.open(name_sampler_fits)\n\theader_samplers = hdu[0].header\n\tdata_samplers = hdu[1].data\n\thdu.close()\n\n\tsfh_form = header_samplers['sfh_form']\n\tnsamplers = len(data_samplers['log_age'])\n\tfree_z = int(header_samplers['free_z'])\n\n\t# cosmology parameter\n\tcosmo = header_samplers['cosmo']\n\tH0 = float(header_samplers['H0'])\n\tOm0 = float(header_samplers['Om0'])\n\n\tif free_z == 0:\n\t\tgal_z = float(header_samplers['gal_z'])\n\t\tif cosmo == 'flat_LCDM':\n\t\t\tcosmo1 = FlatLambdaCDM(H0=H0, Om0=Om0)\n\t\t\tmax_lbt = cosmo1.age(gal_z).value\n\t\telif cosmo == 'WMAP5':\n\t\t\tmax_lbt = WMAP5.age(gal_z).value\n\t\telif cosmo == 'WMAP7':\n\t\t\tmax_lbt = WMAP7.age(gal_z).value\n\t\telif cosmo == 'WMAP9':\n\t\t\tmax_lbt = WMAP9.age(gal_z).value\n\t\telif cosmo == 'Planck13':\n\t\t\tmax_lbt = Planck13.age(gal_z).value\n\t\telif cosmo == 'Planck15':\n\t\t\tmax_lbt = Planck15.age(gal_z).value\n\t\t#elif cosmo == 'Planck18':\n\t\t#\tmax_lbt = Planck18.age(gal_z).value\n\telif free_z == 1:\n\t\tmax_z = max(data_samplers['z'])\n\t\tif cosmo == 'flat_LCDM':\n\t\t\tcosmo1 = FlatLambdaCDM(H0=H0, Om0=Om0)\n\t\t\tmax_lbt = cosmo1.age(max_z).value\n\t\telif cosmo == 'WMAP5':\n\t\t\tmax_lbt = WMAP5.age(max_z).value\n\t\telif cosmo == 'WMAP7':\n\t\t\tmax_lbt = WMAP7.age(max_z).value\n\t\telif cosmo == 'WMAP9':\n\t\t\tmax_lbt = WMAP9.age(max_z).value\n\t\telif cosmo == 'Planck13':\n\t\t\tmax_lbt = Planck13.age(max_z).value\n\t\telif cosmo == 'Planck15':\n\t\t\tmax_lbt = Planck15.age(max_z).value\n\t\t#elif cosmo == 'Planck18':\n\t\t#\tmax_lbt = Planck18.age(gal_z).value\n\n\tnt = int(max_lbt/del_t)\n\tgrid_lbt = np.linspace(0.0,max_lbt,nt)\n\tarray_sfr_at_lbt = np.zeros((nchains,nt))\n\n\t## exclude saturated samplers: log(SFR)~-29.99..\n\tidx_sel = np.where(data_samplers['log_sfr']>-29.0)\n\trand_idx = np.random.uniform(0,len(idx_sel[0]),nchains)\n\n\tfor ii in range(0,nchains):\n\t\tidx = idx_sel[0][int(rand_idx[ii])]\n\n\t\tif free_z == 1:\n\t\t\tgal_z = data_samplers['z'][idx]\n\t\t\tif cosmo == 'flat_LCDM':\n\t\t\t\tcosmo1 = FlatLambdaCDM(H0=H0, Om0=Om0)\n\t\t\t\tmax_lbt1 = cosmo1.age(gal_z).value\n\t\t\telif cosmo == 'WMAP5':\n\t\t\t\tmax_lbt1 = WMAP5.age(gal_z).value\n\t\t\telif cosmo == 'WMAP7':\n\t\t\t\tmax_lbt1 = WMAP7.age(gal_z).value\n\t\t\telif cosmo == 'WMAP9':\n\t\t\t\tmax_lbt1 = WMAP9.age(gal_z).value\n\t\t\telif cosmo == 'Planck13':\n\t\t\t\tmax_lbt1 = Planck13.age(gal_z).value\n\t\t\telif cosmo == 'Planck15':\n\t\t\t\tmax_lbt1 = Planck15.age(gal_z).value\n\t\telif free_z == 0:\n\t\t\tmax_lbt1 = max_lbt\n\n\t\tage = pow(10.0,data_samplers['log_age'][idx])\n\t\ttau = pow(10.0,data_samplers['log_tau'][idx])\n\t\tt0 = 0.0\n\t\talpha = 0.0\n\t\tbeta = 0.0\n\t\tif sfh_form==2 or sfh_form==3:\n\t\t\tt0 = pow(10.0,data_samplers['log_t0'][idx])\n\t\tif sfh_form==4:\n\t\t\talpha = pow(10.0,data_samplers['log_alpha'][idx])\n\t\t\tbeta = pow(10.0,data_samplers['log_beta'][idx])\n\n\t\tformed_mass = pow(10.0,data_samplers['log_mass'][idx])\n\n\t\tt,SFR_t = construct_SFH(sfh_form=sfh_form,t0=t0,tau=tau,alpha=alpha,beta=beta,age=age,formed_mass=formed_mass)\n\t\tt_back = np.abs(t - age)\n\n\t\tarray_sfr_at_lbt[ii] = np.interp(grid_lbt,t_back[::-1],SFR_t[::-1],left=0,right=0)*factor\n\n\tarray_sfr_at_lbt_trans = np.transpose(array_sfr_at_lbt, 
axes=(1,0))\n\tgrid_sfr_p16 = np.percentile(array_sfr_at_lbt_trans, 16, axis=1)\n\tgrid_sfr_p50 = np.percentile(array_sfr_at_lbt_trans, 50, axis=1)\n\tgrid_sfr_p84 = np.percentile(array_sfr_at_lbt_trans, 84, axis=1)\n\n\t# plotting\n\tfig = plt.figure(figsize=(8,5))\n\tf1 = plt.subplot()\n\tif logscale_y == True:\n\t\tf1.set_yscale('log')\n\tif logscale_x == True:\n\t\tf1.set_xscale('log')\n\tplt.setp(f1.get_xticklabels(), fontsize=int(fontsize_tick))\n\tplt.setp(f1.get_yticklabels(), fontsize=int(fontsize_tick))\n\tplt.tick_params(axis='y', which='both', right='on')\n\tplt.tick_params(axis='x', which='both', top='on')\n\tplt.xlabel('Look back time [Gyr]', fontsize=int(fontsize_label))\n\tplt.ylabel(r'SFR[$M_{\\odot}yr^{-1}$]', fontsize=int(fontsize_label))\n\n\tf1.fill_between(grid_lbt, grid_sfr_p16, grid_sfr_p84, color='gray', alpha=0.5)\n\tplt.plot(grid_lbt,grid_sfr_p50,lw=4,color='black')\n\n\t# xrange:\n\tif lbacktime_max == None:\n\t\txmax = max_lbt\n\t\tplt.xlim(xmax,0)\n\telif lbacktime_max != None:\n\t\txmax = lbacktime_max\n\t\tplt.xlim(xmax,0)\n\n\t# yrange:\n\tif yrange == None:\n\t\tmaxSFR = max(grid_sfr_p84)\n\t\tplt.ylim(0,maxSFR*1.2)\n\tif yrange != None:\n\t\tplt.ylim(yrange[0],yrange[1])\n\n\tplt.subplots_adjust(left=0.15, right=0.95, bottom=0.15, top=0.95)\n\n\tif name_plot==None:\n\t\tname_sampler_fits1 = name_sampler_fits.replace('.fits','')\n\t\tname_plot = \"sfh_%s.png\" % (name_sampler_fits1)\n\t\n\tplt.savefig(name_plot)\n\n\treturn name_plot, grid_lbt, grid_sfr_p16, grid_sfr_p50, grid_sfr_p84",
"def run_em(data_mu, data_sigma, num_iter=100, N=1000, M=3):\n\t# Generate the data\n\tpz = [1.0/M]*M\n\tmu = [np.asarray([x]) for x in data_mu]\n\tsigma = [np.asarray([x]).reshape(1, 1) for x in data_sigma]\n\tgmm = GMM(pz=pz, muks=mu, sigmaks=sigma, k=M, d=1)\n\tx, _ = gmm.generate_points(n=N)\n\tx = x.reshape((-1,))\n\n\t# Run the EM algorithm\n\tem = EM(x, M, N)\n\tcosts = []\n\tfor i in range(num_iter):\n\t\tpi, mu, sigma, cost = em.train()\n\t\tcosts.append(cost)\n\t\n\t# Print results\n\tprint 'Mu:', mu\n\tprint 'Sigma:', sigma\n\t\n\t# Plot cost vs iterations\n\tplt.plot(costs)\n\tplt.title('Iteration vs Cost')\n\tplt.xlabel('Iterations')\n\tplt.ylabel('Cost')\n\tplt.show()\n\t\n\t# Show the histogram, true distribution and estimated distribution\n\tplt.hist(x, normed=True, color='#cccccc')\n\tdef estimated_dist(x):\n\t\tN = np.shape(x)[0]\n\t\tx_stacked = np.stack([x]*M, axis=1)\n\t\tmu_stacked = np.stack([mu]*N, axis=0)\n\t\tsigma_stacked = np.stack([sigma]*N, axis=0)\n\t\tx_given_z = np.exp(-0.5 * np.power((x_stacked - mu_stacked) / sigma_stacked, 2))\n\t\tx_given_z = x_given_z * np.stack([1.0 / np.sqrt(2 * np.pi * sigma)]*N, axis=0)\n\t\tpz = np.mean(pi / np.stack([np.sum(pi, axis=1)]*M, axis=1), axis=0)\n\t\tpx = np.multiply(x_given_z, np.stack([pz]*N, axis=0))\n\t\treturn np.sum(px, axis=1)\n\tdef true_dist(x):\n\t\tN = np.shape(x)[0]\n\t\tx_stacked = np.stack([x]*M, axis=1)\n\t\tmu_stacked = np.stack([np.asarray(data_mu)]*N, axis=0)\n\t\tsigma_stacked = np.stack([np.asarray(data_sigma)]*N, axis=0)\n\t\tx_given_z = np.exp(-0.5 * np.power((x_stacked - mu_stacked) / sigma_stacked, 2))\n\t\tx_given_z = x_given_z * np.stack([1.0 / np.sqrt(2 * np.pi * np.asarray(data_sigma))]*N, axis=0)\n\t\tprz = np.asarray(pz)\n\t\tpx = np.multiply(x_given_z, np.stack([prz]*N, axis=0))\n\t\treturn np.sum(px, axis=1)\n\t\t\n\tx_axis = np.arange(np.min(x)-0.5, np.max(x)+0.5, 0.01)\n\tplt.plot(x_axis, true_dist(x_axis), 'ro', label='True Distribution')\n\tplt.plot(x_axis, estimated_dist(x_axis), 'co', label='Estimated Distribution')\n\tplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\tplt.show()",
"def _do_mh_walkers(pipeline_obj, p0_walkers, req_n_walkers):\n\n # Make abbreviation for pipeline_obj\n pipe = pipeline_obj\n\n # Define function to check if proposed sam_set should be accepted\n def advance_chain(sam_set):\n # Make sure that sam_set is 2D\n sam_set = np_array(sam_set, ndmin=2)\n\n # Check if sam_set is within parameter space and reject if not\n par_rng = pipe._modellink._par_rng\n accept = ((par_rng[:, 0] <= sam_set)*(sam_set <= par_rng[:, 1])).all(1)\n\n # Evaluate all non-rejected samples and accept if plausible\n emul_i = pipe._emulator._emul_i\n accept[accept] = pipe._make_call('_evaluate_sam_set', emul_i,\n sam_set[accept], 'project')[0]\n\n # Return which samples should be accepted or rejected\n return(accept)\n\n # Initialize array of final walkers\n n_walkers = p0_walkers.shape[0]\n walkers = np.empty([req_n_walkers+n_walkers-1, pipe._modellink._n_par])\n walkers[:n_walkers] = p0_walkers\n\n # Check if logging is currently turned on\n was_logging = bool(pipe.do_logging)\n\n # Make sure that logging is turned off\n pipe.do_logging = False\n\n # Use worker mode\n with pipe.worker_mode:\n if pipe._is_controller:\n # Initialize progress bar\n pbar = tqdm(desc=\"Finding walkers\", total=req_n_walkers,\n initial=n_walkers, disable=not was_logging,\n bar_format=(\"{l_bar}{bar}| {n_fmt}/{total_fmt} \"\n \"[Time: {elapsed}]\"))\n\n # Keep searching parameter space until req_n_walkers are found\n while(n_walkers < req_n_walkers):\n # Calculate the covariance matrix of all walkers\n cov = np.cov(walkers[:n_walkers].T)\n\n # Create set of proposed walkers\n new_walkers = np.apply_along_axis(multivariate_normal, 1,\n p0_walkers, cov)\n\n # Check which proposed walkers should be accepted\n accept = advance_chain(new_walkers)\n acc_walkers = new_walkers[accept]\n n_accepted = sum(accept)\n\n # Replace current walkers with accepted walkers\n p0_walkers[accept] = acc_walkers\n\n # Update final walkers array\n walkers[n_walkers:n_walkers+n_accepted] = acc_walkers\n n_walkers += n_accepted\n\n # Update progress bar\n pbar.update(min(req_n_walkers, n_walkers)-pbar.n)\n\n # Close progress bar\n pbar.close()\n\n # Turn logging back on if it used to be on\n pipe.do_logging = was_logging\n\n # Broadcast walkers to all workers\n walkers = pipe._comm.bcast(np.unique(walkers[:req_n_walkers], axis=0), 0)\n n_walkers = walkers.shape[0]\n\n # Return n_walkers, walkers\n return(n_walkers, walkers)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper around the log_likelihood method to evaluate it for an array of just the variable parameters, which can be used directly with scipy.optimize methods.
|
def objective(par_arr):
fit_params = copy.copy(params)
for par, value in zip([p for p in params if params[p].vary], par_arr):
fit_params[par].value = value
return self.log_likelihood(fit_params, eval_gradient=False)
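A minimal usage sketch, not part of the dataset record: it assumes an objective closure like the one above and an lmfit-style params object are in scope, and that log_likelihood already returns a quantity suitable for minimization (comments elsewhere in this dump suggest the sign is handled internally); if not, a sign flip would be needed.

    import numpy as np
    from scipy.optimize import minimize

    # start from the current values of only the free (vary=True) parameters
    x0 = np.array([params[p].value for p in params if params[p].vary])

    # hand the wrapper straight to a scipy.optimize routine
    result = minimize(objective, x0, method='L-BFGS-B')
    print(result.x, result.fun)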
|
[
"def likelihood(self, x: np.ndarray) -> np.ndarray:",
"def evaluate_likelihood(self, X):\n Y = np.apply_along_axis(self.likelihood_l, 1, X)\n return(Y)",
"def compute_log_likelihood(X, params):\n m, n, _ = X.shape\n likelihood = 0.\n for i in range(m):\n p_y_0 = p_y(0, params)\n p_y_1 = p_y(1, params)\n for j in range(n):\n x = X[i,j]\n p_y_0 += log_sum_exp(p_x_z(x,0,params) + p_z_y(0,0,params), p_x_z(x,1,params) + p_z_y(1,0,params))\n p_y_1 += log_sum_exp(p_x_z(x,0,params) + p_z_y(0,1,params), p_x_z(x,1,params) + p_z_y(1,1,params))\n likelihood += log_sum_exp(p_y_0, p_y_1)\n\n return likelihood",
"def log_likelihood(self, *args, context=None):\n\n if self.owner is None:\n raise ValueError(\n \"Cannot compute a log-likelihood without being assigned as the function of an \"\n \"OptimizationControlMechanism. See the documentation for the \"\n \"ParameterEstimationControlMechanism for more information.\"\n )\n\n # Make sure we have instantiated the log-likelihood function.\n if self._ll_func is None:\n self._ll_func = self._make_objective_func(context=context)\n\n context.execution_phase = ContextFlags.PROCESSING\n ll, sim_data = self._ll_func(*args)\n context.remove_flag(ContextFlags.PROCESSING)\n\n return ll, sim_data",
"def log_data_likelihood(y, explan, explan_bar, params, num_pts, t, pi_bit):\n # type: (object, object, object, object, object, object) -> object\n if params[-1] <= 0:\n print params\n temp_1 = 0.5 * num_pts * np.log(params[-1])\n temp_2 = np.sum((y - params[0] - params[1] * (explan - explan_bar))**2) / (2. * params[-1])\n return -t * (pi_bit + temp_1 + temp_2)",
"def log_likelihood(self, params, eval_gradient=True):\n if eval_gradient:\n segment_loglike = [c.log_likelihood(params, eval_gradient) for c in self.mlcross_spec]\n # separate and sum the likelihoods and the gradients\n like = np.array([l[0] for l in segment_loglike])\n grad = np.array([l[1] for l in segment_loglike])\n if np.all(np.isfinite(like)):\n return np.sum(like), grad.sum(axis=0)\n else:\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6)\n else:\n return np.sum([c.log_likelihood(params, eval_gradient) for c in self.mlcross_spec])",
"def log_wealth_optim(f, pnl):\n return -np.mean(np.log(1 + f * pnl))",
"def logits(self, x):",
"def log_likelihood(self, params, eval_gradient=True):\n c = self.cov_matrix(params)\n\n # add white noise along the leading diagonal\n # this should be the Poisson noise term when calculating a PSD\n if self.noise is not None:\n c += np.diag(self.noise)\n\n try:\n L = cho_factor(c, lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n try:\n # try doubling the noise first\n L = cho_factor(c + np.diag(self.noise), lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n #printmsg(2, \"WARNING: Couldn't invert covariance matrix with parameters \" + param2array(params))\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6) if eval_gradient else -1e6\n except ValueError:\n return (np.inf, np.zeros(len([p for p in params if params[p].vary]))) if eval_gradient else -np.inf\n\n alpha = cho_solve((L, True), self.data, check_finite=False)\n\n log_likelihood_dims = -0.5 * np.einsum(\"ik,ik->k\", self.data, alpha)\n log_likelihood_dims -= np.log(np.diag(L)).sum()\n log_likelihood_dims -= c.shape[0] / 2 * np.log(2 * np.pi)\n log_likelihood = log_likelihood_dims.sum(-1)\n\n if eval_gradient:\n c_gradient = self.cov_matrix_deriv(params)\n tmp = np.einsum(\"ik,jk->ijk\", alpha, alpha)\n tmp -= cho_solve((L, True), np.eye(c.shape[0]))[:, :, np.newaxis]\n gradient_dims = 0.5 * np.einsum(\"ijl,ijk->kl\", tmp, c_gradient)\n gradient = gradient_dims.sum(-1)\n\n # note we return -log_likelihood, so we can minimize it!\n return (log_likelihood, gradient) if eval_gradient else log_likelihood",
"def gradFunc(param):\n\n return np.array(\n GeneralizedExtremeValueDistribution.computeNegLogLikelihoodGrad(\n param[0], param[1], param[2], data\n ))",
"def log_likelihood(mu, sigma, obs):\r\n obs = np.asarray(obs)\r\n N = obs.shape[0]\r\n return -N * np.log(sigma) - (1 / (2 * sigma ** 2)) * np.sum((obs - mu) ** 2)",
"def ln_GLRT(s_array:np.ndarray, x_array:np.ndarray) -> np.ndarray:\n assert s_array.ndim == x_array.ndim == 1\n N = s_array.shape[0]\n\n GLRT_out = []\n\n print('\\n## Generalized Likelihood Ration Computation ##')\n\n for n_0 in tqdm(range(x_array.shape[0] - N)):\n x_array_truncate = x_array[n_0:n_0+N]\n\n A_MLE = np.sum(np.multiply(s_array,x_array_truncate)) / np.sum(np.square(s_array))\n\n sigma2_0_MLE = np.average(np.square(x_array_truncate))\n \n sigma2_1_MLE = np.average(np.square(x_array_truncate - (A_MLE * s_array)))\n\n GLRT_out.append( (N/2.0) * (np.log(sigma2_0_MLE) - np.log(sigma2_1_MLE)) )\n \n return np.array(GLRT_out)",
"def powerlaw_cutoff_loglikelihood(params, x, xmin=1.):\n\n alpha, lamb = params[0], params[1]\n a = x.size * np.log(lamb ** (1 - alpha) / float(mpmath.gammainc(1 - alpha, lamb * xmin)))\n result = a - alpha * np.log(x).sum() - lamb * x.sum()\n\n return -result",
"def _log_likelihood(self, t, sess, feed_dict):\n feed_dict[self.t] = t\n feed_dict[self.z] = self.zv\n return sess.run(self.lld, feed_dict=feed_dict)",
"def log_marginal_likelihood(self, X):\n pass",
"def log_likelihood_continuous(tte, uncensored, alpha, beta, epsilon=EPS):\n y_a = (tte + epsilon) / alpha\n return uncensored * (torch.log(beta) + beta * torch.log(y_a)) - torch.pow(y_a, beta)",
"def bayesian_information_criteria(self):\n self.max_likelihood('diff_evo')\n l_hat = optimize.ln_likelihood(self.variable_medians, self.function, self.abscissa, self.ordinate)\n return np.log(self.x.size) * self.len_parameters - 2 * l_hat",
"def log_likelihood_function(self, instance):\r\n\r\n \"\"\"\r\n In the previous tutorial the instance was a single `Gaussian` profile, meaning we could create the model data \r\n using the line:\r\n\r\n model_data = instance.gaussian.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n In this tutorial our instance is comprised of multiple 1D Gaussians, because we will use a `Collection` to\r\n compose the model:\r\n\r\n model = Collection(gaussian_0=Gaussian, gaussian_1=Gaussian).\r\n\r\n By using a Collection, this means the instance parameter input into the fit function is a\r\n dictionary where individual profiles (and their parameters) can be accessed as followed:\r\n\r\n print(instance.gaussian_0)\r\n print(instance.gaussian_1)\r\n print(instance.gaussian_0.centre)\r\n\r\n In this tutorial, the `model_data` is therefore the summed `model_data` of all individual Gaussians in the \r\n model. The function `model_data_from_instance` performs this summation. \r\n \"\"\"\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n chi_squared = sum(chi_squared_map)\r\n noise_normalization = np.sum(np.log(2 * np.pi * noise_map**2.0))\r\n log_likelihood = -0.5 * (chi_squared + noise_normalization)\r\n\r\n return log_likelihood",
"def likelihoods(self, step):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
c = pylag.mlfit.MLPSD.cov_matrix(params) Calculate the model covariance matrix for the specified parameter values.
|
def cov_matrix(self, params):
# if no model is specified, the PSD model is just the PSD value in each frequency bin
# note the factor of 2 to integrate over the negative frequencies too!
if self.model is None:
psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm
else:
psd = self.model(params, self.fbins.bin_cent) * self.psdnorm
cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)
return cov
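
A minimal usage sketch, not part of the pylag source shown here: it assumes an MLPSD instance named mlpsd has already been constructed from a light curve (the constructor is not shown in this document), and evaluates the model covariance at the current parameter values, adding the Poisson noise term in the same way as log_likelihood does.

# Hedged sketch: `mlpsd` is an assumed, pre-existing pylag.mlfit.MLPSD instance
import numpy as np

c = mlpsd.cov_matrix(mlpsd.params)   # model covariance at the current parameter values
if mlpsd.noise is not None:
    c += np.diag(mlpsd.noise)        # Poisson noise term, as added in log_likelihood
print(c.shape)                       # square covariance matrix over the light curve samples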
|
[
"def cross_cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)\n return cov",
"def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)",
"def cov_matrix_deriv(self, params):\n cc = self.cross_cov_matrix_deriv(params)\n\n if self.freeze_psd:\n Z = np.zeros_like(self.ac1)\n return np.stack(\n [np.vstack([np.hstack([Z, cc[..., p].T]), np.hstack([cc[..., p], Z])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n else:\n ac1 = self.mlpsd1.cov_matrix_deriv(params)\n ac2 = self.mlpsd2.cov_matrix_deriv(params)\n return np.stack(\n [np.vstack([np.hstack([ac1[..., p], cc[..., p].T]), np.hstack([cc[..., p], ac2[..., p]])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n return np.stack([np.vstack([np.hstack([ac1[...,p], cc[...,p].T]), np.hstack([cc[...,p], ac2[...,p]])]) for p in range(len(self.params))], axis=-1)",
"def cov_matrix(self, params):\n if self.freeze_psd:\n if self.ac1 is None or self.ac2 is None:\n raise AssertionError(\"Autocovariance matrices are not available. Did you fit the PSDs?\")\n ac1 = self.ac1\n ac2 = self.ac2\n else:\n ac1 = self.mlpsd1.cov_matrix(params)\n ac2 = self.mlpsd2.cov_matrix(params)\n\n cc = self.cross_cov_matrix(params)\n\n return np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])",
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def compute_covariance_matrix(X):\n return np.cov(X, rowvar=0)",
"def cal_cov_matrix(training_data):\n\t# cov_matrix = np.transpose(training_data).dot(training_data)/(training_data.shape[0] - 1)\n\tcov_matrix = training_data.T.dot(training_data)\n\t# cal cov_matrix by numpy\n\t# cov_matrix = np.cov(training_data, rowvar=False, bias=True)\n\tprint('cov_matrix shape ::: ', cov_matrix.shape)\n\t\"\"\" cal eig vector and value \"\"\"\n\teig_val, eig_vec = np.linalg.eig(cov_matrix)\n\t# print('val :::', eig_val)\n\t# print('sorted val :::', np.sort(eig_val))\n\t\"\"\" return the largest max_index eignvalues \"\"\"\n\tsort_index = np.argsort(-eig_val)\n\teig_val = sorted(eig_val, reverse=True)\n\t# eig_val = np.sort(-eig_val)\n\treturn sort_index, eig_val, eig_vec",
"def var_cov_matrix(df, weigths):\n\n sigma = np.cov(np.array(df).T, ddof=0)\n var = (np.array(weigths) * sigma * np.array(weigths).T).sum()\n return var",
"def covariance_matrix(self):\n self.covariance = np.dot(self.matrix, self.matrix.transpose())",
"def Kcov(self,masses):\n from numpy.linalg import eigh\n from statistics import calc_cov\n #average and standar deviation for kinetic energy\n Kav=numpy.zeros(self.nat); Kdev=numpy.zeros(self.nat)\n #covariance of the kinetic energy\n Kcov=numpy.zeros(self.nat*self.nat).reshape(self.nat,self.nat)\n while self.loadframe():\n v2=(self.frame*self.frame).sum(axis=1);\n K=0.5*masses*v2; Kav+=K; Kdev+=K*K\n Kcov+=numpy.dot(K.reshape(self.nat,1),K.reshape(1,self.nat))\n results=calc_cov(Kav,Kdev,Kcov,self.nat,self.nframe)\n [evals,evecs]=eigh(results['cov']); #diagonalize\n perm=numpy.argsort(evals)[::-1] #sort from bigger to smaller\n evals=evals[perm]; evecs=evecs[:,perm]\n return {'Kav':results['av'], 'Kdev':results['dev'],\n 'Kcov':results['cov'], 'evals':evals, 'evecs':evecs}",
"def constant_cov(x,y,c):\n return c*np.ones(x.shape[0])",
"def se_cov(cov):\n return np.sqrt(np.diag(cov))",
"def compute_covariance_of_sampson_estimate(algebraicEllipseParameters, dataPts, covList=None):\n\n nPts = len(dataPts)\n\n # Check to see if the user passed in their own list of covariance matrices\n if covList is None:\n # Generate a list of diagonal covariance matrices\n covList = covList = [np.eye(2) for i in range(nPts)]\n sigma_squared = estimateNoiseLevel(algebraicEllipseParameters,\n dataPts, covList)\n\n # ensure that the isotropic covariance matrices are scaled with\n # an estimate of the noise level\n for iPts in range(nPts):\n covList[iPts] = covList[iPts] * sigma_squared\n\n # the algebraicEllipseParameters were computed in a hartley normalised\n # coordinate system so in order to correctly characterise the uncertainty\n # of the estimate, we need to know the transformation matrix T that maps\n # between the original coordinate system and the hartley normalised\n # coordinate system.\n #\n # scale and translate data points so that they lie inside a unit box\n\n\n dataPts, T = utils.normalize_data_isotropically(dataPts)\n dataPts=dataPts.T\n\n # transfer initialParameters to normalized coordinate system\n # the formula appears in the paper Z.Szpak, W. Chojnacki and A. van den\n # Hengel, \"A comparison of ellipse fitting methods and implications for\n # multiple view geometry\", Digital Image Computing Techniques and\n # Applications, Dec 2012, pp 1--8\n algebraicEllipseParameters = (algebraicEllipseParameters / \n norm(algebraicEllipseParameters))\n algebraicEllipseParametersNormalisedSpace = utils.normalize_coordinate_system(T, algebraicEllipseParameters)\n # Becase the data points are now in a new normalised coordinate system,\n # the data covariance matrices also need to be tranformed into the \n # new normalised coordinate system. The transformation of the covariance\n # matrices into the new coordinate system can be achieved by embedding the\n # covariance matrices in a 3x3 matrix (by padding the 2x2 covariance\n # matrices by zeros) and by multiply the covariance matrices by the \n # matrix T from the left and T' from the right. \n normalised_CovList = utils.normalize_cov_list(covList,T)\n\n M,aml,t = NoiseLevelLoop(algebraicEllipseParametersNormalisedSpace,dataPts, normalised_CovList)\n \n Pt = eye(6) - np.outer(t,t)/norm(t)**2\n # compute rank-5 constrained pseudo-inverse of M\n U, D, V = svd(M)\n\n for i in range(5):\n D[i] = 1/D[i]\n\n D[5] = 0\n pinvM = V.T @ diag(D) @ U.T\n \n covarianceMatrixNormalisedSpace = Pt @ pinvM @ Pt\n # transform covariance matrix from normalised coordinate system\n # back to the original coordinate system \n E,P34,D3 = utils.epd()\n F = inv(E) @ P34 @ pinv(D3) @ kron(T,T).T @ D3 @ P34 @ E \n t = F @ algebraicEllipseParametersNormalisedSpace\n P = eye(6) - np.outer(t,t)/norm(t)**2;\n covarianceMatrix = norm(t)**(-2) * P @ F @ covarianceMatrixNormalisedSpace @ F.T @ P\n return covarianceMatrix, covarianceMatrixNormalisedSpace",
"def get_cov(self, j):\n\t\tC = self.extern(self.sigma[j])\n\t\tif self.diag:\n\t\t\tC=diag(C)\n\t\treturn C",
"def get_covariance(self):\n log.info(\"Calculating covariance matrix (this may take a while...)\")\n return int_nf.get_covariance(\n frame_data=self.frames.data,\n frame_valid=self.frames.valid,\n frame_weight=self.frames.relative_weight,\n channel_flags=self.channels.data.flag,\n channel_weight=self.channels.data.weight,\n sample_flags=self.frames.sample_flag,\n frame_flags=self.frames.flag,\n source_flags=self.flagspace.convert_flag('SOURCE_FLAGS').value)",
"def posterior_covariance_matrix(self, sigma=None):\n if sigma is None:\n sigma = self.sigma\n s = self.s\n ss2 = s ** 2 + sigma ** 2\n UY = self.UY\n return self.y_test_prior_covariance - ((UY.T * (s ** 2 / ss2)) @ UY)",
"def _empirical_covariance(train_targets: np.ndarray) -> np.ndarray:\n return np.cov(train_targets.T)",
"def get_cpsd(self, params=None):\n if params is None:\n params = self.params\n\n if self.cpsd_model is None:\n return np.array([self.params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n return np.log(self.cpsd_model(self.params, self.fbins.bin_cent))",
"def calc_monte_carlo_covariances(mats):\n\n # check input\n assert all([e.is_square() for e in mats])\n n = mats[0].n_rows()\n assert all([e.n_rows() == n for e in mats])\n\n # create an empty var-cov matrix\n covmat = flex.double(flex.grid(n**2,n**2), 0.0)\n\n for i in range(covmat.all()[0]):\n for j in range(covmat.all()[1]):\n a = [m[i] for m in mats]\n b = [m[j] for m in mats]\n covmat[i,j] = cov(a,b)\n\n return covmat"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
dc = pylag.mlfit.MLPSD.cov_matrix_deriv(params) Calculate the first derivative of the covariance matrix with respect to the parameters.
|
def cov_matrix_deriv(self, params):
if self.model is None:
psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm
# in this simple case, the covariance matrix is just a linear sum of each frequency term
# so the derivative is simple - we multiply by p when we're talking about the log
return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)
else:
psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm
return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)
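
A minimal sketch, not part of the pylag source shown here, that checks one slice of cov_matrix_deriv against a finite difference. It assumes an existing MLPSD instance named mlpsd with a free-form PSD (model is None), so the i-th slice of the returned array is the derivative with respect to the i-th log-PSD parameter.

# Hedged sketch: `mlpsd` is an assumed MLPSD instance with self.model = None
import numpy as np

params = mlpsd.params
dc = mlpsd.cov_matrix_deriv(params)          # one (N, N) slice per parameter

name = list(params)[0]                       # first (log-PSD) parameter
p0 = params[name].value
eps = 1.0e-6

params[name].value = p0 + eps
c_hi = mlpsd.cov_matrix(params)
params[name].value = p0                      # restore the original value
c_lo = mlpsd.cov_matrix(params)

numeric = (c_hi - c_lo) / eps
print(np.max(np.abs(numeric - dc[..., 0])))  # should be close to zero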
|
[
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def cov_matrix_deriv(self, params):\n cc = self.cross_cov_matrix_deriv(params)\n\n if self.freeze_psd:\n Z = np.zeros_like(self.ac1)\n return np.stack(\n [np.vstack([np.hstack([Z, cc[..., p].T]), np.hstack([cc[..., p], Z])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n else:\n ac1 = self.mlpsd1.cov_matrix_deriv(params)\n ac2 = self.mlpsd2.cov_matrix_deriv(params)\n return np.stack(\n [np.vstack([np.hstack([ac1[..., p], cc[..., p].T]), np.hstack([cc[..., p], ac2[..., p]])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n return np.stack([np.vstack([np.hstack([ac1[...,p], cc[...,p].T]), np.hstack([cc[...,p], ac2[...,p]])]) for p in range(len(self.params))], axis=-1)",
"def cross_cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)\n return cov",
"def gradient_energy(params, X):\n\n # electronic densities\n n_edens = X[1].shape[-1]\n edens = X[1].dot(params[-n_edens:])\n \n with np.errstate(divide='ignore'):\n # (-1) is there for the constant parameter value\n tmp = np.nan_to_num(-1.0/(2.0*np.sqrt(edens)) + 2.0*params[0]*edens)\n\n grad = np.empty((X[0].shape[0], len(params)), dtype=float)\n grad[:, 1:-n_edens] = X[0] # pair\n grad[:, 0] = np.sum(edens**2, axis=1) # embed\n grad[:, -n_edens:] = np.sum(tmp[:, :, None]*X[1], axis=1) # edens\n\n return grad",
"def log_likelihood(self, params, eval_gradient=True):\n c = self.cov_matrix(params)\n\n # add white noise along the leading diagonal\n # this should be the Poisson noise term when calculating a PSD\n if self.noise is not None:\n c += np.diag(self.noise)\n\n try:\n L = cho_factor(c, lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n try:\n # try doubling the noise first\n L = cho_factor(c + np.diag(self.noise), lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n #printmsg(2, \"WARNING: Couldn't invert covariance matrix with parameters \" + param2array(params))\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6) if eval_gradient else -1e6\n except ValueError:\n return (np.inf, np.zeros(len([p for p in params if params[p].vary]))) if eval_gradient else -np.inf\n\n alpha = cho_solve((L, True), self.data, check_finite=False)\n\n log_likelihood_dims = -0.5 * np.einsum(\"ik,ik->k\", self.data, alpha)\n log_likelihood_dims -= np.log(np.diag(L)).sum()\n log_likelihood_dims -= c.shape[0] / 2 * np.log(2 * np.pi)\n log_likelihood = log_likelihood_dims.sum(-1)\n\n if eval_gradient:\n c_gradient = self.cov_matrix_deriv(params)\n tmp = np.einsum(\"ik,jk->ijk\", alpha, alpha)\n tmp -= cho_solve((L, True), np.eye(c.shape[0]))[:, :, np.newaxis]\n gradient_dims = 0.5 * np.einsum(\"ijl,ijk->kl\", tmp, c_gradient)\n gradient = gradient_dims.sum(-1)\n\n # note we return -log_likelihood, so we can minimize it!\n return (log_likelihood, gradient) if eval_gradient else log_likelihood",
"def cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n # note the factor of 2 to integrate over the negative frequencies too!\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n else:\n psd = self.model(params, self.fbins.bin_cent) * self.psdnorm\n\n cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)\n\n return cov",
"def _goniometer_derivatives(\n self, isel, parameterisation=None, dS_dgon_p=None, reflections=None\n ):\n\n # Get required data\n axis = self._axis.select(isel)\n fixed_rotation = self._fixed_rotation.select(isel)\n phi_calc = self._phi_calc.select(isel)\n h = self._h.select(isel)\n s1 = self._s1.select(isel)\n e_X_r = self._e_X_r.select(isel)\n e_r_s0 = self._e_r_s0.select(isel)\n UB = self._UB.select(isel)\n D = self._D.select(isel)\n\n if dS_dgon_p is None:\n\n # get derivatives of the setting matrix S wrt the parameters\n dS_dgon_p = [\n None if der is None else flex.mat3_double(len(isel), der.elems)\n for der in parameterisation.get_ds_dp(use_none_as_null=True)\n ]\n\n dphi_dp = []\n dpv_dp = []\n\n # loop through the parameters\n for der in dS_dgon_p:\n\n if der is None:\n dphi_dp.append(None)\n dpv_dp.append(None)\n continue\n\n # calculate the derivative of r for this parameter\n tmp = fixed_rotation * (UB * h)\n dr = der * tmp.rotate_around_origin(axis, phi_calc)\n\n # calculate the derivative of phi for this parameter\n dphi = -1.0 * dr.dot(s1) / e_r_s0\n dphi_dp.append(dphi)\n\n # calculate the derivative of pv for this parameter\n dpv_dp.append(D * (dr + e_X_r * dphi))\n\n return dpv_dp, dphi_dp",
"def flderiv(b, c, d):\n b2 = b * b\n c2 = c * c\n d2 = d * d\n bd = b * d\n df1db = 2*b + 6*d\n df1dc = 4*c\n df1dd = 6*b + 30*d\n df2db = 4*c * (b + 12*d)\n df2dc = 2 * (b2 + 24*bd + 105*d2 + 2)\n df2dd = 4 * c * (12*b + 105*d)\n df3db = 24 * (d + c2 * (2*b + 28*d) + 48 * d**3)\n df3dc = 48 * c * (1 + b2 + 28*bd + 141*d2)\n df3dd = 24 * (b + 28*b * c2 + 2 * d * (12 + 48*bd +\n 141*c2 + 225*d2) + d2 * (48*b + 450*d))\n return np.matrix([[df1db, df1dc, df1dd],\n [df2db, df2dc, df2dd],\n [df3db, df3dc, df3dd]])",
"def _detector_derivatives(\n self, isel, panel_id, parameterisation=None, dd_ddet_p=None, reflections=None\n ):\n\n # Get required data\n pv = self._pv.select(isel)\n D = self._D.select(isel)\n\n if dd_ddet_p is None:\n\n # get the derivatives of detector d matrix for this panel\n dd_ddet_p = parameterisation.get_ds_dp(\n multi_state_elt=panel_id, use_none_as_null=True\n )\n\n # replace explicit null derivatives with None\n dd_ddet_p = [\n None if e is None else flex.mat3_double(len(D), e.elems)\n for e in dd_ddet_p\n ]\n\n # calculate the derivative of pv for this parameter\n dpv_ddet_p = [\n der if der is None else (D * (der * -1.0)) * pv for der in dd_ddet_p\n ]\n\n return dpv_ddet_p",
"def _calc_psi_deriv(self):\n try:\n self.bkg['psi'].mean()\n except:\n self.build_bkg()\n \n # psi = self.eqdsk.psi\n # self.dpsidR = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n # self.dpsidZ = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n psi = self.bkg['psi']\n self.dpsidR = np.zeros((self.nz, self.nR))\n self.dpsidZ = np.zeros((self.nz, self.nR)) \n deriv = np.gradient(psi)\n # Note np.gradient gives y\n # derivative first, then x derivative\n ddR = deriv[1]\n ddZ = deriv[0]\n # dRdi = np.asarray(1.0)/np.gradient(self.R_eqd)\n # dRdi = np.tile(dRdi, [self.eqdsk.nzbox,1])\n # dZdi = np.asarray(1.0)/np.gradient(self.Z_eqd)\n # dZdi = np.tile(dZdi, [self.eqdsk.nrbox,1])\n # dZdi = np.transpose(dZdi)\n dRdi = np.asarray(1.0)/np.gradient(self.bkg['R'])\n dRdi = np.tile(dRdi, [self.nz,1])\n dZdi = np.asarray(1.0)/np.gradient(self.bkg['z'])\n dZdi = np.tile(dZdi, [self.nR,1])\n dZdi = np.transpose(dZdi)\n #print(\"shape ddR:\",np.shape(ddR),'shape dRdi:', np.shape(dRdi))\n #print('shape ddZ:',np.shape(ddZ),'shape dZdi:', np.shape(dZdi))\n \n self.dpsidR[:, :] = ddR*dRdi\n self.dpsidZ[:, :] = ddZ*dZdi",
"def cov_matrix(self, params):\n if self.freeze_psd:\n if self.ac1 is None or self.ac2 is None:\n raise AssertionError(\"Autocovariance matrices are not available. Did you fit the PSDs?\")\n ac1 = self.ac1\n ac2 = self.ac2\n else:\n ac1 = self.mlpsd1.cov_matrix(params)\n ac2 = self.mlpsd2.cov_matrix(params)\n\n cc = self.cross_cov_matrix(params)\n\n return np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])",
"def get_mle_covariance(self, x = None, ddof = 1):\n if is_none(x):\n x = self.x\n\n # small number to avoid singularities\n return np.cov(x, ddof = 1, rowvar = False) + 1e-6 * np.identity(x.shape[1])",
"def christoffel_deriv(self):\n q_inv = self.induced_metric(inverse=True)\n dq_inv = self.induced_metric(inverse=True, diff=1)\n dq = self.induced_metric(diff=1)\n ddq = self.induced_metric(diff=2)\n return christoffel_deriv(q_inv, dq_inv, dq, ddq)",
"def _beam_derivatives(\n self, isel, parameterisation=None, ds0_dbeam_p=None, reflections=None\n ):\n\n # Get required data\n r = self._r.select(isel)\n e_X_r = self._e_X_r.select(isel)\n e_r_s0 = self._e_r_s0.select(isel)\n D = self._D.select(isel)\n\n if ds0_dbeam_p is None:\n\n # get the derivatives of the beam vector wrt the parameters\n ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True)\n\n ds0_dbeam_p = [\n None if e is None else flex.vec3_double(len(r), e.elems)\n for e in ds0_dbeam_p\n ]\n\n dphi_dp = []\n dpv_dp = []\n\n # loop through the parameters\n for der in ds0_dbeam_p:\n\n if der is None:\n dphi_dp.append(None)\n dpv_dp.append(None)\n continue\n\n # calculate the derivative of phi for this parameter\n dphi = (der.dot(r) / e_r_s0) * -1.0\n dphi_dp.append(dphi)\n\n # calculate the derivative of pv for this parameter\n dpv_dp.append(D * (e_X_r * dphi + der))\n\n return dpv_dp, dphi_dp",
"def _get_d_u_d_params(self, parameters):\n # Setup\n gradient = {param: 0 for param in parameters}\n as_dict = self.model.parameters.as_dict()\n\n # Get source location\n trajectory = self.model.get_trajectory(self.dataset.time)\n u_ = np.sqrt(trajectory.x**2 + trajectory.y**2)\n\n # Calculate derivatives\n d_u_d_x = trajectory.x / u_\n d_u_d_y = trajectory.y / u_\n dt = self.dataset.time - as_dict['t_0']\n\n # Exactly 2 out of (u_0, t_E, t_eff) must be defined and\n # gradient depends on which ones are defined.\n t_E = self.model.parameters.t_E\n t_eff = self.model.parameters.t_eff\n if 't_eff' not in as_dict:\n gradient['t_0'] = -d_u_d_x / t_E\n gradient['u_0'] = d_u_d_y\n gradient['t_E'] = d_u_d_x * -dt / t_E**2\n elif 't_E' not in as_dict:\n gradient['t_0'] = -d_u_d_x * as_dict['u_0'] / t_eff\n gradient['u_0'] = (d_u_d_y + d_u_d_x * dt / t_eff)\n gradient['t_eff'] = (d_u_d_x * -dt * as_dict['u_0'] / t_eff**2)\n elif 'u_0' not in as_dict:\n gradient['t_0'] = -d_u_d_x / t_E\n gradient['t_E'] = (d_u_d_x * dt - d_u_d_y * t_eff) / t_E**2\n gradient['t_eff'] = d_u_d_y / t_E\n else:\n raise KeyError(\n 'Something is wrong with ModelParameters in ' +\n 'FitData.calculate_chi2_gradient():\\n', as_dict)\n\n # Below we deal with parallax only.\n if 'pi_E_N' in parameters or 'pi_E_E' in parameters:\n warnings.warn(\n \"\\n\\nTests indicate that chi2 gradient for models with \"\n \"parallax has BUGS!!!\\n It's better not to use it or contact \"\n \"code authors.\\n\")\n # JCY Not happy about this as it requires importing from other\n # modules. It is inelegant, which in my experience often means it\n # needs to be refactored.\n kwargs = dict()\n if self.dataset.ephemerides_file is not None:\n kwargs['satellite_skycoord'] = self.dataset.satellite_skycoord\n\n parameters_no_piE = {**self.model.parameters.as_dict()}\n parameters_no_piE.pop('pi_E_N')\n parameters_no_piE.pop('pi_E_E')\n\n trajectory_no_piE = Trajectory(\n self.dataset.time, ModelParameters(parameters_no_piE),\n **kwargs)\n dx = trajectory.x - trajectory_no_piE.x\n dy = trajectory.y - trajectory_no_piE.y\n delta_E = dx * as_dict['pi_E_E'] + dy * as_dict['pi_E_N']\n delta_N = dx * as_dict['pi_E_N'] - dy * as_dict['pi_E_E']\n det = as_dict['pi_E_N']**2 + as_dict['pi_E_E']**2\n gradient['pi_E_N'] = (d_u_d_x * delta_N + d_u_d_y * delta_E) / det\n gradient['pi_E_E'] = (d_u_d_x * delta_E - d_u_d_y * delta_N) / det\n\n return gradient",
"def get_cpsd(self, params=None):\n if params is None:\n params = self.params\n\n if self.cpsd_model is None:\n return np.array([self.params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n return np.log(self.cpsd_model(self.params, self.fbins.bin_cent))",
"def det_jacobian(eps):\n # f(ep) = exp(-ep)\n # dfdep = -exp(-ep)\n # d2fdep2 = exp(-ep)\n # 1/det(J) = 1/prod(exp(-ep)) = 1/exp(-sum(eps))\n return exp(-sum(eps))",
"def grad_norm_in_params(params):\n a=0\n for item in params.values():\n a += nd.sum(item.grad() ** 2).asscalar()\n return a ** 0.5",
"def gradFunc(param):\n\n return np.array(\n GeneralizedExtremeValueDistribution.computeNegLogLikelihoodGrad(\n param[0], param[1], param[2], data\n ))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
psd = pylag.mlfit.MLPSD.get_psd(params) Calculate the power spectrum (as its natural logarithm) in each frequency bin for a given set of parameters.
|
def get_psd(self, params=None):
    if params is None:
        params = self.params
    if self.model is None:
        # free-form PSD: the parameter values are the (log) PSD in each frequency bin
        return np.array([params[p].value for p in params])
    else:
        return np.log(self.model(params, self.fbins.bin_cent))
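
A minimal sketch, not part of the pylag source shown here, that evaluates the power spectrum at the current parameter values and plots it against the bin centre frequencies; it assumes an existing, already-fitted MLPSD instance named mlpsd.

# Hedged sketch: `mlpsd` is an assumed, already-fitted MLPSD instance
import numpy as np
import matplotlib.pyplot as plt

log_psd = mlpsd.get_psd()                       # natural log of the PSD in each frequency bin
plt.loglog(mlpsd.fbins.bin_cent, np.exp(log_psd))
plt.xlabel('Frequency')
plt.ylabel('Power density')
plt.show()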
|
[
"def welch_psd(data, fs, plot=True, window='hanning', overlap=0.5, len_seg=None, axis=-1):\n# data = data.reshape(data.shape[0],)\n if len_seg is None:\n overlap = overlap * 256\n else:\n overlap = overlap * len_seg\n freqs, psd = signal.welch(data, fs=fs, noverlap=overlap, nperseg=len_seg,\n window=window, nfft=None, detrend='constant',\n return_onesided=True, scaling='density', axis=axis)\n if plot:\n plt.plot(freqs, psd)\n plt.show()\n return freqs, psd",
"def lperiodgram(psd, dofw = 1, alpha = 0.05, Nens = 2, Nband = 1, smoo = True, ax = -1):\n if smoo == True:\n #N = np.floor(psd.shape[ax]/int(Nens))\n N = np.floor(int(Nens))\n win = windows.boxcar(N)\n win = win/win.sum()\n win.resize((N,) + tuple(np.int8(np.ones(psd.ndim - 1))))\n if ax != 0 and ax != -1:\n win = np.rollaxis(win, 0, start = ax + 1)\n elif ax != 0 and ax == -1:\n win = np.rollaxis(win, 0, start = psd.ndim)\n elif ax == 0:\n win = win\n else:\n raise ValueError, \"Pick your axis better.\"\n\n mpsd = sci_fftconvolve(psd, win, mode = 'same')\n else:\n mpsd = binav(psd, bins = Nens, ax = ax)\n \n if Nband > 1:\n if Nband % 2 != 1:\n Nband = Nband + 1\n \n wbd = windows.boxcar(Nband)\n wbd = wbd / wbd.sum()\n wbd.resize((Nband,) + tuple(np.int8(np.ones(mpsd.ndim - 1))))\n if ax != 0 and ax != -1:\n wbd = np.rollaxis(wbd, 0, start = ax + 1)\n elif ax != 0 and ax == -1:\n wbd = np.rollaxis(wbd, 0, start = mpsd.ndim)\n elif ax == 0:\n wdb = wbd\n else:\n raise ValueError, \"Pick your axis better.\"\n \n mpsd = sci_fftconvolve(mpsd, wbd, mode = 'same')\n \n dof = 2*Nens*Nband*dofw # for non-overlaping segments\n psd_hi = (dof * mpsd) / (stats.chi2.ppf(.5 * alpha, dof))\n psd_lo = (dof * mpsd) / (stats.chi2.ppf(1-(alpha/2), dof))\n loci = np.log10(dof / stats.chi2.ppf(1-(alpha/2), dof))\n hici = np.log10(dof / stats.chi2.ppf(.5 * alpha, dof))\n mpsd = mpsd\n Stats = tuple([psd_lo, psd_hi, loci, hici, dof])\n return mpsd, Stats",
"def fit_psd(self):\n print(\"Fitting PSD of light curve 1...\")\n self.mlpsd1.fit()\n self.ac1 = self.mlpsd1.cov_matrix(self.mlpsd1.params)\n\n print(\"Fitting PSD of light curve 2...\")\n self.mlpsd2.fit()\n self.ac2 = self.mlpsd1.cov_matrix(self.mlpsd2.params)\n\n if self.cpsd_model is None:\n # set an initial estimate of the cross power spectrum as the average of the two band powers\n # minus a little bit - this helps the fit on its way!\n for i in range(len(self.fbins)):\n self.params['ln_cpsd%01d' % i].value = 0.5 * (self.mlpsd1.psd[i] + self.mlpsd2.psd[i]) - 1.",
"def psd_11(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.logspace(np.log10(0.5/n), np.log10(0.5*dt), 6)\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100, nMult=4,\n input_psd=['broken_powerlaw', [1e-6, -1, -3, 3e-3]])\n lc[:,1] = lc[:,1] - lc[:,1].mean(1)[:,None] + lc[:,1].mean()\n\n fit_log_psd(fql, lc, extra, '11')",
"def pspec(psd2, return_index=True, wavenumber=False, return_stddev=False, azbins=1, binsize=1.0, view=False, **kwargs):\n #freq = 1 + numpy.arange( numpy.floor( numpy.sqrt((image.shape[0]/2)**2+(image.shape[1]/2)**2) ) )\n\n azbins,(freq,zz) = azimuthalAverageBins(psd2,azbins=azbins,interpnan=True, binsize=binsize, **kwargs)\n if len(zz) == 1: zz=zz[0]\n # the \"Frequency\" is the spatial frequency f = 1/x for the standard numpy fft, which follows the convention\n # A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n # or\n # F_f = Sum( a_m e^(-2 pi i f x_m) over the range m,m_max where a_m are the values of the pixels, x_m are the\n # indices of the pixels, and f is the spatial frequency\n freq = freq.astype('float') # there was a +1.0 here before, presumably to deal with div-by-0, but that shouldn't happen and shouldn't have been \"accounted for\" anyway\n\n if return_index:\n if wavenumber:\n fftwavenum = (numpy.fft.fftfreq(zz.size*2)[:zz.size])\n return_vals = list((fftwavenum,zz))\n #return_vals = list((len(freq)/freq,zz))\n else:\n return_vals = list((freq,zz))\n # return_vals = list((freq/len(freq),zz))\n else:\n return_vals = list(zz)\n if return_stddev:\n zzstd = azimuthalAverageBins(psd2,azbins=azbins,stddev=True,interpnan=True, binsize=binsize, **kwargs)\n return_vals.append(zzstd)\n\n if view and pyplotOK:\n pyplot.loglog(freq,zz)\n pyplot.xlabel(\"Spatial Frequency\")\n pyplot.ylabel(\"Spectral Power\")\n\n return return_vals",
"def PSSM_freqs(PSSM_all, pseudocount):\n PSSM_all_psc = PSSM_pseudocount(PSSM_all, pseudocount)\n \n PSSM_all_f = []\n for PSSM in PSSM_all_psc:\n PSSM_colsums = np.sum(PSSM,0,dtype='float')\n PSSM_all_f.append(PSSM / PSSM_colsums)\n \n return(PSSM_all_f)",
"def psd_1(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.logspace(np.log10(1./(dt*n)), np.log10(0.5*dt), 6)\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100)\n\n fit_log_psd(fql, lc, extra, '1')",
"def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, **kwargs):\n if not self._hold: self.cla()\n pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap)\n pxx.shape = len(freqs),\n freqs += Fc\n\n self.plot(freqs, 10*npy.log10(pxx), **kwargs)\n self.set_xlabel('Frequency')\n self.set_ylabel('Power Spectrum (dB)')\n self.grid(True)\n vmin, vmax = self.viewLim.intervaly().get_bounds()\n intv = vmax-vmin\n logi = int(npy.log10(intv))\n if logi==0: logi=.1\n step = 10*logi\n #print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1\n ticks = npy.arange(math.floor(vmin), math.ceil(vmax)+1, step)\n self.set_yticks(ticks)\n\n return pxx, freqs",
"def psd_with_formula():\n # Load data\n lfp = np.load(\"Data_LFP_other/lfp_run26.npy\")[4:-6, :-1]\n\n n_points = lfp.shape[1] # number of points\n samp_rate_lfp = 2000 # sample rate\n dt = 1 / samp_rate_lfp # delta t\n df = samp_rate_lfp/n_points # delta f\n\n # Getting the sample frequencies of the Discrete Fourier Transform (DFT)\n f_dft = np.fft.fftfreq(n_points, d=dt)\n freq = f_dft[:n_points//2] # only positive frequencies (one-sided)\n\n # Compute the DFT of lfp by using the Fast Fourier Transform (FFT)\n fft = np.abs(np.fft.fft(lfp))/n_points\n # Power spectrum (PS)\n ps = 2*(fft[:, :n_points//2]**2) # only positive frequencies (one-sided)\n # Power spectrum density (PSD)\n psd = ps / df\n # Computing the mean psd\n psd_mean = np.mean(psd, axis=0)\n # Plotting\n plt.plot(np.log10(freq), np.log10(psd_mean),\n linewidth=0.5, label='formula')",
"def sum(ps):\n ells = [p.ells for p in ps]\n assert all(ell == ells[0] for ell in ells), \"Can't add spectra with different ells.\"\n maps = reduce(lambda x, y: x & y, [set(p.get_maps()) for p in ps])\n spectra = SymmetricTensorDict([(k,sum(p.spectra[k] for p in ps)) for k in pairs(maps)],rank=2)\n if all([p.cov for p in ps]): cov = SymmetricTensorDict([(k,sum(p.cov[k] for p in ps)) for k in pairs(pairs(maps))],rank=4)\n else: cov=None\n return PowerSpectra(spectra,cov,ells[0],binning=ps[0].binning)",
"def psd_with_periodogramn():\n # Load data\n lfp = np.load(\"Data_LFP_other/lfp_run26.npy\")[4:-6, :-1]\n samp_rate_lfp = 2000 # sample rate\n # Calculate psd for each channel\n f, pxx = periodogram(lfp, samp_rate_lfp)\n # Calculate mean\n psd_mean = np.mean(pxx, axis=0)\n # Plot\n plt.plot(np.log10(f), np.log10(psd_mean),\n linewidth=0.5, label='periodogram')",
"def psdfull(x, Fs, window='hann'):\n # Rename variables to conventional names. \n # Length of the input series.\n N = len(x)\n tmax = N/Fs\n \n # Calculate the FFT. By default use the hanning window. \n if window.lower == 'hann':\n xdft = np.fft.fft(x*np.hanning(len(x))) \n else:\n xdft = np.fft.fft(x)\n # Shift the fft to be symmetric.\n xdft = np.fft.fftshift(xdft)\n # Calculate the power spectrum. \n psdx = abs(xdft)**2\n\n # Manually create an fftshifted index vector (this could be done more efficiently, but whatever). \n freq_index= np.fft.fftshift(np.arange(N))\n # Find the zero frequency index.\n zero_index = np.where(freq_index == 0)[0][0]\n # Let all frequencies left of this be negative frequencies.\n leftside = -np.arange(zero_index)-1\n # Insert this vector (flipped) into the freq_index vector.\n freq_index[0:zero_index] = leftside[::-1]\n \n # Convert to frequency vector.\n freq = freq_index/tmax\n\n # Rescale. See page 389 of 'Numerical Methods'. First the default dft sum ignores the sampling frequency and just sums the terms.\n psdx = psdx/(Fs**2)\n # We generally want the signal to be independent of the number of samples (psd per unit time). So divide by tmax.\n psdx = psdx/tmax\n\n return freq, psdx",
"def psd(signal, fs = 100, freq = [2,40]): \n \n # Frequency Boundaries\n f_res = signal.shape[0]/fs\n flow = int(freq[0]*f_res)\n fup = int(freq[1]*f_res) + 1 # upper boundary\n \n # # multiply the fft by hanning window\n # signal = np.multiply(signal,np.hanning(winsize))\n \n # get power spectrum\n xdft = np.square(np.absolute(fft(signal)))\n \n # normalize to signal\n xdft = xdft * (1/(fs*signal.shape[0]))\n \n # multiply *2 to conserve energy in positive frequencies\n psdx = 2*xdft[0:int(xdft.shape[0]/2+1)] \n\n return np.sum(psdx[flow:fup])",
"def getPowerSpectrumInfo(vals, dt, maxFreqMHz = 0, normFactor = 1, shortenOutput = True):\n\n # Get frequencies from sample times\n freqs = makeFreqsMHz(dtNs = dt, N = len(vals))\n freqsBand = []\n\n # Limit frequency information returned by maximum freq optional arg\n if maxFreqMHz > 0:\n freqsBand = [freq for freq in freqs if freq < maxFreqMHz]\n else:\n freqsBand = freqs\n\n # Do FFT and convert to power spectum and phase\n theFFT = np.fft.fft(vals)\n powSpec = abs(theFFT)**2\n powSpec /= normFactor\n N = len(vals)\n \n if shortenOutput == True:\n powSpec = 2*powSpec[:N/2+1]\n powSpec[0]/=2\n powSpec[-1]/=2\n\n phaseInfo = np.angle(theFFT)\n phaseInfoDeg = [samp*360/math.pi for samp in phaseInfo]\n\n # Return a list of lists\n return freqsBand, powSpec[:len(freqsBand)], phaseInfoDeg[:len(freqsBand)]",
"def plot_psd(self):\n fig = self.all.current.raw.plot_psd(average=False,\n spatial_colors=False, show=False)\n win = fig.canvas.manager.window\n win.setWindowTitle(\"Power spectral density\")\n fig.show()",
"def pspec(psd2, return_index=True, wavenumber=False, return_stddev=False, azbins=1, binsize=1.0, **kwargs):\n # freq = 1 + numpy.arange( numpy.floor( numpy.sqrt((image.shape[0]/2)**2+(image.shape[1]/2)**2) ) )\n\n azbins, (freq, zz) = azimuthalAverageBins(psd2, azbins=azbins, interpnan=True, binsize=binsize, **kwargs)\n if len(zz) == 1: zz = zz[0]\n # the \"Frequency\" is the spatial frequency f = 1/x for the standard numpy fft, which follows the convention\n # A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n # or\n # F_f = Sum( a_m e^(-2 pi i f x_m) over the range m,m_max where a_m are the values of the pixels, x_m are the\n # indices of the pixels, and f is the spatial frequency\n freq = freq.astype(\n 'float') # there was a +1.0 here before, presumably to deal with div-by-0, but that shouldn't happen and shouldn't have been \"accounted for\" anyway\n\n if return_index:\n if wavenumber:\n return_vals = list((len(freq) / freq, zz))\n else:\n return_vals = list((freq / len(freq), zz))\n else:\n return_vals = list(zz)\n if return_stddev:\n zzstd = azimuthalAverageBins(psd2, azbins=azbins, stddev=True, interpnan=True, binsize=binsize, **kwargs)\n return_vals.append(zzstd)\n\n return return_vals",
"def psd_welch(signal, fs = 100, freq = [5,25]): \n \n # calculate pwelch\n f,p = welch(signal, fs=fs, window='hann')\n \n # get frequency boundaries\n flow = find_nearest(f, freq[0])\n fup = find_nearest(f, freq[1])\n\n return np.sum(p[flow:fup])",
"def psd_13(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100, nMult=4,\n input_psd=['broken_powerlaw', [1e-4, -1, -2, 1e-3]], gaussNoise=1,\n gaps=[3372, 50], sameGap=True)\n \n fql = np.logspace(np.log10(0.5/(lc[0,0,-1]-lc[0,0,0])), np.log10(0.5*dt), 6)\n\n lc[:,1] = lc[:,1] - lc[:,1].mean(1)[:,None] + lc[:,1].mean()\n\n fit_log_psd(fql, lc, extra, '13')",
"def PSSM_freqs_dict(PSSM_all, pseudocount):\n PSSM_all_psc = PSSM_pseudocount_dict(PSSM_all, pseudocount)\n \n PSSM_all_f = {}\n for k_PSSM in PSSM_all_psc.keys():\n PSSM_colsums = np.sum(PSSM_all_psc[k_PSSM],0,dtype='float')\n PSSM_all_f[k_PSSM] = (PSSM_all_psc[k_PSSM] / PSSM_colsums)\n \n return(PSSM_all_f)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pylag.mlfit.MLPSD.process_fit_results(fit_result, params) Process a scipy.optimize fit result to calculate the best-fitting power spectrum and its error from the model.
|
def process_fit_results(self, fit_result, params):
self.psd = self.get_psd()
if self.model is None:
self.psd_error = self.param_error
else:
# calculate the error on each PSD point from the error on each parameter
psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent)
self.psd_error = np.sum([e * psd_deriv[..., i] for i, e in enumerate(self.param_error)], axis=0) / self.psd
if np.any(np.isnan(self.psd_error)):
self.psd_error = None
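
In normal use this method is invoked internally at the end of fit(), so you only read its outputs afterwards. A minimal sketch, assuming an existing MLPSD instance named mlpsd:

# Hedged sketch: `mlpsd` is an assumed MLPSD instance; fit() calls
# process_fit_results itself once the optimiser has finished
mlpsd.fit()
print(mlpsd.psd)          # best-fitting (log) PSD value in each frequency bin
print(mlpsd.psd_error)    # propagated error estimate, or None if it contained NaNs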
|
[
"def process_fit_results(self, fit_result, params):\n hess = fit_result.hess_inv(fit_result.x) if callable(fit_result.hess_inv) else np.diag(fit_result.hess_inv)\n\n self.cpsd = self.get_cpsd()\n if self.cpsd_model is None:\n self.cpsd_error = hess[:len(self.fbins)] ** 0.5\n else:\n return NotImplemented\n # # calculate the error on each PSD point from the error on each parameter\n # psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent)\n # self.psd_error = np.sum([e * psd_deriv[..., i] for i, e in enumerate(self.param_error)], axis=0) / self.psd\n # if np.any(np.isnan(self.psd_error)):\n # self.psd_error = None\n\n self.lag = self.get_lag()\n if self.cpsd_model is None:\n self.lag_error = hess[len(self.fbins):] ** 0.5 / (2. * np.pi * self.fbins.bin_cent)\n else:\n return NotImplemented",
"def fit_report(fit_result, modelpars=None, show_correl=True, min_correl=0.1,\n sort_pars=True, _larch=None, **kws):\n result = getattr(fit_result, 'fit_details', fit_result)\n if isinstance(result, MinimizerResult):\n return lmfit.fit_report(result, modelpars=modelpars,\n show_correl=show_correl,\n min_correl=min_correl, sort_pars=sort_pars)\n elif isinstance(result, ModelResult):\n return result.fit_report(modelpars=modelpars,\n show_correl=show_correl,\n min_correl=min_correl, sort_pars=sort_pars)\n else:\n result = getattr(fit_result, 'params', fit_result)\n if isinstance(result, Parameters):\n return lmfit.fit_report(result, modelpars=modelpars,\n show_correl=show_correl,\n min_correl=min_correl, sort_pars=sort_pars)\n else:\n try:\n result = group2params(fit_result, _larch=_larch)\n return lmfit.fit_report(result, modelpars=modelpars,\n show_correl=show_correl,\n min_correl=min_correl, sort_pars=sort_pars)\n except (ValueError, AttributeError):\n pass\n return \"Cannot make fit report with %s\" % repr(fit_result)",
"def from_fit(result):\n params = result.params\n return {name : mp.gummy(param.value,param.stderr) for name,param in params.items()}",
"def _process_scipy_result(res: OptimizeResult, options: Dict[str, Any]) -> None:\n if \"success\" not in res.keys() or \"status\" not in res.keys():\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\", category=OptimizationWarning)\n warnings.warn(\n \"Optimization failed within `scipy.optimize.minimize` with no \"\n \"status returned to `res.`\",\n OptimizationWarning,\n )\n elif not res.success:\n if (\n \"ITERATIONS REACHED LIMIT\" in res.message\n or \"Iteration limit reached\" in res.message\n ):\n logger.info(\n \"`scipy.minimize` exited by reaching the iteration limit of \"\n f\"`maxiter: {options.get('maxiter')}`.\"\n )\n elif \"EVALUATIONS EXCEEDS LIMIT\" in res.message:\n logger.info(\n \"`scipy.minimize` exited by reaching the function evaluation limit of \"\n f\"`maxfun: {options.get('maxfun')}`.\"\n )\n elif \"Optimization timed out after\" in res.message:\n logger.info(res.message)\n else:\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\", category=OptimizationWarning)\n warnings.warn(\n f\"Optimization failed within `scipy.optimize.minimize` with status \"\n f\"{res.status} and message {res.message}.\",\n OptimizationWarning,\n )",
"def fit(self, data, fit='quantiles'):\n if fit == 'MLE':\n self.setParamsMLE(data)\n self.setDistObj()\n isConverged = True # assume stats.fit will always return a distribution\n else:\n dataMoments = np.array([np.mean(data), np.std(data, ddof=1), moment(data, 3)])\n\n def objFunc(X):\n [self.shape, self.loc, self.scale] = X\n if self.fixedAtZero:\n self.loc = 0\n self.setDistObj()\n if fit == 'quantiles':\n obj = probPlotSqrErr(data, self, self.type, showPlots=False)[0]\n elif fit == 'MOM':\n distMoments = self.moments()\n weights = [1, 1,\n 0.1] # scale the influence of each moment # set last entry to remove skewness from the assessment\n # scale each moment error relative to the data moment value, but replace the data moment with a constant if it is close to zero\n obj = np.sum([abs(dataMoments[i] - distMoments[i]) / max(dataMoments[i], 1E-6) * weights[i] for i in\n range(\n self.nParams)]) # only use the number of moments needed to specify the distribution to match the data # np.sum((distMoments-dataMoments)**2) # np.sum([abs( (dataMoments[i]-distMoments[i])**(1/(i+1)) ) for i in range(3)]) #np.sum((dist.moments()-dataMoments)**2)\n return obj\n\n X = [self.shape, self.loc, self.scale]\n\n res = minimize(objFunc, X, method='SLSQP', options={'disp': True, 'maxiter': 600,\n 'ftol': 1e-8}) # , bounds=bnds, constraints=cons, # options={'maxiter': 500, 'gtol': 1e-6, 'disp': True}\n # method='SLSQP' 'TNC' 'L-BFGS-B' 'COBYLA' #\n # seems to ignore the constraint if bounds not included with method='SLSQP'\n isConverged = res.success\n if isConverged:\n [self.shape, self.loc, self.scale] = res.x\n else:\n [self.shape, self.loc, self.scale] = X # revert to previous values\n\n if self.fixedAtZero:\n self.loc = 0\n\n self.setDistObj()\n return isConverged",
"def fit_image(self):\n self.params = self.all_params['Fit 0']\n self.fit_results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n #report_fit(self.fit_results)\n sel.fparams = self.fit_results.params",
"def fit_errors(self):\n assert self.errors is not None\n\n slope, intercept, r_value, p_value, std_err = stats.linregress(self.errors[:, 0], np.log(self.errors[:, 1]))\n\n a = np.exp(intercept)\n tau = -1.0 / slope\n\n self.popt = (a, tau)",
"def fit_powerlaw(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False,\n verbose=True, **kwarg):\n if fitparams is None:\n print(\"Please specify fit parameters in function input\")\n return\n\n if domain is not None:\n fitdatax, fitdatay = selectdomain(xdata, ydata, domain)\n else:\n fitdatax = xdata\n fitdatay = ydata\n\n fitfunc_string = \"y = p[0] + p[1] * x ^ p[2]\"\n params, param_errs = fitbetter(fitdatax, fitdatay, powerlawfunc, fitparams, domain=None, showfit=showfit,\n showstartfit=showstartfit, **kwarg)\n\n if verbose:\n print(fitfunc_string)\n parnames = [\"Offset\", \"Multiplicator\", \"Exponent\"]\n print(tabulate(zip(parnames, params, param_errs), headers=[\"Parameter\", \"Value\", \"Std\"],\n tablefmt=\"fancy_grid\", floatfmt=\"\", numalign=\"center\", stralign='left'))\n plot_fitresult(fitdatax, fitdatay, params, param_errs, fitparam_names=parnames)\n\n return params, param_errs",
"def write_results(js, fit_rms, fit_err, hold_rms_=None, hold_err_=None, \\\n sigma2_=None):\n _bcs.f90wrap_write_results(js=js, fit_rms=fit_rms, fit_err=fit_err, \\\n hold_rms_=hold_rms_, hold_err_=hold_err_, sigma2_=sigma2_)",
"def multiple_fits(self):\n self.subtract_background()\n k = 1\n for key in self.fit_names:\n #get params for this fit\n #with new lmfit might not need to do this\n self.params = copy.deepcopy(self.all_params[key])\n\n results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n self.params = results.params\n\n #then if k > num_fits copy result values to params dictionary and fit\n if k < self.num_fits:\n #update parameters\n next_key = self.fit_names[k]\n for i in self.all_params[next_key].keys():\n self.all_params[next_key][i].value = self.params[i].value\n\n #move to next iteration\n k = k + 1\n\n self.fit_results = results",
"def __call__(self, _data, _model, staterror=None, syserror=None, weight=None):\n parvals_key = tuple('%.4e' % x for x in self.model.parvals)\n try:\n fit_stat = self.cache_fit_stat[parvals_key]\n self.logger.info('nmass_model: Cache hit %s' % str(parvals_key))\n except KeyError:\n fit_stat = self.model.calc_stat()\n\n self.logger.info('Fit statistic: %.4f' % fit_stat)\n self.cache_fit_stat[parvals_key] = fit_stat\n\n if self.min_fit_stat is None or fit_stat < self.min_fit_stat:\n self.min_fit_stat = fit_stat\n self.min_parvals = self.model.parvals\n\n return fit_stat, np.ones(1)",
"def fit(self, init_params=None, update_params=True, **kwargs):\n if init_params is None:\n init_params = self.params\n\n self.fit_result = self._dofit(init_params, **kwargs)\n print(self.fit_result)\n\n if True or self.fit_result.success and update_params:\n for par, value in zip([p for p in init_params if init_params[p].vary], self.fit_result.x):\n self.params[par].value = value\n\n hess = self.fit_result.hess_inv(self.fit_result.x) if callable(self.fit_result.hess_inv) else np.diag(self.fit_result.hess_inv)\n\n # make sure we only get the finite parameter errors\n self.param_error = np.zeros(len(self.params))\n self.param_error[hess>0] = hess[hess>0] ** 0.5\n\n self.process_fit_results(self.fit_result, self.params)",
"def write_max_like_results(result_dict,comp_dict,run_dir):\n\t# Extract elements from dictionaries\n\tpar_names = []\n\tpar_best = []\n\tsig\t = []\n\tfor key in result_dict:\n\t\tpar_names.append(key)\n\t\tpar_best.append(result_dict[key]['med'])\n\t\tsig.append(result_dict[key]['std'])\n\tif 0: \n\t\tfor i in range(0,len(par_names),1):\n\t\t\tprint(par_names[i],par_best[i],sig[i])\n\t# Write best-fit parameters to FITS table\n\tcol1 = fits.Column(name='parameter', format='30A', array=par_names)\n\tcol2 = fits.Column(name='best_fit' , format='E' , array=par_best)\n\tcol3 = fits.Column(name='sigma'\t, format='E' , array=sig)\n\tcols = fits.ColDefs([col1,col2,col3])\n\thdu = fits.BinTableHDU.from_columns(cols)\n\thdu.writeto(run_dir+'log/par_table.fits',overwrite=True)\n\tdel hdu\n\t# Write best-fit components to FITS file\n\tcols = []\n\t# Construct a column for each parameter and chain\n\tfor key in comp_dict:\n\t\tcols.append(fits.Column(name=key, format='E', array=comp_dict[key]['comp']))\n\t# Write to fits\n\tcols = fits.ColDefs(cols)\n\thdu = fits.BinTableHDU.from_columns(cols)\n\thdu.writeto(run_dir+'log/best_model_components.fits',overwrite=True)\n\tdel hdu\n\t# Collect garbage\n\tdel result_dict\n\tdel comp_dict\n\tdel par_names\n\tdel par_best\n\tdel sig\n\tdel cols\n\tgc.collect()\n\n\treturn None",
"def make_results(self):\n fitted = self.fitted\n self.results = OrderedDict()\n ## fitting results\n self.results.update(\n nfev = fitted.nfev,\n ndata = fitted.ndata,\n nvarys = fitted.nvarys, # number of varible paramters\n nfree = fitted.nfree, # degree of freem\n chisqr = fitted.chisqr,\n redchi = fitted.redchi,\n aic = fitted.aic,\n bic = fitted.bic)\n params = fitted.params\n pnames = list(params.keys())\n pvalues = OrderedDict()\n for pn in pnames:\n par = params.get(pn)\n pvalues[pn] = [par.value, par.min, par.max, par.vary]\n self.results[\"params\"] = pvalues\n ## confidence intervals\n if hasattr(self, \"ci\") and self.ci is not None:\n ci = self.ci\n ci_values = OrderedDict()\n ci_sigmas = [ \"ci%02d\" % (v[0]*100) for v in ci.get(pnames[0]) ]\n ci_names = sorted(list(set(ci_sigmas)))\n ci_idx = { k: [] for k in ci_names }\n for cn, idx in zip(ci_sigmas, range(len(ci_sigmas))):\n ci_idx[cn].append(idx)\n # parameters ci\n for pn in pnames:\n ci_pv = OrderedDict()\n pv = [ v[1] for v in ci.get(pn) ]\n # best\n pv_best = pv[ ci_idx[\"ci00\"][0] ]\n ci_pv[\"best\"] = pv_best\n # ci of each sigma\n pv2 = [ v-pv_best for v in pv ]\n for cn in ci_names[1:]:\n ci_pv[cn] = [ pv2[idx] for idx in ci_idx[cn] ]\n ci_values[pn] = ci_pv\n self.results[\"ci\"] = ci_values",
"def optimization_layer(result, iprint=-1):\n x = result._to_statevec()\n print('Optimization layer: running with %i model parameters...' % result.ndim)\n xopt, fval, optimizer_dict = fmin_l_bfgs_b(\n result._gradient, x, args=(result.input_matrix,), iprint=iprint, maxiter=int(2e4), factr=100)\n print(' ...finished with %i gradient (L-BFGS) iterations.' % optimizer_dict['nit'])\n print(' chi^2 = %.3e (%s)^2' % (fval, result.unit))\n result._from_statevec(xopt)\n result.additional['Opt'] = optimizer_dict\n return result",
"def fit(self):\n self.procedure_id = uuid4().hex\n self.procedure_date = str(datetime.today())\n t = perf_counter()\n self.__check_data()\n if self.error_free:\n max_iter = self.parameters[\"max iterations\"]\n conv_criteria = self.parameters[\"convergence level\"]\n\n if self.matrix.is_omx():\n self.output = AequilibraeMatrix()\n self.output.create_from_omx(self.output.random_name(), self.matrix.file_path,\n cores=self.matrix.view_names)\n self.output.computational_view()\n else:\n self.output = self.matrix.copy(self.output_name)\n if self.nan_as_zero:\n self.output.matrix_view[:, :] = np.nan_to_num(self.output.matrix_view)[:, :]\n\n rows = self.rows.data[self.row_field]\n columns = self.columns.data[self.column_field]\n tot_matrix = np.nansum(self.output.matrix_view[:, :])\n\n # Reporting\n self.report.append(\"Target convergence criteria: \" + str(conv_criteria))\n self.report.append(\"Maximum iterations: \" + str(max_iter))\n self.report.append(\"\")\n self.report.append(\"Rows:\" + str(self.rows.entries))\n self.report.append(\"Columns: \" + str(self.columns.entries))\n\n self.report.append(\"Total of seed matrix: \" + \"{:28,.4f}\".format(float(tot_matrix)))\n self.report.append(\"Total of target vectors: \" + \"{:25,.4f}\".format(float(np.nansum(rows))))\n self.report.append(\"\")\n self.report.append(\"Iteration, Convergence\")\n self.gap = conv_criteria + 1\n\n iter = 0\n while self.gap > conv_criteria and iter < max_iter:\n iter += 1\n # computes factors for zones\n marg_rows = self.__tot_rows(self.output.matrix_view[:, :])\n row_factor = self.__factor(marg_rows, rows)\n # applies factor\n self.output.matrix_view[:, :] = np.transpose(\n np.transpose(self.output.matrix_view[:, :]) * np.transpose(row_factor)\n )[:, :]\n\n # computes factors for columns\n marg_cols = self.__tot_columns(self.output.matrix_view[:, :])\n column_factor = self.__factor(marg_cols, columns)\n\n # applies factor\n self.output.matrix_view[:, :] = self.output.matrix_view[:, :] * column_factor\n\n # increments iterarions and computes errors\n self.gap = max(\n abs(1 - np.min(row_factor)),\n abs(np.max(row_factor) - 1),\n abs(1 - np.min(column_factor)),\n abs(np.max(column_factor) - 1),\n )\n\n self.report.append(str(iter) + \" , \" + str(\"{:4,.10f}\".format(float(np.nansum(self.gap)))))\n\n self.report.append(\"\")\n self.report.append(\"Running time: \" + str(\"{:4,.3f}\".format(perf_counter() - t)) + \"s\")",
"def scale_results():\n res_scale = 1.0\n command_result, res_scale = ri.RhinoGet.GetNumber(\"Input scale factor for results.\", False, res_scale)\n if command_result != rc.Commands.Result.Success: return\n if res_scale == 1.0: return\n rm.scale_centric(\"SOF_Results\", res_scale)",
"def compute_fit(self):\n self.z = np.polyfit(self.a, self.e, 2) # Getting the fit parameters\n self.f = np.poly1d(self.z) ## Getting the new function\n self.x_fit = np.linspace(self.a[0], self.a[-1], 100)\n self.y_fit = self.f(self.x_fit)\n\n # Similarly for the volume\n self.vz = np.polyfit(self.v, self.e, 2) # Getting the fit parameters\n self.vf = np.poly1d(self.vz) ## Getting the new function\n self.v_x_fit = np.linspace(self.v[0], self.v[-1], 100)\n self.v_y_fit = self.vf(self.v_x_fit)\n\n # Getting the minimum energy\n self.E_optimized = min(self.y_fit)\n self.E_optimized_printable = self.E_optimized.astype(np.float)\n\n # Getting the optimized lattice constant\n self.min_index = np.argmin(self.y_fit)\n self.a0_optimized = self.x_fit.flat[self.min_index]\n self.v0_optimized = self.v_x_fit.flat[self.min_index] # There are four primitive cells in a single conventional cell\n\n # Calculations\n # Getting the double derivative using a 2nd degree polynomial\n self.dda0 = 2*self.z[0]#.flat[0]\n self.ddv0 = 2*self.vz[0]#.flat[0]\n self.B = eVA32GPa(self.v0_optimized*self.ddv0) # 1 eV/Angstrom3 = 160.21766208 GPa",
"def fitting(fitfunc, X, Y, start_parm, correlated=True, verbose=True):\n errfunc = lambda p, x, y, error: np.dot(error, (y-fitfunc(p,x)).T)\n \n # compute inverse, cholesky decomposed covariance matrix\n if not correlated:\n cov = np.diag(np.diagonal(np.cov(Y.T)))\n else:\n cov = np.cov(Y.T)\n cov = (np.linalg.cholesky(np.linalg.inv(cov))).T\n\n # degrees of freedom\n dof = float(Y.shape[1]-len(start_parm)) \n # create results arrays\n res = np.zeros((Y.shape[0], len(start_parm)))\n res_cov = np.zeros((len(start_parm), len(start_parm)))\n chisquare = np.zeros(Y.shape[0])\n # The FIT to the boostrap samples\n for b in range(0, Y.shape[0]):\n p,cov1,infodict,mesg,ier = leastsq(errfunc, start_parm, \n args=(X, Y[b,:], cov), full_output=1, factor=0.1)\n chisquare[b] = float(sum(infodict['fvec']**2.))\n res[b] = np.array(p)\n if b==0:\n # print(cov1)\n res_cov = cov1*chisquare[b]/dof\n # print(res_cov)\t\n # calculate mean and standard deviation\n res_mean, res_std = af.calc_error(res)\n # chi2 = np.median(chisquare)\n # p-value calculated\n pvals_originfit = 1. - scipy.stats.chi2.cdf(chisquare[0], dof)\n \n # The fit to the mean value\n y = np.mean(Y, axis=0)\n p,cov1,infodict,mesg,ier = leastsq(errfunc, start_parm, \\\n args=(X, y, cov), full_output=1)\n chisquare_meanfit = float(sum(infodict['fvec']**2.))\n pvals_meanfit = 1. - scipy.stats.chi2.cdf(chisquare_meanfit, dof)\n # writing results to screen\n if verbose:\n if correlated:\n print(\"fit results for a correlated fit:\")\n else:\n print(\"fit results for an uncorrelated fit:\")\n print(\"degrees of freedom: %f\\n\" % dof)\n \n print(\"bootstrap fit:\")\n for rm, rs in zip(res_mean, res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n #print(\"Chi^2/dof: %.6e +/- %.6e\\n\" % (chi2/dof, np.std(chisquare)/dof))\n\n print(\"mean value fit:\")\n for rm, rs in zip(p, res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n print(\" Chi^2/dof: %.6e \" % (chisquare_meanfit / dof))\n print(\" p-value: %lf\" % pvals_meanfit) \n\n print(\"original data fit:\")\n for rm, rs in zip(res[0], res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n print(\" Chi^2/dof: %.6e \" % (chisquare[0]/dof))\n print(\" p-value: %lf\" % pvals_originfit) \n return res, res_cov.flatten(), chisquare[0]/dof, pvals_originfit"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pylag.mlfit.MLCrossSpectrum.fit_psd() Perform preliminary fits of the power spectra to the individual light curves. This prepopulates the stored autocovariance blocks of the covariance matrix (used when the PSD is frozen) and sets an initial estimate of the cross spectral powers as the starting point for fitting the full cross spectral model. Unless the power and cross spectra are being fitted simultaneously, this method should be run before fit().
|
def fit_psd(self):
print("Fitting PSD of light curve 1...")
self.mlpsd1.fit()
self.ac1 = self.mlpsd1.cov_matrix(self.mlpsd1.params)
print("Fitting PSD of light curve 2...")
self.mlpsd2.fit()
    self.ac2 = self.mlpsd2.cov_matrix(self.mlpsd2.params)
if self.cpsd_model is None:
# set an initial estimate of the cross power spectrum as the average of the two band powers
# minus a little bit - this helps the fit on its way!
for i in range(len(self.fbins)):
self.params['ln_cpsd%01d' % i].value = 0.5 * (self.mlpsd1.psd[i] + self.mlpsd2.psd[i]) - 1.
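A minimal usage sketch of the call order described above, assuming an MLCrossSpectrum object built from two light curves; lc1 and lc2 are placeholder names and the constructor arguments are an assumption rather than something shown in the code.

from pylag.mlfit import MLCrossSpectrum

# lc1, lc2: placeholder pylag light curve objects (assumed, not defined here)
mlcs = MLCrossSpectrum(lc1, lc2)
mlcs.fit_psd()   # fit each band's PSD, store ac1/ac2 and seed the ln_cpsd parameters
mlcs.fit()       # fit the full cross spectral model, reusing the frozen autocovariances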
|
[
"def fit_powerlaw(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False,\n verbose=True, **kwarg):\n if fitparams is None:\n print(\"Please specify fit parameters in function input\")\n return\n\n if domain is not None:\n fitdatax, fitdatay = selectdomain(xdata, ydata, domain)\n else:\n fitdatax = xdata\n fitdatay = ydata\n\n fitfunc_string = \"y = p[0] + p[1] * x ^ p[2]\"\n params, param_errs = fitbetter(fitdatax, fitdatay, powerlawfunc, fitparams, domain=None, showfit=showfit,\n showstartfit=showstartfit, **kwarg)\n\n if verbose:\n print(fitfunc_string)\n parnames = [\"Offset\", \"Multiplicator\", \"Exponent\"]\n print(tabulate(zip(parnames, params, param_errs), headers=[\"Parameter\", \"Value\", \"Std\"],\n tablefmt=\"fancy_grid\", floatfmt=\"\", numalign=\"center\", stralign='left'))\n plot_fitresult(fitdatax, fitdatay, params, param_errs, fitparam_names=parnames)\n\n return params, param_errs",
"def psd_11(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.logspace(np.log10(0.5/n), np.log10(0.5*dt), 6)\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100, nMult=4,\n input_psd=['broken_powerlaw', [1e-6, -1, -3, 3e-3]])\n lc[:,1] = lc[:,1] - lc[:,1].mean(1)[:,None] + lc[:,1].mean()\n\n fit_log_psd(fql, lc, extra, '11')",
"def psd_13(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100, nMult=4,\n input_psd=['broken_powerlaw', [1e-4, -1, -2, 1e-3]], gaussNoise=1,\n gaps=[3372, 50], sameGap=True)\n \n fql = np.logspace(np.log10(0.5/(lc[0,0,-1]-lc[0,0,0])), np.log10(0.5*dt), 6)\n\n lc[:,1] = lc[:,1] - lc[:,1].mean(1)[:,None] + lc[:,1].mean()\n\n fit_log_psd(fql, lc, extra, '13')",
"def power_law(sources: Union[BaseSource, BaseSample], outer_radius: Union[str, Quantity],\n inner_radius: Union[str, Quantity] = Quantity(0, 'arcsec'), redshifted: bool = False,\n lum_en: Quantity = Quantity([[0.5, 2.0], [0.01, 100.0]], \"keV\"), start_pho_index: float = 1.,\n lo_en: Quantity = Quantity(0.3, \"keV\"), hi_en: Quantity = Quantity(7.9, \"keV\"),\n freeze_nh: bool = True, par_fit_stat: float = 1., lum_conf: float = 68., abund_table: str = \"angr\",\n fit_method: str = \"leven\", group_spec: bool = True, min_counts: int = 5, min_sn: float = None,\n over_sample: float = None, one_rmf: bool = True, num_cores: int = NUM_CORES,\n timeout: Quantity = Quantity(1, 'hr')):\n sources, inn_rad_vals, out_rad_vals = _pregen_spectra(sources, outer_radius, inner_radius, group_spec, min_counts,\n min_sn, over_sample, one_rmf, num_cores)\n sources = _check_inputs(sources, lum_en, lo_en, hi_en, fit_method, abund_table, timeout)\n\n # This function is for a set model, either absorbed powerlaw or absorbed zpowerlw\n # These will be inserted into the general XSPEC script template, so lists of parameters need to be in the form\n # of TCL lists.\n lum_low_lims = \"{\" + \" \".join(lum_en[:, 0].to(\"keV\").value.astype(str)) + \"}\"\n lum_upp_lims = \"{\" + \" \".join(lum_en[:, 1].to(\"keV\").value.astype(str)) + \"}\"\n if redshifted:\n model = \"constant*tbabs*zpowerlw\"\n par_names = \"{factor nH PhoIndex Redshift norm}\"\n else:\n model = \"constant*tbabs*powerlaw\"\n par_names = \"{factor nH PhoIndex norm}\"\n\n script_paths = []\n outfile_paths = []\n src_inds = []\n for src_ind, source in enumerate(sources):\n spec_objs = source.get_spectra(out_rad_vals[src_ind], inner_radius=inn_rad_vals[src_ind], group_spec=group_spec,\n min_counts=min_counts, min_sn=min_sn, over_sample=over_sample)\n\n # This is because many other parts of this function assume that spec_objs is iterable, and in the case of\n # a source with only a single valid instrument for a single valid observation this may not be the case\n if isinstance(spec_objs, Spectrum):\n spec_objs = [spec_objs]\n\n if len(spec_objs) == 0:\n raise NoProductAvailableError(\"There are no matching spectra for {s}, you \"\n \"need to generate them first!\".format(s=source.name))\n\n # Turn spectra paths into TCL style list for substitution into template\n specs = \"{\" + \" \".join([spec.path for spec in spec_objs]) + \"}\"\n # For this model, we have to know the redshift of the source.\n if redshifted and source.redshift is None:\n raise ValueError(\"You cannot supply a source without a redshift if you have elected to fit zpowerlw.\")\n elif redshifted and source.redshift is not None:\n par_values = \"{{{0} {1} {2} {3} {4}}}\".format(1., source.nH.to(\"10^22 cm^-2\").value, start_pho_index,\n source.redshift, 1.)\n else:\n par_values = \"{{{0} {1} {2} {3}}}\".format(1., source.nH.to(\"10^22 cm^-2\").value, start_pho_index, 1.)\n\n # Set up the TCL list that defines which parameters are frozen, dependant on user input\n if redshifted and freeze_nh:\n freezing = \"{F T F T F}\"\n elif not redshifted and freeze_nh:\n freezing = \"{F T F F}\"\n elif redshifted and not freeze_nh:\n freezing = \"{F F F T F}\"\n elif not redshifted and not freeze_nh:\n freezing = \"{F F F F}\"\n\n # Set up the TCL list that defines which parameters are linked across different spectra,\n # dependant on user input\n if redshifted:\n linking = \"{F T T T T}\"\n else:\n linking = \"{F T T T}\"\n\n # If the powerlaw with redshift has been chosen, then we use the redshift 
attached to the source object\n # If not we just pass a filler redshift and the luminosities are invalid\n if redshifted or (not redshifted and source.redshift is not None):\n z = source.redshift\n else:\n z = 1\n warnings.warn(\"{s} has no redshift information associated, so luminosities from this fit\"\n \" will be invalid, as redshift has been set to one.\".format(s=source.name))\n\n out_file, script_file = _write_xspec_script(source, spec_objs[0].storage_key, model, abund_table, fit_method,\n specs, lo_en, hi_en, par_names, par_values, linking, freezing,\n par_fit_stat, lum_low_lims, lum_upp_lims, lum_conf, z, False, \"{}\",\n \"{}\", \"{}\", \"{}\", True)\n\n # If the fit has already been performed we do not wish to perform it again\n try:\n res = source.get_results(out_rad_vals[src_ind], model, inn_rad_vals[src_ind], None, group_spec, min_counts,\n min_sn, over_sample)\n except ModelNotAssociatedError:\n script_paths.append(script_file)\n outfile_paths.append(out_file)\n src_inds.append(src_ind)\n\n run_type = \"fit\"\n return script_paths, outfile_paths, num_cores, run_type, src_inds, None, timeout",
"def fit_powerlaw_plateau(x, y):\n\n M_guess = np.max(y)\n #a_guess = (-np.log(1 - (y / M_guess)) / np.log(x)).mean()\n a_guess = 1\n\n opt, cov = curve_fit(powerlaw_plateau, x, y, p0=(M_guess, a_guess))\n\n return opt, cov",
"def bootstrap_spectral(data, fs, nperseg, fwin, nit=1000, ci=95,\n trim=0.2, calc_coherence=True):\n print(r\"Spectral estimates from {:.1f} to {:.1f} Hz\".format(*fwin))\n nchans = data.shape[0]\n # get the indices for the confidence intervals\n ci_idx = np.array([\n int((0.5 - ci/200.)*(nit-1)), # lower CI\n (nit-1)//2, # mean\n int(np.ceil((0.5 + ci/200.)*(nit-1))) # upper CI\n ])\n # get the frequencies\n f = np.fft.rfftfreq(nperseg, d=1./fs)\n f_keep = np.all([\n f >= fwin[0],\n f <= fwin[1]],\n axis = 0)\n print('Number of Fourier coefficients: %d' % f_keep.sum())\n f = f[f_keep]\n psd_segs = scipy.signal.spectral._spectral_helper(data, data, axis=-1,\n nperseg = nperseg, fs=fs, mode='psd',\n scaling='density')[2][:,f_keep,:]\n # get the indices with replacement of the array for the bootstrap\n bootstrap_indices = np.random.random_integers(\n low = 0, high = psd_segs.shape[-1] - 1,\n size = (nit, psd_segs.shape[-1]))\n # perform the bootstrap for the psd\n psd_bootstrap = np.array(\n [scipy.stats.trim_mean(psd_segs[...,idx], trim, axis=-1)\n for idx in bootstrap_indices])\n if calc_coherence:\n # perform the bootstrap for coh and icoh\n coh = []\n icoh = []\n phs = []\n for i in range(nchans):\n for j in range(i):\n print('Channel %d vs. %d.' % (i + 1, j + 1))\n csd_segs = scipy.signal.spectral._spectral_helper(\n data[i], data[j], axis=-1, nperseg = nperseg, fs=fs,\n mode='psd', scaling='density')[2][f_keep]\n # perform the bootstrap\n csd_bootstrap = np.array([\n (scipy.stats.trim_mean(\n np.real(csd_segs[...,idx]), trim, axis=-1) + \n 1j*scipy.stats.trim_mean(\n np.imag(csd_segs[...,idx]), trim, axis=-1))\n for idx in bootstrap_indices])\n # get the phase spectrum confidence intervals\n phs.append(np.sort(np.angle(csd_bootstrap,\n deg=True), axis=0)[ci_idx])\n # normalize the csd bootstrap with the product of the psds\n # for the coherence estimates\n csd_bootstrap /= np.sqrt(psd_bootstrap[:,i]*psd_bootstrap[:,j])\n # get the confidence interval for coherence and icoh\n coh.append(np.sort(np.abs(csd_bootstrap), axis=0)[ci_idx])\n icoh.append(np.sort(np.imag(csd_bootstrap), axis=0)[ci_idx])\n # get the CI of the psd\n psd = np.swapaxes(np.sort(psd_bootstrap, axis=0)[ci_idx], 0, 1)\n if calc_coherence:\n return f, psd, np.array(coh), np.array(icoh), np.array(phs)\n else:\n return f, psd",
"def bootstrap_spectral2(data1, data2, fs, nperseg, fwin, nit=1000, ci=95,\n trim=0.2, calc_coherence=True):\n print(\"Spectral estimates from %.1f to %.1f Hz\" % (fwin[0], fwin[1]))\n nchans1 = data1.shape[0]\n nchans2 = data2.shape[0]\n # get the indices for the confidence intervals\n ci_idx = np.array([\n int((0.5 - ci/200.)*(nit-1)), # lower CI\n (nit-1)//2, # mean\n int(np.ceil((0.5 + ci/200.)*(nit-1))) # upper CI\n ])\n # get the frequencies\n f = np.fft.rfftfreq(nperseg, d=1./fs)\n f_keep = np.all([\n f >= fwin[0],\n f <= fwin[1]],\n axis = 0)\n print('Number of Fourier coefficients: %d' % f_keep.sum())\n f = f[f_keep]\n psd_segs1 = scipy.signal.spectral._spectral_helper(data1, data1, axis=-1,\n nperseg = nperseg, fs=fs, mode='psd',\n scaling='density')[2][:,f_keep,:]\n psd_segs2 = scipy.signal.spectral._spectral_helper(data2, data2,\n axis=-1, nperseg = nperseg, fs=fs, mode='psd',\n scaling='density')[2][:,f_keep,:]\n # get the indices with replacement of the array for the bootstrap\n bootstrap_indices = np.random.random_integers(\n low = 0, high = psd_segs1.shape[-1] - 1,\n size = (nit, psd_segs1.shape[-1]))\n # perform the bootstrap for the psd\n psd_bootstrap1 = np.array(\n [scipy.stats.trim_mean(psd_segs1[...,idx], trim, axis=-1)\n for idx in bootstrap_indices])\n psd_bootstrap2 = np.array(\n [scipy.stats.trim_mean(psd_segs2[...,idx], trim, axis=-1)\n for idx in bootstrap_indices])\n if calc_coherence:\n # perform the bootstrap for coh and icoh\n coh = []\n icoh = []\n phs = []\n for i in range(nchans1):\n for j in range(nchans2):\n print('Channel %d vs. %d.' % (i + 1, j + 1))\n csd_segs = scipy.signal.spectral._spectral_helper(\n data1[i], data2[j], axis=-1, nperseg = nperseg, fs=fs,\n mode='psd', scaling='density')[2][f_keep]\n # perform the bootstrap\n csd_bootstrap = np.array([\n (scipy.stats.trim_mean(\n np.real(csd_segs[...,idx]), trim, axis=-1) + \n 1j*scipy.stats.trim_mean(\n np.imag(csd_segs[...,idx]), trim, axis=-1))\n for idx in bootstrap_indices])\n # get the phase spectrum confidence intervals\n phs.append(np.sort(np.angle(csd_bootstrap,\n deg=True), axis=0)[ci_idx])\n # normalize the csd bootstrap with the product of the psds\n # for the coherence estimates\n csd_bootstrap /= (\n np.sqrt(psd_bootstrap1[:,i]*psd_bootstrap2[:,j]))\n # get the confidence interval for coherence and icoh\n coh.append(np.sort(np.abs(csd_bootstrap), axis=0)[ci_idx])\n icoh.append(np.sort(np.imag(csd_bootstrap), axis=0)[ci_idx])\n # get the CI of the psd\n psd1 = np.swapaxes(np.sort(psd_bootstrap1, axis=0)[ci_idx], 0, 1)\n psd2 = np.swapaxes(np.sort(psd_bootstrap2, axis=0)[ci_idx], 0, 1)\n if calc_coherence:\n return f, psd1, psd2, np.array(coh), np.array(icoh), np.array(phs)\n else:\n return f, psd1, psd2",
"def psf_builder(data, masks, shifts, fit_parms, fit_vars, parms):\n xs = parms.patch_grid[0].ravel()[None, :] + shifts[:, 0, None]\n ys = parms.patch_grid[1].ravel()[None, :] + shifts[:, 1, None]\n\n bkgs = make_background(data, fit_parms, parms.background)\n scaled = np.abs(data - bkgs) / fit_parms[:, 0][:, None]\n assert parms.background == 'constant', 'need to generalize for diff bkgs'\n scaled_vars = (scaled / fit_parms[:, 0][:, None]) ** 2. * \\\n fit_vars[:, 0, None]\n scaled_vars += (1. / fit_parms[:, 0][:, None]) ** 2. * fit_vars[:, -1, None]\n\n ivars = np.zeros(masks[masks == True].size)\n values = np.zeros(masks[masks == True].size)\n masked_xs = np.zeros(masks[masks == True].size)\n masked_ys = np.zeros(masks[masks == True].size)\n ind = 0\n for i in range(data.shape[0]):\n chunk = masks[i][masks[i] == 1].size\n values[ind: ind + chunk] = scaled[i][masks[i]]\n ivars[ind: ind + chunk] = 1. / scaled_vars[i][masks[i]]\n masked_xs[ind: ind + chunk] = xs[i][masks[i]]\n masked_ys[ind: ind + chunk] = ys[i][masks[i]]\n ind += chunk\n\n return binned_model(masked_xs, masked_ys, values, ivars, parms.patch_shape,\n parms.psf_model_shape)\n #return kernel_model(values, masked_xs, masked_ys, parms)",
"def _run_fit(self, use_lc, prot_lims=[0.1,70]):\n\n if use_lc==\"raw\":\n logging.debug(\"fitting raw lc\")\n tt, ff, uu = self.time, self.flux, self.unc_flux\n elif (use_lc==\"detrended\") or (use_lc==\"det\"):\n logging.debug(\"fitting detrended lc\")\n tt, ff, uu = self.time, self.det_flux, self.det_unc\n else:\n logging.debug(\"fitting other lc\")\n tt, ff, uu = use_lc\n\n # Test the periodogram and pick the best period and power\n ls_out = prot.run_ls(tt, ff, uu, threshold=self.power_threshold,\n prot_lims=prot_lims, run_bootstrap=True)\n# fund_prot, fund_power, periods_to_test, periodogram = ls_out[:4]\n\n return ls_out",
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def run_fit(spec, ftype=0):\n\n if ftype <= 3: # All fits between 2-7 keV\n\n spec.ignore('**-2.0, 7.0-**') # Tail only for spectral variation!\n\n model = xs.Model('phabs*powerlaw')\n model.phabs.nH = 0.7\n model.phabs.nH.frozen = True\n model.powerlaw.PhoIndex = 1\n model.powerlaw.norm = 1\n xs.Fit.perform() # First round fit to phabs*po alone, regardless of ftype\n\n if ftype == 1: # Excise S line\n\n spec.ignore('2.3-2.6')\n xs.Fit.perform()\n spec.notice('2.3-2.6')\n\n elif ftype == 2: # Excise S and Ar lines\n\n spec.ignore('2.3-2.6, 3.0-3.2')\n xs.Fit.perform()\n spec.notice('2.3-2.6, 3.0-3.2')\n\n elif ftype == 3: # Fit S line\n\n # Get parameter values from current phabs*po model\n plist = [p.values[0] for p in [model(1), model(2), model(3)]]\n plist.extend([2.45, 2e-2, 5e-7]) # LineE, Sigma, norm\n\n # Make new model w/ Si line, restore old parameters and add guesses\n model = xs.Model('phabs*(powerlaw + gaussian)')\n model.setPars(*plist)\n model.phabs.nH.frozen=True # Must reissue freeze command\n\n model.gaussian.Sigma.frozen=True\n model.gaussian.LineE.frozen=True\n xs.Fit.perform()\n\n model.gaussian.Sigma.frozen=False\n xs.Fit.perform()\n model.gaussian.LineE.frozen=False\n xs.Fit.perform()\n\n elif ftype == 4: # All fits between 2.6-7 keV\n\n spec.ignore('**-2.6, 7.0-**')\n\n model = xs.Model('phabs*powerlaw')\n model.phabs.nH = 0.7\n model.phabs.nH.frozen = True\n model.powerlaw.PhoIndex = 1\n model.powerlaw.norm = 1\n xs.Fit.perform() # First round fit to phabs*po alone, regardless of ftype\n\n else:\n raise Exception('Invalid fit type specified (got {})'.format(ftype))\n\n return model",
"def fitting(fitfunc, X, Y, start_parm, correlated=True, verbose=True):\n errfunc = lambda p, x, y, error: np.dot(error, (y-fitfunc(p,x)).T)\n \n # compute inverse, cholesky decomposed covariance matrix\n if not correlated:\n cov = np.diag(np.diagonal(np.cov(Y.T)))\n else:\n cov = np.cov(Y.T)\n cov = (np.linalg.cholesky(np.linalg.inv(cov))).T\n\n # degrees of freedom\n dof = float(Y.shape[1]-len(start_parm)) \n # create results arrays\n res = np.zeros((Y.shape[0], len(start_parm)))\n res_cov = np.zeros((len(start_parm), len(start_parm)))\n chisquare = np.zeros(Y.shape[0])\n # The FIT to the boostrap samples\n for b in range(0, Y.shape[0]):\n p,cov1,infodict,mesg,ier = leastsq(errfunc, start_parm, \n args=(X, Y[b,:], cov), full_output=1, factor=0.1)\n chisquare[b] = float(sum(infodict['fvec']**2.))\n res[b] = np.array(p)\n if b==0:\n # print(cov1)\n res_cov = cov1*chisquare[b]/dof\n # print(res_cov)\t\n # calculate mean and standard deviation\n res_mean, res_std = af.calc_error(res)\n # chi2 = np.median(chisquare)\n # p-value calculated\n pvals_originfit = 1. - scipy.stats.chi2.cdf(chisquare[0], dof)\n \n # The fit to the mean value\n y = np.mean(Y, axis=0)\n p,cov1,infodict,mesg,ier = leastsq(errfunc, start_parm, \\\n args=(X, y, cov), full_output=1)\n chisquare_meanfit = float(sum(infodict['fvec']**2.))\n pvals_meanfit = 1. - scipy.stats.chi2.cdf(chisquare_meanfit, dof)\n # writing results to screen\n if verbose:\n if correlated:\n print(\"fit results for a correlated fit:\")\n else:\n print(\"fit results for an uncorrelated fit:\")\n print(\"degrees of freedom: %f\\n\" % dof)\n \n print(\"bootstrap fit:\")\n for rm, rs in zip(res_mean, res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n #print(\"Chi^2/dof: %.6e +/- %.6e\\n\" % (chi2/dof, np.std(chisquare)/dof))\n\n print(\"mean value fit:\")\n for rm, rs in zip(p, res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n print(\" Chi^2/dof: %.6e \" % (chisquare_meanfit / dof))\n print(\" p-value: %lf\" % pvals_meanfit) \n\n print(\"original data fit:\")\n for rm, rs in zip(res[0], res_std):\n print(\" %.6e +/- %.6e\" % (rm, rs))\n print(\" Chi^2/dof: %.6e \" % (chisquare[0]/dof))\n print(\" p-value: %lf\" % pvals_originfit) \n return res, res_cov.flatten(), chisquare[0]/dof, pvals_originfit",
"def CatPower(X, factors, RMSE=1, alpha=0.05):\n\n varis = [1] + [len(x) - 1 for x in factors]\n if np.sum(varis) != X.shape[1]:\n raise Exception(\"Number of variables do not match\")\n n = X.shape[0]\n p = X.shape[1]-1\n nvar = len(varis)\n vec = []\n for i in np.arange(nvar):\n n_i = varis[i]\n vec.append( np.tile( [1,-1], int( np.ceil( n_i/2+1) ) )[0:n_i] )\n beta_A = np.concatenate( vec )\n XX = np.dot( np.transpose(X), X) \n try:\n XX_inv = np.linalg.inv(XX)\n except:\n pows = []\n for i in np.arange(1,nvar):\n pows.append(np.nan)\n return pows\n # skip intercept for the time being. To do: add power analysis for continuous parameters\n pows = []\n for i in np.arange(1,nvar):\n p_i = int( np.sum(varis[0:i]))\n n_i = varis[i]\n lambda_i = Linc(XX_inv, beta_A, i, n_i, varis, nvar, RMSE)\n fc_i = FDist.ppf(1-alpha, n_i, n-p-1)\n pow_i = 1-ncFDist.cdf(fc_i, n_i, n-p-1,lambda_i)\n pows.append(pow_i)\n return pows",
"def call_pls(chrom,xdata,factors,mask,data):\n scores = []\n \n for i in range(chrom.shape[0]):\n if _remdup(chrom[i]) == 0:\n #extract vars from xdata\n slice = scipy.take(xdata,chrom[i,:].tolist(),1)\n collate = 0\n for nF in range(mask.shape[1]):\n #split in to training and test\n try:\n pls_output = pls(slice,data['class'][:,0][:,nA],mask[:,nF].tolist(),factors)\n \n if min(pls_output['rmsec']) <= min(pls_output['rmsepc']):\n collate += pls_output['RMSEPC']\n else:\n collate += 10.0**5\n except:\n collate = 0\n \n if collate != 0:\n scores.append(collate/float(mask.shape[1]))\n else:\n scores.append(10.0**5)\n else:\n scores.append(10.0**5)\n \n return scipy.asarray(scores)[:,nA]",
"def _get_power_straps_for_hardmacros(self, layer_name: str, pitch: Decimal, width: Decimal, spacing: Decimal, offset: Decimal, bbox: Optional[List[Decimal]], nets: List[str]) -> None:\n check_abut = self.get_setting(\"par.power_straps_abutment\")\n\n fp_consts = self.get_placement_constraints()\n # Limit only to hardmacro type. Other types are not relevant.\n hardmacros = list(filter(lambda c: c.type == PlacementConstraintType.HardMacro, fp_consts))\n\n # Need to check against power obstructions\n obs = list(filter(lambda c: c.type == PlacementConstraintType.Obstruction, fp_consts))\n pwr_obs = list(filter(lambda c: c.obs_types is not None and ObstructionType.Power in c.obs_types, obs))\n\n # Get stackup information\n stackup = self.get_stackup()\n layer = stackup.get_metal(layer_name)\n dbu = stackup.grid_unit\n\n for macro in hardmacros:\n # Skip if master is not given\n if macro.master is None:\n continue\n elif self.get_setting(\"par.power_straps_abutment_macros\") is not None:\n if macro.master not in self.get_setting(\"par.power_straps_abutment_macros\"):\n continue\n # Skip if hardmacro is physical only\n if get_or_else(macro.create_physical, False):\n continue\n # Confine to {top_layer, top_layer + 1}, skip if not given\n if macro.top_layer is None:\n continue\n else:\n top_idx = stackup.get_metal(macro.top_layer).index\n if layer.index < top_idx or layer.index > top_idx + 1:\n continue\n\n # Skip and log error if macro falls outside bbox (TODO: support rectilinear bbox)\n oob = False\n orientation = get_or_else(macro.orientation, \"r0\").lower()\n if bbox is not None:\n # Check ll corner if width & height are given\n if macro.width is not None and macro.height is not None:\n # Width/height swap depending on rotation\n if orientation in [\"r90\", \"r270\"]:\n oob = macro.x + macro.height < bbox[0] or macro.y + macro.height < bbox[1]\n oob = macro.x + macro.width < bbox[0] or macro.y + macro.height < bbox[1]\n oob = macro.x > bbox[2] or macro.y > bbox[3]\n if oob:\n self.logger.error(f\"Hardmacro instance \\\"{macro.path}\\\" is not placed within the power strap bounding box for layer {layer.name}! Double check that you will supply power to it.\")\n continue\n\n # Log error if a power obstruction intersects with macro (no skip)\n check_layer_idx = top_idx + (not check_abut)\n layer_pwr_obs = list(filter(lambda o: o.layers is not None and layer_name in o.layers, pwr_obs))\n if layer.index == check_layer_idx and len(layer_pwr_obs) > 0 and macro.width is not None and macro.height is not None:\n m_ll_x = macro.x\n m_ll_y = macro.y\n m_ur_x = macro.x + macro.width\n m_ur_y = macro.y + macro.height\n # Width/height swap depending on rotation\n if orientation.lower() in [\"r90\", \"r270\"]:\n m_ur_x = macro.x + macro.height\n m_ur_y = macro.y + macro.width\n\n for po in layer_pwr_obs:\n o_ll_x = po.x\n o_ll_y = po.y\n o_ur_x = po.x + po.width\n o_ur_y = po.y + po.height\n # Check for any overlap\n if not(m_ur_x <= o_ll_x or o_ur_x <= m_ll_x or m_ur_y <= o_ll_y or o_ur_y <= m_ll_y):\n self.logger.error(f\"Hardmacro instance \\\"{macro.path}\\\" is partially/fully obstructed on layer {layer.name} by power obstruction \\\"{po.path}\\\"! 
Double check that you will supply power to it.\")\n\n # Translate offset to the macro's origin\n if layer.direction == RoutingDirection.Vertical:\n offset_trans = (offset - macro.x) % pitch\n elif layer.direction == RoutingDirection.Horizontal:\n offset_trans = (offset - macro.y) % pitch\n else: # redistribution not supported\n continue\n # If offset + width of group is larger than width/height, at least first strap in group can't abut\n last_edge = offset_trans + (len(nets) - 1) * (width + spacing) + width\n oob = False\n if macro.width is not None and macro.height is not None:\n if layer.direction == RoutingDirection.Vertical:\n oob = (orientation in [\"r90\", \"r270\"] and last_edge > macro.height) or last_edge > macro.width\n if layer.direction == RoutingDirection.Horizontal:\n oob = (orientation in [\"r90\", \"r270\"] and last_edge > macro.width) or last_edge > macro.height\n if oob and layer.index == check_layer_idx:\n if check_abut:\n self.logger.error(f\"Hardmacro instance \\\"{macro.path}\\\" is placed such that a full group of power straps on layer {layer.name} cannot abut it! Double check your macro placement/size vs. power strap group pitch.\")\n else:\n self.logger.error(f\"Hardmacro instance \\\"{macro.path}\\\" is placed such that a full group of power straps on layer {layer.name} cannot via down! Double check your macro placement/size vs. power strap group pitch.\")\n\n # Append instance info\n self._hardmacro_power_straps.append({\n \"master\": macro.master,\n \"top_layer\": macro.top_layer,\n \"path\": macro.path,\n \"orientation\": orientation,\n \"layer\": layer_name,\n \"direction\": layer.direction,\n \"net_order\": nets,\n \"width\": int(width / dbu),\n \"spacing\": int(spacing / dbu),\n \"group_pitch\": int(pitch / dbu),\n \"offset\": int(offset_trans / dbu)\n })",
"def psd_1(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.logspace(np.log10(1./(dt*n)), np.log10(0.5*dt), 6)\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100)\n\n fit_log_psd(fql, lc, extra, '1')",
"def _fit_coefficients(fluxes, flux_uncertainties, scatter, lv_array,\n full_output=False):\n\n variance = flux_uncertainties**2 + scatter**2\n CiA = lv_array.T * np.tile(1./variance, (lv_array.shape[0], 1)).T\n ATCiAinv = np.linalg.inv(np.dot(lv_array, CiA))\n\n ATY = np.dot(lv_array, fluxes/variance)\n coefficients = np.dot(ATCiAinv, ATY)\n\n return (coefficients, ATCiAinv, variance)",
"def bootstrap(fit_func, xdata, ydata, iterations=100):\n opt_parameters, _ = curve_fit(fit_func, xdata, ydata)\n # NOTE: Do not use [[]]*len(...) since this creates three references to the same []\n samples = [[] for i in range(len(opt_parameters))]\n\n new_xdata = [0 for i in range(len(xdata))]\n new_ydata = [0 for i in range(len(ydata))]\n\n for _ in range(iterations):\n # Resample\n for i in range(len(ydata)):\n index_choice = random.randrange(0, len(xdata))\n new_xdata[i] = xdata[index_choice]\n new_ydata[i] = ydata[index_choice]\n\n # Curve fit and store the samples\n sampled_pars, _spcov = curve_fit(fit_func, new_xdata, new_ydata)\n\n for i, sp in enumerate(sampled_pars):\n samples[i].append(sp)\n\n return {np.average(sample): np.std(sample) for sample in samples}",
"def _calculate_fit_coeffs(data, params, fit_binning, nu_params=None,\n mu_params=None):\n logging.debug('Calculating fit coefficients')\n\n config = from_file(params['discr_sys_sample_config'].value)\n\n degree = int(params['poly_degree'].value)\n force_through_nominal = params['force_through_nominal'].value\n\n if force_through_nominal:\n def fit_func(vals, *poly_coeffs):\n return np.polynomial.polynomial.polyval(\n vals, [1.] + list(poly_coeffs)\n )\n else:\n def fit_func(vals, *poly_coeffs):\n return np.polynomial.polynomial.polyval(\n vals, list(poly_coeffs)\n )\n # add free param for constant term\n degree += 1\n\n template_maker = Pipeline(params['pipeline_config'].value)\n dataset_param = template_maker.params['dataset']\n\n def parse(string):\n return string.replace(' ', '').split(',')\n\n sys_fit_coeffs = OrderedDict()\n if nu_params is not None:\n sys_list = parse(config.get('neutrinos', 'sys_list'))\n nu_params = deepcopy(map(lambda x: x[3:], nu_params))\n\n if set(nu_params) != set(sys_list):\n raise AssertionError(\n 'Systematics list listed in the sample config file does '\n 'not match the params in the pipeline config file\\n {0} '\n '!= {1}'.format(set(nu_params), set(sys_list))\n )\n\n for sys in sys_list:\n ev_sys = 'neutrinos|' + sys\n runs = parse(config.get(ev_sys, 'runs')[1: -1])\n nominal = config.get(ev_sys, 'nominal')\n\n mapset_dict = OrderedDict()\n flavint_groups = None\n for run in runs:\n logging.info('Loading run {0} of systematic '\n '{1}'.format(run, sys))\n dataset_param.value = ev_sys + '|' + run\n template_maker.update_params(dataset_param)\n template = template_maker.get_outputs(\n idx=int(params['stop_after_stage'].m)\n )\n if not isinstance(template, Data):\n raise AssertionError(\n 'Template output is not a Data object, instead is '\n 'type {0}'.format(type(template))\n )\n if flavint_groups is None:\n flavint_groups = template.flavint_groups\n else:\n if set(flavint_groups) != set(template.flavint_groups):\n raise AssertionError(\n 'Mismatch of flavint_groups - ({0}) does not '\n 'match flavint_groups '\n '({1})'.format(flavint_groups,\n template.flavint_groups)\n )\n\n outputs = []\n for fig in template.keys():\n outputs.append(template.histogram(\n kinds = fig,\n binning = fit_binning,\n weights_col = 'pisa_weight',\n errors = False,\n name = str(NuFlavIntGroup(fig))\n ))\n mapset_dict[run] = MapSet(outputs, name=run)\n\n nom_mapset = mapset_dict[nominal]\n fracdiff_mapset_dict = OrderedDict()\n for run in runs:\n mapset = []\n for flavintg_map in mapset_dict[run]:\n # TODO(shivesh): error propagation?\n flavintg = flavintg_map.name\n mask = ~(nom_mapset[flavintg].hist == 0.)\n div = np.zeros(flavintg_map.shape)\n with np.errstate(divide='ignore', invalid='ignore'):\n div[mask] = \\\n unp.nominal_values(flavintg_map.hist[mask]) /\\\n unp.nominal_values(nom_mapset[flavintg].hist[mask])\n mapset.append(Map(\n name=flavintg, binning=flavintg_map.binning,\n hist=div\n ))\n fracdiff_mapset_dict[run] = MapSet(mapset)\n\n delta_runs = np.array([float(x) for x in runs])-float(nominal)\n\n coeff_binning = OneDimBinning(\n name='coeff', num_bins=degree, is_lin=True, domain=[-1, 1]\n )\n combined_binning = fit_binning + coeff_binning\n\n params_mapset = []\n for fig in template.keys():\n # TODO(shivesh): Fix numpy warning on this line\n pvals_hist = np.empty(map(int, combined_binning.shape),\n dtype=object)\n hists = [fracdiff_mapset_dict[run][fig].hist for run in runs]\n zip_hists = np.dstack(hists)\n for idx in np.ndindex(fit_binning.shape):\n y_values = 
[]\n y_sigma = []\n for run in fracdiff_mapset_dict:\n y_values.append(unp.nominal_values(fracdiff_mapset_dict[run][fig].hist[idx]))\n y_sigma.append(unp.std_devs(fracdiff_mapset_dict[run][fig].hist[idx]))\n\n if np.any(y_sigma):\n popt, pcov = curve_fit(\n fit_func, delta_runs, y_values, sigma=y_sigma,\n p0=np.ones(degree)\n )\n else:\n popt, pcov = curve_fit(\n fit_func, delta_runs, y_values,\n p0=np.ones(degree)\n )\n # perr = np.sqrt(np.diag(pcov))\n # pvals = unp.uarray(popt, perr)\n pvals_hist[idx] = popt\n pvals_hist = np.array(pvals_hist.tolist())\n params_mapset.append(Map(\n name=fig, binning=combined_binning, hist=pvals_hist\n ))\n params_mapset = MapSet(params_mapset, name=sys)\n\n if sys in sys_fit_coeffs:\n sys_fit_coeffs[sys] = MapSet(\n [sys_fit_coeffs[sys], params_mapset]\n )\n else:\n sys_fit_coeffs[sys] = params_mapset\n\n if mu_params is not None:\n sys_list = parse(config.get('muons', 'sys_list'))\n mu_params = deepcopy(map(lambda x: x[3:], mu_params))\n\n if set(mu_params) != set(sys_list):\n raise AssertionError(\n 'Systematics list listed in the sample config file does '\n 'not match the params in the pipeline config file\\n {0} '\n '!= {1}'.format(set(mu_params), set(sys_list))\n )\n\n for sys in sys_list:\n ev_sys = 'muons|' + sys\n runs = parse(config.get(ev_sys, 'runs')[1: -1])\n nominal = config.get(ev_sys, 'nominal')\n\n map_dict = OrderedDict()\n flavint_groups = None\n for run in runs:\n logging.info('Loading run {0} of systematic '\n '{1}'.format(run, sys))\n dataset_param.value = ev_sys + '|' + run\n template_maker.update_params(dataset_param)\n template = template_maker.get_outputs(\n idx=int(params['stop_after_stage'].m)\n )\n if not isinstance(template, Data):\n raise AssertionError(\n 'Template output is not a Data object, instead is '\n 'type {0}'.format(type(template))\n )\n if not template.contains_muons:\n raise AssertionError(\n 'Template output does not contain muons'\n )\n\n output = template.histogram(\n kinds = 'muons',\n binning = fit_binning,\n # NOTE: weights cancel in fraction\n weights_col = None,\n errors = False,\n name = 'muons'\n )\n map_dict[run] = output\n\n nom_map = map_dict[nominal]\n fracdiff_map_dict = OrderedDict()\n for run in runs:\n mask = ~(nom_map.hist == 0.)\n div = np.zeros(nom_map.shape)\n with np.errstate(divide='ignore', invalid='ignore'):\n div[mask] = \\\n unp.nominal_values(map_dict[run].hist[mask]) /\\\n unp.nominal_values(nom_map.hist[mask])\n fracdiff_map_dict[run] = Map(\n name='muons', binning = nom_map.binning, hist=div\n )\n\n delta_runs = np.array([float(x) for x in runs])-float(nominal)\n\n coeff_binning = OneDimBinning(\n name='coeff', num_bins=degree, is_lin=True, domain=[-1, 1]\n )\n combined_binning = fit_binning + coeff_binning\n\n pvals_hist = np.empty(map(int, combined_binning.shape),\n dtype=object)\n hists = [fracdiff_map_dict[run].hist for run in runs]\n zip_hists = np.dstack(hists)\n for idx in np.ndindex(fit_binning.shape):\n y_values = [] \n y_sigma = []\n for run in fracdiff_mapset_dict:\n y_values.append(unp.nominal_values(fracdiff_mapset_dict[run][fig].hist[idx]))\n y_sigma.append(unp.std_devs(fracdiff_mapset_dict[run][fig].hist[idx]))\n if np.any(y_sigma):\n popt, pcov = curve_fit(\n fit_func, delta_runs, y_values, sigma=y_sigma,\n p0=np.ones(degree)\n )\n else:\n popt, pcov = curve_fit(\n fit_func, delta_runs, y_values,\n p0=np.ones(degree)\n )\n # perr = np.sqrt(np.diag(pcov))\n # pvals = unp.uarray(popt, perr)\n pvals_hist[idx] = popt\n pvals_hist = np.array(pvals_hist.tolist())\n 
params_map = Map(\n name='muons', binning=combined_binning, hist=pvals_hist\n )\n if sys in sys_fit_coeffs:\n sys_fit_coeffs[sys] = MapSet(\n [sys_fit_coeffs[sys], params_map]\n )\n else:\n sys_fit_coeffs[sys] = params_map\n\n return sys_fit_coeffs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
cc = pylag.mlfit.MLCrossSpectrum.cross_cov_matrix(params) Calculate the cross component of the covariance matrix for the model cross spectrum (i.e. the upper right and lower left quadrants of the matrix for the terms that use rates from both light curves).
|
def cross_cov_matrix(self, params):
# if no model is specified, the PSD model is just the PSD value in each frequency bin
if self.cpsd_model is None:
cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm
else:
cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm
# likewise for the (phase) lags
if self.lag_model is None:
lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])
else:
lags = self.lag_model(params, self.fbins.bin_cent)
cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)
return cov
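For illustration, a self-contained numpy sketch of the same bin-by-bin sum, with small made-up matrices standing in for the precomputed cos_integral and sin_integral attributes; every value below is an assumption chosen only to show the shapes involved.

import numpy as np

# Two frequency bins, 3x3 integral matrices per bin (illustrative values only).
cos_integral = [np.full((3, 3), 0.2), np.full((3, 3), 0.1)]
sin_integral = [np.full((3, 3), 0.05), np.full((3, 3), 0.02)]
cpsd = np.array([1.5, 0.7])   # cross power per bin, already scaled by psdnorm
lags = np.array([0.3, -0.1])  # phase lag per bin, in radians

# Each bin contributes p * (C*cos(phi) - S*sin(phi)); the cross-covariance is the sum over bins.
cc = np.sum([p * (c * np.cos(phi) - s * np.sin(phi))
             for p, c, s, phi in zip(cpsd, cos_integral, sin_integral, lags)], axis=0)
print(cc.shape)  # (3, 3)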
|
[
"def cov_matrix(self, params):\n if self.freeze_psd:\n if self.ac1 is None or self.ac2 is None:\n raise AssertionError(\"Autocovariance matrices are not available. Did you fit the PSDs?\")\n ac1 = self.ac1\n ac2 = self.ac2\n else:\n ac1 = self.mlpsd1.cov_matrix(params)\n ac2 = self.mlpsd2.cov_matrix(params)\n\n cc = self.cross_cov_matrix(params)\n\n return np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])",
"def cov_matrix_deriv(self, params):\n cc = self.cross_cov_matrix_deriv(params)\n\n if self.freeze_psd:\n Z = np.zeros_like(self.ac1)\n return np.stack(\n [np.vstack([np.hstack([Z, cc[..., p].T]), np.hstack([cc[..., p], Z])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n else:\n ac1 = self.mlpsd1.cov_matrix_deriv(params)\n ac2 = self.mlpsd2.cov_matrix_deriv(params)\n return np.stack(\n [np.vstack([np.hstack([ac1[..., p], cc[..., p].T]), np.hstack([cc[..., p], ac2[..., p]])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n return np.stack([np.vstack([np.hstack([ac1[...,p], cc[...,p].T]), np.hstack([cc[...,p], ac2[...,p]])]) for p in range(len(self.params))], axis=-1)",
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)",
"def cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n # note the factor of 2 to integrate over the negative frequencies too!\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n else:\n psd = self.model(params, self.fbins.bin_cent) * self.psdnorm\n\n cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)\n\n return cov",
"def cross_product_matrix(Y):\n cpm = np.asarray([ [0, -Y[2],Y[1]],\\\n [Y[2], 0, -Y[0]],\\\n [-Y[1],Y[0], 0]])\n return cpm",
"def compute_covariance_matrix(X):\n return np.cov(X, rowvar=0)",
"def cal_cov_matrix(training_data):\n\t# cov_matrix = np.transpose(training_data).dot(training_data)/(training_data.shape[0] - 1)\n\tcov_matrix = training_data.T.dot(training_data)\n\t# cal cov_matrix by numpy\n\t# cov_matrix = np.cov(training_data, rowvar=False, bias=True)\n\tprint('cov_matrix shape ::: ', cov_matrix.shape)\n\t\"\"\" cal eig vector and value \"\"\"\n\teig_val, eig_vec = np.linalg.eig(cov_matrix)\n\t# print('val :::', eig_val)\n\t# print('sorted val :::', np.sort(eig_val))\n\t\"\"\" return the largest max_index eignvalues \"\"\"\n\tsort_index = np.argsort(-eig_val)\n\teig_val = sorted(eig_val, reverse=True)\n\t# eig_val = np.sort(-eig_val)\n\treturn sort_index, eig_val, eig_vec",
"def cross_covariance(\n self, kernel: Kernel, x: Float[Array, \"N D\"], y: Float[Array, \"M D\"]\n ) -> Float[Array, \"N M\"]:\n # TODO: This is currently a dense implementation. We should implement a sparse LinearOperator for non-square cross-covariance matrices.\n cross_cov = vmap(lambda x: vmap(lambda y: kernel(x, y))(y))(x)\n return cross_cov",
"def get_cross_cov(self, pose1, pose2, robot_idx, odom):\n if not odom:\n return np.zeros((6, 6))\n\n if robot_idx == 'a':\n single_graph = self.gtsam_graph1\n elif robot_idx == 'b':\n single_graph = self.gtsam_graph2\n assert pose1.j == pose2.i == 'w'\n inversed_cross_cov = single_graph.cross_cov(pose1.i, pose2.j)\n return inversed_cross_cov",
"def covariance_matrices(self):\n return [x.covariance_matrix for x in self.random_effects]",
"def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X \n n_samples = np.shape(X)[0]\n covar_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n return np.array(covar_matrix, dtype=float)",
"def covarMatrix(x):\n return np.matrix(x - np.mean(x, axis=0)[np.newaxis, :]).T * np.matrix(x - np.mean(x, axis=0)[np.newaxis, :])",
"def constant_cov(x,y,c):\n return c*np.ones(x.shape[0])",
"def cross_correlation(self) -> float:\n return self.moment(1, 1)",
"def get_covariance(self):\n log.info(\"Calculating covariance matrix (this may take a while...)\")\n return int_nf.get_covariance(\n frame_data=self.frames.data,\n frame_valid=self.frames.valid,\n frame_weight=self.frames.relative_weight,\n channel_flags=self.channels.data.flag,\n channel_weight=self.channels.data.weight,\n sample_flags=self.frames.sample_flag,\n frame_flags=self.frames.flag,\n source_flags=self.flagspace.convert_flag('SOURCE_FLAGS').value)",
"def random_matrix_theory_based_cov(self, returns_matrix):\r\n\t\tfiltered_covariance_matrix = self.strategyHelperFunctions.random_matrix_theory_based_cov(returns_matrix)\r\n\t\treturn filtered_covariance_matrix",
"def covariance_matrix(self):\n self.covariance = np.dot(self.matrix, self.matrix.transpose())",
"def crossCorrelation(y1,y2,mode='same'):\n\tCC=_np.correlate(y1,y2,mode=mode)\n\treturn CC"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
c = pylag.mlfit.MLCrossSpectrum.cov_matrix(params) Calculate the cross spectral model covariance matrix for the specified parameter values. The cross spectral covariance matrix is the block stack of the autocovariance and cross-covariance matrices and applies to the stacked data vector. If the freeze_psd member variable is True, the stored autocovariance matrices will be used; otherwise they will be computed for the current model parameters.
|
def cov_matrix(self, params):
if self.freeze_psd:
if self.ac1 is None or self.ac2 is None:
raise AssertionError("Autocovariance matrices are not available. Did you fit the PSDs?")
ac1 = self.ac1
ac2 = self.ac2
else:
ac1 = self.mlpsd1.cov_matrix(params)
ac2 = self.mlpsd2.cov_matrix(params)
cc = self.cross_cov_matrix(params)
return np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])
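A self-contained numpy sketch of the block stacking performed by the return statement, using toy matrices as stand-ins for the autocovariance and cross-covariance blocks; the sizes and values are assumptions for illustration only.

import numpy as np

n = 4                                    # number of time bins per light curve (illustrative)
rng = np.random.default_rng(0)
ac1 = np.eye(n)                          # stand-in for the band-1 autocovariance block
ac2 = 2.0 * np.eye(n)                    # stand-in for the band-2 autocovariance block
cc = rng.normal(size=(n, n))             # stand-in for the cross-covariance block

# Covariance of the stacked data vector [rates1, rates2]:
# [[ac1, cc.T],
#  [cc,  ac2 ]]
cov = np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])
print(cov.shape)  # (2*n, 2*n)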
|
[
"def cross_cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)\n return cov",
"def cov_matrix_deriv(self, params):\n cc = self.cross_cov_matrix_deriv(params)\n\n if self.freeze_psd:\n Z = np.zeros_like(self.ac1)\n return np.stack(\n [np.vstack([np.hstack([Z, cc[..., p].T]), np.hstack([cc[..., p], Z])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n else:\n ac1 = self.mlpsd1.cov_matrix_deriv(params)\n ac2 = self.mlpsd2.cov_matrix_deriv(params)\n return np.stack(\n [np.vstack([np.hstack([ac1[..., p], cc[..., p].T]), np.hstack([cc[..., p], ac2[..., p]])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n return np.stack([np.vstack([np.hstack([ac1[...,p], cc[...,p].T]), np.hstack([cc[...,p], ac2[...,p]])]) for p in range(len(self.params))], axis=-1)",
"def cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n # note the factor of 2 to integrate over the negative frequencies too!\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n else:\n psd = self.model(params, self.fbins.bin_cent) * self.psdnorm\n\n cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)\n\n return cov",
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)",
"def compute_covariance_matrix(X):\n return np.cov(X, rowvar=0)",
"def get_covariance(self):\n log.info(\"Calculating covariance matrix (this may take a while...)\")\n return int_nf.get_covariance(\n frame_data=self.frames.data,\n frame_valid=self.frames.valid,\n frame_weight=self.frames.relative_weight,\n channel_flags=self.channels.data.flag,\n channel_weight=self.channels.data.weight,\n sample_flags=self.frames.sample_flag,\n frame_flags=self.frames.flag,\n source_flags=self.flagspace.convert_flag('SOURCE_FLAGS').value)",
"def random_matrix_theory_based_cov(self, returns_matrix):\r\n\t\tfiltered_covariance_matrix = self.strategyHelperFunctions.random_matrix_theory_based_cov(returns_matrix)\r\n\t\treturn filtered_covariance_matrix",
"def covariance_matrix(self):\n self.covariance = np.dot(self.matrix, self.matrix.transpose())",
"def cal_cov_matrix(training_data):\n\t# cov_matrix = np.transpose(training_data).dot(training_data)/(training_data.shape[0] - 1)\n\tcov_matrix = training_data.T.dot(training_data)\n\t# cal cov_matrix by numpy\n\t# cov_matrix = np.cov(training_data, rowvar=False, bias=True)\n\tprint('cov_matrix shape ::: ', cov_matrix.shape)\n\t\"\"\" cal eig vector and value \"\"\"\n\teig_val, eig_vec = np.linalg.eig(cov_matrix)\n\t# print('val :::', eig_val)\n\t# print('sorted val :::', np.sort(eig_val))\n\t\"\"\" return the largest max_index eignvalues \"\"\"\n\tsort_index = np.argsort(-eig_val)\n\teig_val = sorted(eig_val, reverse=True)\n\t# eig_val = np.sort(-eig_val)\n\treturn sort_index, eig_val, eig_vec",
"def covariance_matrix_reconstruction(self):\n matrix_cov = np.zeros_like(self.cov_matrix)\n self.reconstruction_d_matrix()\n np.savetxt(os.path.join(os.getcwd(), 'd_matrix_test.dat'), self.d_matrix)\n np.savetxt(os.path.join(os.getcwd(), 'sigma_1_test.dat'), self.screen_data)\n dxinv = linalg.pinv2(self.d_matrix, cond=1e-8, rcond=1.e-8)\n np.savetxt(os.path.join(os.getcwd(), 'd_matrix_inv_test.dat'), dxinv)\n matrix_repr = np.dot(dxinv, self.screen_data)\n for ix in np.arange(self.cov_matrix.shape[0]):\n for iy in np.arange(self.cov_matrix.shape[1]):\n if ix == 0:\n value = deepcopy(matrix_repr[iy])\n elif ix == 1:\n if iy == 0:\n value = deepcopy(matrix_repr[1])\n elif iy == 1:\n value = deepcopy(matrix_repr[4])\n elif iy == 2:\n value = deepcopy(matrix_repr[5])\n else:\n value = deepcopy(matrix_repr[6])\n elif ix == 2:\n if iy == 0:\n value = deepcopy(matrix_repr[2])\n elif iy == 1:\n value = deepcopy(matrix_repr[5])\n elif iy == 2:\n value = deepcopy(matrix_repr[7])\n else:\n value = deepcopy(matrix_repr[8])\n else:\n if iy == 0:\n value = deepcopy(matrix_repr[3])\n elif iy == 1:\n value = deepcopy(matrix_repr[6])\n elif iy == 2:\n value = deepcopy(matrix_repr[8])\n else:\n value = deepcopy(matrix_repr[9])\n matrix_cov[ix, iy] = deepcopy(value)\n setattr(self, 'cov_matrix', matrix_cov)\n\n directory = os.path.join('C:\\\\', 'Users', 'qfi29231', 'Documents', 'spawn_emittances', 'Emittance_GUI',\n 'quad_scan_setup_0')\n np.savetxt(os.path.join(os.getcwd(), 'cov_matrix_test.dat'), self.cov_matrix)\n np.savetxt(os.path.join(os.getcwd(), 'cov_matrix_test_sdds.dat'),\n self.cov_matrix_from_simframe('CLA-S02-DIA-SCR-02', directory))\n np.savetxt(os.path.join(os.getcwd(), 'Cov_times_Smatrix.txt'), np.dot(self.cov_matrix, self.Smatrix))",
"def covariance_matrices(self):\n return [x.covariance_matrix for x in self.random_effects]",
"def Kcov(self,masses):\n from numpy.linalg import eigh\n from statistics import calc_cov\n #average and standar deviation for kinetic energy\n Kav=numpy.zeros(self.nat); Kdev=numpy.zeros(self.nat)\n #covariance of the kinetic energy\n Kcov=numpy.zeros(self.nat*self.nat).reshape(self.nat,self.nat)\n while self.loadframe():\n v2=(self.frame*self.frame).sum(axis=1);\n K=0.5*masses*v2; Kav+=K; Kdev+=K*K\n Kcov+=numpy.dot(K.reshape(self.nat,1),K.reshape(1,self.nat))\n results=calc_cov(Kav,Kdev,Kcov,self.nat,self.nframe)\n [evals,evecs]=eigh(results['cov']); #diagonalize\n perm=numpy.argsort(evals)[::-1] #sort from bigger to smaller\n evals=evals[perm]; evecs=evecs[:,perm]\n return {'Kav':results['av'], 'Kdev':results['dev'],\n 'Kcov':results['cov'], 'evals':evals, 'evecs':evecs}",
"def calc_monte_carlo_covariances(mats):\n\n # check input\n assert all([e.is_square() for e in mats])\n n = mats[0].n_rows()\n assert all([e.n_rows() == n for e in mats])\n\n # create an empty var-cov matrix\n covmat = flex.double(flex.grid(n**2,n**2), 0.0)\n\n for i in range(covmat.all()[0]):\n for j in range(covmat.all()[1]):\n a = [m[i] for m in mats]\n b = [m[j] for m in mats]\n covmat[i,j] = cov(a,b)\n\n return covmat",
"def get_unfolded_cov_matrix(self):\n return self._X_unfolded_covariance",
"def constant_cov(x,y,c):\n return c*np.ones(x.shape[0])",
"def get_cov(self, j):\n\t\tC = self.extern(self.sigma[j])\n\t\tif self.diag:\n\t\t\tC=diag(C)\n\t\treturn C",
"def calc_cov(self):\n cov = CovManipulation.correlation_to_cov(self.std_dev_df['s.d.(1)'].values, self.corr_df.values)\n cov_df = self.corr_df.copy()\n cov_df.loc[:, :] = cov\n\n return cov_df",
"def shared_covariance_model_fit(inputs, targets):\n if len(inputs.shape) == 1:\n inputs = inputs.rehape(inputs.size,1)\n N, D = inputs.shape\n inputs0 = inputs[targets==0,:]\n inputs1 = inputs[targets==1,:]\n N0 = inputs0.shape[0]\n N1 = inputs1.shape[0]\n pi = N1/N\n mean0, S0 = max_lik_mv_gaussian(inputs0)\n mean1, S1 = max_lik_mv_gaussian(inputs1)\n covmtx = (N0/N)*S0 + (N1/N)*S1\n return pi, mean0, mean1, covmtx"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
dc = pylag.mlfit.MLCrossSpectrum.cross_cov_matrix_deriv(params) Calculate the first derivative of the cross components of the covariance matrix with respect to the model parameters.
|
def cross_cov_matrix_deriv(self, params):
# if no model is specified, the PSD model is just the PSD value in each frequency bin
if self.cpsd_model is None:
cpsd = np.exp(np.array(
[params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm
else:
cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm
# likewise for the (phase) lags
if self.lag_model is None:
lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])
else:
lags = self.lag_model(params, self.fbins.bin_cent)
if self.cpsd_model is None:
cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)
else:
psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm
cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi
in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],
axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)
if self.lag_model is None:
lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)
else:
lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm
lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid
in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],
axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)
# this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)
# and (2) the phases
return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)
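The derivative terms above follow from differentiating each bin's contribution p * (C*cos(phi) - S*sin(phi)) with respect to the log cross power and the phase lag. The following is a minimal, self-contained numpy sketch (C, S, cross_cov and cross_cov_deriv are illustrative stand-ins, not pylag API) that checks the same chain rule against central finite differences:

import numpy as np

# Stand-ins for the cosine/sine integral matrices of a single frequency bin
rng = np.random.default_rng(0)
C = rng.normal(size=(5, 5))
S = rng.normal(size=(5, 5))

def cross_cov(ln_p, phi):
    # one bin's contribution to the cross-covariance matrix
    p = np.exp(ln_p)
    return p * (C * np.cos(phi) - S * np.sin(phi))

def cross_cov_deriv(ln_p, phi):
    # analytic derivatives w.r.t. the log cross power and the phase lag
    p = np.exp(ln_p)
    d_dlnp = p * (C * np.cos(phi) - S * np.sin(phi))   # extra factor p from the log parameter
    d_dphi = -p * (C * np.sin(phi) + S * np.cos(phi))
    return d_dlnp, d_dphi

ln_p, phi, eps = 0.3, 0.7, 1e-6
d_dlnp, d_dphi = cross_cov_deriv(ln_p, phi)
fd_lnp = (cross_cov(ln_p + eps, phi) - cross_cov(ln_p - eps, phi)) / (2 * eps)
fd_phi = (cross_cov(ln_p, phi + eps) - cross_cov(ln_p, phi - eps)) / (2 * eps)
print(np.allclose(d_dlnp, fd_lnp), np.allclose(d_dphi, fd_phi))   # True True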
|
[
"def cov_matrix_deriv(self, params):\n cc = self.cross_cov_matrix_deriv(params)\n\n if self.freeze_psd:\n Z = np.zeros_like(self.ac1)\n return np.stack(\n [np.vstack([np.hstack([Z, cc[..., p].T]), np.hstack([cc[..., p], Z])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n else:\n ac1 = self.mlpsd1.cov_matrix_deriv(params)\n ac2 = self.mlpsd2.cov_matrix_deriv(params)\n return np.stack(\n [np.vstack([np.hstack([ac1[..., p], cc[..., p].T]), np.hstack([cc[..., p], ac2[..., p]])]) for p in\n range(len([p for p in params if params[p].vary]))], axis=-1)\n\n return np.stack([np.vstack([np.hstack([ac1[...,p], cc[...,p].T]), np.hstack([cc[...,p], ac2[...,p]])]) for p in range(len(self.params))], axis=-1)",
"def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)",
"def cross_cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)\n return cov",
"def cov_matrix(self, params):\n if self.freeze_psd:\n if self.ac1 is None or self.ac2 is None:\n raise AssertionError(\"Autocovariance matrices are not available. Did you fit the PSDs?\")\n ac1 = self.ac1\n ac2 = self.ac2\n else:\n ac1 = self.mlpsd1.cov_matrix(params)\n ac2 = self.mlpsd2.cov_matrix(params)\n\n cc = self.cross_cov_matrix(params)\n\n return np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])",
"def get_mle_covariance(self, x = None, ddof = 1):\n if is_none(x):\n x = self.x\n\n # small number to avoid singularities\n return np.cov(x, ddof = 1, rowvar = False) + 1e-6 * np.identity(x.shape[1])",
"def gradient_energy(params, X):\n\n # electronic densities\n n_edens = X[1].shape[-1]\n edens = X[1].dot(params[-n_edens:])\n \n with np.errstate(divide='ignore'):\n # (-1) is there for the constant parameter value\n tmp = np.nan_to_num(-1.0/(2.0*np.sqrt(edens)) + 2.0*params[0]*edens)\n\n grad = np.empty((X[0].shape[0], len(params)), dtype=float)\n grad[:, 1:-n_edens] = X[0] # pair\n grad[:, 0] = np.sum(edens**2, axis=1) # embed\n grad[:, -n_edens:] = np.sum(tmp[:, :, None]*X[1], axis=1) # edens\n\n return grad",
"def cross_derivative(expr, dims, fd_order, deriv_order, x0=None, **kwargs):\n x0 = x0 or {}\n for d, fd, dim in zip(deriv_order, fd_order, dims):\n expr = generic_derivative(expr, dim=dim, fd_order=fd, deriv_order=d, x0=x0,\n **kwargs)\n\n return expr",
"def build_loss(cross_cov_mats, d, lambda_param=10):\n\n N = cross_cov_mats.shape[1] #or cross_cov_mats.shape[2]\n def loss(V_flat):\n\n V = np.reshape(V_flat, (N, d))\n reg_val = ortho_reg_fn(V, lambda_param)\n return -calc_pi_from_cross_cov_mats(cross_cov_mats, V) + reg_val\n\n return loss",
"def _detector_derivatives(\n self, isel, panel_id, parameterisation=None, dd_ddet_p=None, reflections=None\n ):\n\n # Get required data\n pv = self._pv.select(isel)\n D = self._D.select(isel)\n\n if dd_ddet_p is None:\n\n # get the derivatives of detector d matrix for this panel\n dd_ddet_p = parameterisation.get_ds_dp(\n multi_state_elt=panel_id, use_none_as_null=True\n )\n\n # replace explicit null derivatives with None\n dd_ddet_p = [\n None if e is None else flex.mat3_double(len(D), e.elems)\n for e in dd_ddet_p\n ]\n\n # calculate the derivative of pv for this parameter\n dpv_ddet_p = [\n der if der is None else (D * (der * -1.0)) * pv for der in dd_ddet_p\n ]\n\n return dpv_ddet_p",
"def log_likelihood(self, params, eval_gradient=True):\n c = self.cov_matrix(params)\n\n # add white noise along the leading diagonal\n # this should be the Poisson noise term when calculating a PSD\n if self.noise is not None:\n c += np.diag(self.noise)\n\n try:\n L = cho_factor(c, lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n try:\n # try doubling the noise first\n L = cho_factor(c + np.diag(self.noise), lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n #printmsg(2, \"WARNING: Couldn't invert covariance matrix with parameters \" + param2array(params))\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6) if eval_gradient else -1e6\n except ValueError:\n return (np.inf, np.zeros(len([p for p in params if params[p].vary]))) if eval_gradient else -np.inf\n\n alpha = cho_solve((L, True), self.data, check_finite=False)\n\n log_likelihood_dims = -0.5 * np.einsum(\"ik,ik->k\", self.data, alpha)\n log_likelihood_dims -= np.log(np.diag(L)).sum()\n log_likelihood_dims -= c.shape[0] / 2 * np.log(2 * np.pi)\n log_likelihood = log_likelihood_dims.sum(-1)\n\n if eval_gradient:\n c_gradient = self.cov_matrix_deriv(params)\n tmp = np.einsum(\"ik,jk->ijk\", alpha, alpha)\n tmp -= cho_solve((L, True), np.eye(c.shape[0]))[:, :, np.newaxis]\n gradient_dims = 0.5 * np.einsum(\"ijl,ijk->kl\", tmp, c_gradient)\n gradient = gradient_dims.sum(-1)\n\n # note we return -log_likelihood, so we can minimize it!\n return (log_likelihood, gradient) if eval_gradient else log_likelihood",
"def cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n # note the factor of 2 to integrate over the negative frequencies too!\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n else:\n psd = self.model(params, self.fbins.bin_cent) * self.psdnorm\n\n cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)\n\n return cov",
"def cross_covariance(\n self, kernel: Kernel, x: Float[Array, \"N D\"], y: Float[Array, \"M D\"]\n ) -> Float[Array, \"N M\"]:\n # TODO: This is currently a dense implementation. We should implement a sparse LinearOperator for non-square cross-covariance matrices.\n cross_cov = vmap(lambda x: vmap(lambda y: kernel(x, y))(y))(x)\n return cross_cov",
"def get_cpsd(self, params=None):\n if params is None:\n params = self.params\n\n if self.cpsd_model is None:\n return np.array([self.params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n return np.log(self.cpsd_model(self.params, self.fbins.bin_cent))",
"def _beam_derivatives(\n self, isel, parameterisation=None, ds0_dbeam_p=None, reflections=None\n ):\n\n # Get required data\n r = self._r.select(isel)\n e_X_r = self._e_X_r.select(isel)\n e_r_s0 = self._e_r_s0.select(isel)\n D = self._D.select(isel)\n\n if ds0_dbeam_p is None:\n\n # get the derivatives of the beam vector wrt the parameters\n ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True)\n\n ds0_dbeam_p = [\n None if e is None else flex.vec3_double(len(r), e.elems)\n for e in ds0_dbeam_p\n ]\n\n dphi_dp = []\n dpv_dp = []\n\n # loop through the parameters\n for der in ds0_dbeam_p:\n\n if der is None:\n dphi_dp.append(None)\n dpv_dp.append(None)\n continue\n\n # calculate the derivative of phi for this parameter\n dphi = (der.dot(r) / e_r_s0) * -1.0\n dphi_dp.append(dphi)\n\n # calculate the derivative of pv for this parameter\n dpv_dp.append(D * (e_X_r * dphi + der))\n\n return dpv_dp, dphi_dp",
"def compute_covariance_matrix(X):\n return np.cov(X, rowvar=0)",
"def flderiv(b, c, d):\n b2 = b * b\n c2 = c * c\n d2 = d * d\n bd = b * d\n df1db = 2*b + 6*d\n df1dc = 4*c\n df1dd = 6*b + 30*d\n df2db = 4*c * (b + 12*d)\n df2dc = 2 * (b2 + 24*bd + 105*d2 + 2)\n df2dd = 4 * c * (12*b + 105*d)\n df3db = 24 * (d + c2 * (2*b + 28*d) + 48 * d**3)\n df3dc = 48 * c * (1 + b2 + 28*bd + 141*d2)\n df3dd = 24 * (b + 28*b * c2 + 2 * d * (12 + 48*bd +\n 141*c2 + 225*d2) + d2 * (48*b + 450*d))\n return np.matrix([[df1db, df1dc, df1dd],\n [df2db, df2dc, df2dd],\n [df3db, df3dc, df3dd]])",
"def dfield_dpar(self, X, par):\r\n (D, M) = np.shape(X)\r\n deriv_par = np.zeros((D,M,len(par))) # initialize the output\r\n\r\n #=========================type your code below=========================\r\n no need to change this line if using 'lib_dynamics'\r\n #===============================end here===============================\r\n return deriv_par",
"def christoffel_deriv(self):\n q_inv = self.induced_metric(inverse=True)\n dq_inv = self.induced_metric(inverse=True, diff=1)\n dq = self.induced_metric(diff=1)\n ddq = self.induced_metric(diff=2)\n return christoffel_deriv(q_inv, dq_inv, dq, ddq)",
"def scipy_derivative(self, t, X, m1, m2):\n\t\tX = np.array(X)\n\t\tif X.ndim == 1:\n\t\t\tX = X[None,:]\n\t\t\tto_reshape = True\n\t\telse:\n\t\t\tto_reshape = False\n\n\t\tv = X[:,0] #(N,)\n\t\tL = X[:,1:4]\t#(N,3)\n\t\tS1 = X[:,4:7]\t#(N,3)\n\t\tS2 = X[:,7:]\t#(N,3)\n\t\t\n#\t\tv_dot, L_dot, S1_dot, S2_dot = self.__derivative(v,L,S1, S2, m1*self.M_sun, m2*self.M_sun)\n\t\tv_dot, L_dot, S1_dot, S2_dot = self.__derivative(v,L,S1, S2, m1, m2)\n\n\t\tres = np.concatenate([v_dot[:,None], L_dot, S1_dot, S2_dot], axis = 1) #(N,10)\n\n\t\tif to_reshape:\n\t\t\treturn res[0,:]\n\t\telse:\n\t\t\treturn res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
dc = pylag.mlfit.MLPSD.cov_matrix_deriv(params) Calculate the first derivative of the covariance matrix wrt the parameters by stacking the derivatives of the four quadrants of the covariance matrix (the autocovariance and cross-covariance matrices)
|
def cov_matrix_deriv(self, params):
cc = self.cross_cov_matrix_deriv(params)
if self.freeze_psd:
Z = np.zeros_like(self.ac1)
return np.stack(
[np.vstack([np.hstack([Z, cc[..., p].T]), np.hstack([cc[..., p], Z])]) for p in
range(len([p for p in params if params[p].vary]))], axis=-1)
else:
ac1 = self.mlpsd1.cov_matrix_deriv(params)
ac2 = self.mlpsd2.cov_matrix_deriv(params)
return np.stack(
[np.vstack([np.hstack([ac1[..., p], cc[..., p].T]), np.hstack([cc[..., p], ac2[..., p]])]) for p in
range(len([p for p in params if params[p].vary]))], axis=-1)
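For reference, the quadrant stacking can be reproduced in isolation. The toy sketch below (ac1, ac2 and cc are placeholder derivative stacks, not real pylag objects) assembles each parameter's derivative of the full covariance matrix from the two autocovariance quadrants plus the cross-covariance quadrant and its transpose:

import numpy as np

n, n_par = 4, 3
ac1 = np.zeros((n, n, n_par))   # d(autocovariance of light curve 1)/d(param), stacked over params
ac2 = np.zeros((n, n, n_par))   # d(autocovariance of light curve 2)/d(param)
cc = np.ones((n, n, n_par))     # d(cross-covariance)/d(param)

full_deriv = np.stack(
    [np.vstack([np.hstack([ac1[..., p], cc[..., p].T]),
                np.hstack([cc[..., p], ac2[..., p]])]) for p in range(n_par)],
    axis=-1)
print(full_deriv.shape)         # (2*n, 2*n, n_par)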
|
[
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)",
"def cross_cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)\n return cov",
"def flderiv(b, c, d):\n b2 = b * b\n c2 = c * c\n d2 = d * d\n bd = b * d\n df1db = 2*b + 6*d\n df1dc = 4*c\n df1dd = 6*b + 30*d\n df2db = 4*c * (b + 12*d)\n df2dc = 2 * (b2 + 24*bd + 105*d2 + 2)\n df2dd = 4 * c * (12*b + 105*d)\n df3db = 24 * (d + c2 * (2*b + 28*d) + 48 * d**3)\n df3dc = 48 * c * (1 + b2 + 28*bd + 141*d2)\n df3dd = 24 * (b + 28*b * c2 + 2 * d * (12 + 48*bd +\n 141*c2 + 225*d2) + d2 * (48*b + 450*d))\n return np.matrix([[df1db, df1dc, df1dd],\n [df2db, df2dc, df2dd],\n [df3db, df3dc, df3dd]])",
"def _calc_psi_deriv(self):\n try:\n self.bkg['psi'].mean()\n except:\n self.build_bkg()\n \n # psi = self.eqdsk.psi\n # self.dpsidR = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n # self.dpsidZ = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n psi = self.bkg['psi']\n self.dpsidR = np.zeros((self.nz, self.nR))\n self.dpsidZ = np.zeros((self.nz, self.nR)) \n deriv = np.gradient(psi)\n # Note np.gradient gives y\n # derivative first, then x derivative\n ddR = deriv[1]\n ddZ = deriv[0]\n # dRdi = np.asarray(1.0)/np.gradient(self.R_eqd)\n # dRdi = np.tile(dRdi, [self.eqdsk.nzbox,1])\n # dZdi = np.asarray(1.0)/np.gradient(self.Z_eqd)\n # dZdi = np.tile(dZdi, [self.eqdsk.nrbox,1])\n # dZdi = np.transpose(dZdi)\n dRdi = np.asarray(1.0)/np.gradient(self.bkg['R'])\n dRdi = np.tile(dRdi, [self.nz,1])\n dZdi = np.asarray(1.0)/np.gradient(self.bkg['z'])\n dZdi = np.tile(dZdi, [self.nR,1])\n dZdi = np.transpose(dZdi)\n #print(\"shape ddR:\",np.shape(ddR),'shape dRdi:', np.shape(dRdi))\n #print('shape ddZ:',np.shape(ddZ),'shape dZdi:', np.shape(dZdi))\n \n self.dpsidR[:, :] = ddR*dRdi\n self.dpsidZ[:, :] = ddZ*dZdi",
"def gradient_energy(params, X):\n\n # electronic densities\n n_edens = X[1].shape[-1]\n edens = X[1].dot(params[-n_edens:])\n \n with np.errstate(divide='ignore'):\n # (-1) is there for the constant parameter value\n tmp = np.nan_to_num(-1.0/(2.0*np.sqrt(edens)) + 2.0*params[0]*edens)\n\n grad = np.empty((X[0].shape[0], len(params)), dtype=float)\n grad[:, 1:-n_edens] = X[0] # pair\n grad[:, 0] = np.sum(edens**2, axis=1) # embed\n grad[:, -n_edens:] = np.sum(tmp[:, :, None]*X[1], axis=1) # edens\n\n return grad",
"def cov_matrix(self, params):\n if self.freeze_psd:\n if self.ac1 is None or self.ac2 is None:\n raise AssertionError(\"Autocovariance matrices are not available. Did you fit the PSDs?\")\n ac1 = self.ac1\n ac2 = self.ac2\n else:\n ac1 = self.mlpsd1.cov_matrix(params)\n ac2 = self.mlpsd2.cov_matrix(params)\n\n cc = self.cross_cov_matrix(params)\n\n return np.vstack([np.hstack([ac1, cc.T]), np.hstack([cc, ac2])])",
"def _goniometer_derivatives(\n self, isel, parameterisation=None, dS_dgon_p=None, reflections=None\n ):\n\n # Get required data\n axis = self._axis.select(isel)\n fixed_rotation = self._fixed_rotation.select(isel)\n phi_calc = self._phi_calc.select(isel)\n h = self._h.select(isel)\n s1 = self._s1.select(isel)\n e_X_r = self._e_X_r.select(isel)\n e_r_s0 = self._e_r_s0.select(isel)\n UB = self._UB.select(isel)\n D = self._D.select(isel)\n\n if dS_dgon_p is None:\n\n # get derivatives of the setting matrix S wrt the parameters\n dS_dgon_p = [\n None if der is None else flex.mat3_double(len(isel), der.elems)\n for der in parameterisation.get_ds_dp(use_none_as_null=True)\n ]\n\n dphi_dp = []\n dpv_dp = []\n\n # loop through the parameters\n for der in dS_dgon_p:\n\n if der is None:\n dphi_dp.append(None)\n dpv_dp.append(None)\n continue\n\n # calculate the derivative of r for this parameter\n tmp = fixed_rotation * (UB * h)\n dr = der * tmp.rotate_around_origin(axis, phi_calc)\n\n # calculate the derivative of phi for this parameter\n dphi = -1.0 * dr.dot(s1) / e_r_s0\n dphi_dp.append(dphi)\n\n # calculate the derivative of pv for this parameter\n dpv_dp.append(D * (dr + e_X_r * dphi))\n\n return dpv_dp, dphi_dp",
"def _beam_derivatives(\n self, isel, parameterisation=None, ds0_dbeam_p=None, reflections=None\n ):\n\n # Get required data\n r = self._r.select(isel)\n e_X_r = self._e_X_r.select(isel)\n e_r_s0 = self._e_r_s0.select(isel)\n D = self._D.select(isel)\n\n if ds0_dbeam_p is None:\n\n # get the derivatives of the beam vector wrt the parameters\n ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True)\n\n ds0_dbeam_p = [\n None if e is None else flex.vec3_double(len(r), e.elems)\n for e in ds0_dbeam_p\n ]\n\n dphi_dp = []\n dpv_dp = []\n\n # loop through the parameters\n for der in ds0_dbeam_p:\n\n if der is None:\n dphi_dp.append(None)\n dpv_dp.append(None)\n continue\n\n # calculate the derivative of phi for this parameter\n dphi = (der.dot(r) / e_r_s0) * -1.0\n dphi_dp.append(dphi)\n\n # calculate the derivative of pv for this parameter\n dpv_dp.append(D * (e_X_r * dphi + der))\n\n return dpv_dp, dphi_dp",
"def calculate_surface_curvature(derivatives, order=False):\n fu = derivatives[:, 1, 0]\n fv = derivatives[:, 0, 1]\n fuu = derivatives[:, 2, 0]\n fvv = derivatives[:, 0, 2]\n fuv = derivatives[:, 1, 1]\n\n normal = np.cross(fu, fv)\n norm = np.linalg.norm(normal, axis=1, keepdims=True)\n normal = normal / norm\n\n nuu = (fuu * normal).sum(axis=1)\n nvv = (fvv * normal).sum(axis=1)\n nuv = (fuv * normal).sum(axis=1)\n\n duu = np.linalg.norm(fu, axis=1) ** 2\n dvv = np.linalg.norm(fv, axis=1) ** 2\n duv = (fu * fv).sum(axis=1)\n\n mean = (duu * nvv - 2 * duv * nuv + dvv * nuu) / (2 * (duu * dvv - duv * duv))\n gauss = (nuu * nvv - nuv * nuv) / (duu * dvv - duv * duv)\n\n n = len(derivatives) # number of params\n L = np.empty((n, 2, 2))\n L[:, 0, 0] = nuu\n L[:, 0, 1] = nuv\n L[:, 1, 0] = nuv\n L[:, 1, 1] = nvv\n\n G = np.empty((n, 2, 2))\n G[:, 0, 0] = duu\n G[:, 0, 1] = duv\n G[:, 1, 0] = duv\n G[:, 1, 1] = dvv\n\n M = np.matmul(np.linalg.inv(G), L)\n eigvals, eigvecs = np.linalg.eig(M)\n # Values of first and second principal curvatures\n c1 = eigvals[:, 0]\n c2 = eigvals[:, 1]\n\n if order:\n c1mask = (c1 < c2)\n c2mask = np.logical_not(c1mask)\n c1_r = np.where(c1mask, c1, c2)\n c2_r = np.where(c2mask, c1, c2)\n else:\n c1_r = c1\n c2_r = c2\n\n # dir_1 corresponds to c1, dir_2 corresponds to c2\n dir_1_x = eigvecs[:, 0, 0][np.newaxis].T\n dir_2_x = eigvecs[:, 0, 1][np.newaxis].T\n dir_1_y = eigvecs[:, 1, 0][np.newaxis].T\n dir_2_y = eigvecs[:, 1, 1][np.newaxis].T\n dir_1 = dir_1_x * fu + dir_1_y * fv\n dir_2 = dir_2_x * fu + dir_2_y * fv\n dir_1 = dir_1 / np.linalg.norm(dir_1, axis=1, keepdims=True)\n dir_2 = dir_2 / np.linalg.norm(dir_2, axis=1, keepdims=True)\n\n if order:\n c1maskT = c1mask[np.newaxis].T\n c2maskT = c2mask[np.newaxis].T\n dir_1_r = np.where(c1maskT, dir_1, -dir_2)\n dir_2_r = np.where(c2maskT, dir_1, dir_2)\n else:\n dir_1_r = dir_1\n dir_2_r = dir_2\n\n return c1_r, c2_r, dir_1_r, dir_2_r, normal, mean, gauss",
"def _detector_derivatives(\n self, isel, panel_id, parameterisation=None, dd_ddet_p=None, reflections=None\n ):\n\n # Get required data\n pv = self._pv.select(isel)\n D = self._D.select(isel)\n\n if dd_ddet_p is None:\n\n # get the derivatives of detector d matrix for this panel\n dd_ddet_p = parameterisation.get_ds_dp(\n multi_state_elt=panel_id, use_none_as_null=True\n )\n\n # replace explicit null derivatives with None\n dd_ddet_p = [\n None if e is None else flex.mat3_double(len(D), e.elems)\n for e in dd_ddet_p\n ]\n\n # calculate the derivative of pv for this parameter\n dpv_ddet_p = [\n der if der is None else (D * (der * -1.0)) * pv for der in dd_ddet_p\n ]\n\n return dpv_ddet_p",
"def diff2eigenvectors(dx,dy,dz): \n u=np.array([dx,dy,dz])\n u=u/np.linalg.norm(u)\n R=vec2vec_rotmat(basis[:,0],u)\n eig0=u\n eig1=np.dot(R,basis[:,1])\n eig2=np.dot(R,basis[:,2])\n eigs=np.zeros((3,3))\n eigs[:,0]=eig0\n eigs[:,1]=eig1\n eigs[:,2]=eig2 \n return eigs, R",
"def diff2eigenvectors(dx,dy,dz):\n u=np.array([dx,dy,dz])\n u=u/np.linalg.norm(u)\n R=vec2vec_rotmat(basis[:,0],u)\n eig0=u\n eig1=np.dot(R,basis[:,1])\n eig2=np.dot(R,basis[:,2])\n eigs=np.zeros((3,3))\n eigs[:,0]=eig0\n eigs[:,1]=eig1\n eigs[:,2]=eig2\n return eigs, R",
"def pdderiv(ar,dx=1.,ax=0,order=4,smth=None):\n if smth is not None:\n ar=gf(ar,sigma=smth)\n if order == 2:\n dar = (np.roll(ar,-1,axis=ax) - 2*ar + np.roll(ar,1,axis=ax))/dx**2\n elif order == 4:\n dar = (-np.roll(ar,-2,axis=ax) + 16*np.roll(ar,-1,axis=ax) - 30*ar + 16*np.roll(ar,1,axis=ax)-np.roll(ar,2,axis=ax))/(12*dx**2)\n\n return dar",
"def gradient(guess, e, v):\n n = guess.shape[0]\n de = np.zeros([n-6, n-6])\n for i in range(n-6):\n for j in range(n-6):\n de[i, j] = de4_f(\n guess[i+3, j+3],\n guess[i+3, j+1], guess[i+4, j+2], guess[i+5, j+3], guess[i+4, j+4], \n guess[i+3, j+5], guess[i+2, j+4], guess[i+1, j+3], guess[i+2, j+2], \n e[i+2, j+1], e[i+3, j+2], e[i+2, j+3], e[i+1, j+2], \n v[0], v[1], v[2], 2/n\n )\n return de",
"def covariance_matrix_reconstruction(self):\n matrix_cov = np.zeros_like(self.cov_matrix)\n self.reconstruction_d_matrix()\n np.savetxt(os.path.join(os.getcwd(), 'd_matrix_test.dat'), self.d_matrix)\n np.savetxt(os.path.join(os.getcwd(), 'sigma_1_test.dat'), self.screen_data)\n dxinv = linalg.pinv2(self.d_matrix, cond=1e-8, rcond=1.e-8)\n np.savetxt(os.path.join(os.getcwd(), 'd_matrix_inv_test.dat'), dxinv)\n matrix_repr = np.dot(dxinv, self.screen_data)\n for ix in np.arange(self.cov_matrix.shape[0]):\n for iy in np.arange(self.cov_matrix.shape[1]):\n if ix == 0:\n value = deepcopy(matrix_repr[iy])\n elif ix == 1:\n if iy == 0:\n value = deepcopy(matrix_repr[1])\n elif iy == 1:\n value = deepcopy(matrix_repr[4])\n elif iy == 2:\n value = deepcopy(matrix_repr[5])\n else:\n value = deepcopy(matrix_repr[6])\n elif ix == 2:\n if iy == 0:\n value = deepcopy(matrix_repr[2])\n elif iy == 1:\n value = deepcopy(matrix_repr[5])\n elif iy == 2:\n value = deepcopy(matrix_repr[7])\n else:\n value = deepcopy(matrix_repr[8])\n else:\n if iy == 0:\n value = deepcopy(matrix_repr[3])\n elif iy == 1:\n value = deepcopy(matrix_repr[6])\n elif iy == 2:\n value = deepcopy(matrix_repr[8])\n else:\n value = deepcopy(matrix_repr[9])\n matrix_cov[ix, iy] = deepcopy(value)\n setattr(self, 'cov_matrix', matrix_cov)\n\n directory = os.path.join('C:\\\\', 'Users', 'qfi29231', 'Documents', 'spawn_emittances', 'Emittance_GUI',\n 'quad_scan_setup_0')\n np.savetxt(os.path.join(os.getcwd(), 'cov_matrix_test.dat'), self.cov_matrix)\n np.savetxt(os.path.join(os.getcwd(), 'cov_matrix_test_sdds.dat'),\n self.cov_matrix_from_simframe('CLA-S02-DIA-SCR-02', directory))\n np.savetxt(os.path.join(os.getcwd(), 'Cov_times_Smatrix.txt'), np.dot(self.cov_matrix, self.Smatrix))",
"def fd_xz_derivatives(mat, region, dx, dz, derivs, stencil_size=5):\n try:\n return _fd_xz_derivatives(mat, region, dx, dz, derivs, stencil_size)\n except (ValueError, IndexError) as e:\n raise NumericalError(\"%s\" % e)",
"def _derivatives(self, coupling_variables=None):\n raise NotImplementedError",
"def get_distance_estimates(fit,cov):\n N = fit.shape[0]/2\n d = fit[:N]/fit[N:]\n\n # Jacobian \n J = np.zeros((N,2*N))\n for k in xrange(N):\n J[k,k] = 1/fit[N+k]\n J[k,k+N] = -fit[k]/fit[N+k]**2\n\n cov_d = J.dot(cov).dot(J.T)\n\n return d, cov_d"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
cpsd = pylag.mlfit.MLCrossSpectrum.get_cpsd(params) Calculate the cross power spectrum in each frequency bin from the model for a given set of parameters.
|
def get_cpsd(self, params=None):
if params is None:
params = self.params
if self.cpsd_model is None:
        return np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])
    else:
        return np.log(self.cpsd_model(params, self.fbins.bin_cent))
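As a small illustration of the parameter layout this method reads, the sketch below uses a stand-in parameter class (illustrative only; the real code appears to rely on lmfit-style Parameters objects) to show how the binned ln_cpsd values map onto a linear cross power spectrum via np.exp:

import numpy as np

class StubParam:
    # minimal stand-in for a parameter object exposing a .value attribute
    def __init__(self, value):
        self.value = value

n_bins = 4
params = {'ln_cpsd%01d' % i: StubParam(-2.0 + 0.5 * i) for i in range(n_bins)}

ln_cpsd = np.array([params['ln_cpsd%01d' % i].value for i in range(n_bins)])
cpsd = np.exp(ln_cpsd)          # linear cross power in each frequency bin
print(ln_cpsd, cpsd)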
|
[
"def cross_cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array([params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n cov = np.sum(np.array([p * (c * np.cos(phi) - s * np.sin(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)]), axis=0)\n return cov",
"def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)",
"def get_psd(self, params=None):\n if params is None:\n params = self.params\n\n if self.model is None:\n return np.array([self.params[p].value for p in self.params])\n else:\n return np.log(self.model(self.params, self.fbins.bin_cent))",
"def fit_psd(self):\n print(\"Fitting PSD of light curve 1...\")\n self.mlpsd1.fit()\n self.ac1 = self.mlpsd1.cov_matrix(self.mlpsd1.params)\n\n print(\"Fitting PSD of light curve 2...\")\n self.mlpsd2.fit()\n self.ac2 = self.mlpsd1.cov_matrix(self.mlpsd2.params)\n\n if self.cpsd_model is None:\n # set an initial estimate of the cross power spectrum as the average of the two band powers\n # minus a little bit - this helps the fit on its way!\n for i in range(len(self.fbins)):\n self.params['ln_cpsd%01d' % i].value = 0.5 * (self.mlpsd1.psd[i] + self.mlpsd2.psd[i]) - 1.",
"def csr_spectrum(self, *args, **kwargs):\n # NOTE: This does not seem to work\n period = args[0] if len(args) > 0 and isinstance(args[0], Number) else kwargs.get('period',\n None)\n x, xlabel, _ = self._unit_and_label(kwargs, Axis.FAXIS, 'x', 'csr_spectrum', 'Hz',\n \"Frequency\")\n y, ylabel, time_prefix = self._unit_and_label(kwargs, Axis.TIME, 'y', 'csr_spectrum',\n 'ts', \"T\")\n if period is None: # if no period provided\n z, zlabel, _ = self._unit_and_label(kwargs, Axis.DATA, 'z', 'csr_spectrum', 'wphz',\n \"Power\")\n if kwargs.get(\"pad_zero\", False):\n z[np.where(z < np.float64(0.0))] = np.float64(1e-100)\n else:\n z, zlabel, _ = self._unit_and_label(kwargs, Axis.DATA, 'z', 'csr_spectrum', 'wphz',\n \"Power\", gen_sub=True)\n return period, x, y, z, xlabel, ylabel, zlabel, time_prefix",
"def bootstrap_spectral(data, fs, nperseg, fwin, nit=1000, ci=95,\n trim=0.2, calc_coherence=True):\n print(r\"Spectral estimates from {:.1f} to {:.1f} Hz\".format(*fwin))\n nchans = data.shape[0]\n # get the indices for the confidence intervals\n ci_idx = np.array([\n int((0.5 - ci/200.)*(nit-1)), # lower CI\n (nit-1)//2, # mean\n int(np.ceil((0.5 + ci/200.)*(nit-1))) # upper CI\n ])\n # get the frequencies\n f = np.fft.rfftfreq(nperseg, d=1./fs)\n f_keep = np.all([\n f >= fwin[0],\n f <= fwin[1]],\n axis = 0)\n print('Number of Fourier coefficients: %d' % f_keep.sum())\n f = f[f_keep]\n psd_segs = scipy.signal.spectral._spectral_helper(data, data, axis=-1,\n nperseg = nperseg, fs=fs, mode='psd',\n scaling='density')[2][:,f_keep,:]\n # get the indices with replacement of the array for the bootstrap\n bootstrap_indices = np.random.random_integers(\n low = 0, high = psd_segs.shape[-1] - 1,\n size = (nit, psd_segs.shape[-1]))\n # perform the bootstrap for the psd\n psd_bootstrap = np.array(\n [scipy.stats.trim_mean(psd_segs[...,idx], trim, axis=-1)\n for idx in bootstrap_indices])\n if calc_coherence:\n # perform the bootstrap for coh and icoh\n coh = []\n icoh = []\n phs = []\n for i in range(nchans):\n for j in range(i):\n print('Channel %d vs. %d.' % (i + 1, j + 1))\n csd_segs = scipy.signal.spectral._spectral_helper(\n data[i], data[j], axis=-1, nperseg = nperseg, fs=fs,\n mode='psd', scaling='density')[2][f_keep]\n # perform the bootstrap\n csd_bootstrap = np.array([\n (scipy.stats.trim_mean(\n np.real(csd_segs[...,idx]), trim, axis=-1) + \n 1j*scipy.stats.trim_mean(\n np.imag(csd_segs[...,idx]), trim, axis=-1))\n for idx in bootstrap_indices])\n # get the phase spectrum confidence intervals\n phs.append(np.sort(np.angle(csd_bootstrap,\n deg=True), axis=0)[ci_idx])\n # normalize the csd bootstrap with the product of the psds\n # for the coherence estimates\n csd_bootstrap /= np.sqrt(psd_bootstrap[:,i]*psd_bootstrap[:,j])\n # get the confidence interval for coherence and icoh\n coh.append(np.sort(np.abs(csd_bootstrap), axis=0)[ci_idx])\n icoh.append(np.sort(np.imag(csd_bootstrap), axis=0)[ci_idx])\n # get the CI of the psd\n psd = np.swapaxes(np.sort(psd_bootstrap, axis=0)[ci_idx], 0, 1)\n if calc_coherence:\n return f, psd, np.array(coh), np.array(icoh), np.array(phs)\n else:\n return f, psd",
"def cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n # note the factor of 2 to integrate over the negative frequencies too!\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n else:\n psd = self.model(params, self.fbins.bin_cent) * self.psdnorm\n\n cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)\n\n return cov",
"def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)",
"def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, **kwargs):\n if not self._hold: self.cla()\n pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap)\n pxy.shape = len(freqs),\n # pxy is complex\n freqs += Fc\n\n self.plot(freqs, 10*npy.log10(npy.absolute(pxy)), **kwargs)\n self.set_xlabel('Frequency')\n self.set_ylabel('Cross Spectrum Magnitude (dB)')\n self.grid(True)\n vmin, vmax = self.viewLim.intervaly().get_bounds()\n\n intv = vmax-vmin\n step = 10*int(npy.log10(intv))\n\n ticks = npy.arange(math.floor(vmin), math.ceil(vmax)+1, step)\n self.set_yticks(ticks)\n\n return pxy, freqs",
"def computemycwt (fd, signal):\n # N = signal.size\n fr = np.arange(1, 80) # np.linspace(1, 200, 400); # vector of frequencies\n w0 = 5 \n scales = fd*w0/fr/2/np.pi\n\n # J = 200 \n # scales = np.asarray([2**(i * 0.1) for i in range(J)])\n\n coef = mycwt(signal, scales)\n #t = np.arange (0, N, 1/fd, dtype = np.float64)\n return fr, coef",
"def power_spectrum(x, cross_points=(-1, 0, 1), poly_order=4, freq_max=0.1):\n data = {}\n x -= x.mean()\n freq, power = scipy.signal.periodogram(\n x.resample('1 s').mean().ffill())\n freq_range = np.logical_and(freq > 1e-4, freq < freq_max)\n power = power[freq_range]\n freq = freq[freq_range]\n log_freq = np.log10(freq)\n log_power = np.log10(power)\n\n plt.loglog(freq, power)\n\n p = np.polynomial.Polynomial.fit(log_freq, log_power, poly_order)\n dpdf = p.deriv()\n freq_lin = np.logspace(log_freq.min(), log_freq.max())\n plt.loglog(freq_lin, 10 ** p(np.log10(freq_lin)))\n\n for cross in cross_points:\n data_cross = []\n roots = (dpdf - cross).roots()\n roots = np.real(roots[np.isreal(roots)])\n vals = p(roots)\n # print('crossing point', cross)\n # print('\\troots', 10**roots)\n # print('\\tvals', 10**vals)\n for root, val in zip(roots, vals):\n plt.loglog(10 ** root, 10 ** val, 'rx', markeredgewidth=2)\n data_cross += [{'root': 10 ** root, 'val': 10 ** val}]\n data[cross] = data_cross\n\n # gca().set_xlim([freq.min(), freq.max()])\n # gca().set_ylim([power.min(), power.max()])\n plt.grid()\n plt.title('Power spectrum')\n plt.xlabel('Hz')\n plt.ylabel('Power')\n\n return data",
"def get_mfccs(sig):\n# loginfo('[wav2ivec.get_mfccs] Extracting MFCC features ...')\n fbank_mx = features.mel_fbank_mx(winlen_nfft=WINDOWSIZE / SOURCERATE,\n fs=fs,\n NUMCHANS=NUMCHANS,\n LOFREQ=LOFREQ,\n HIFREQ=HIFREQ)\n fea = features.mfcc_htk(sig,\n window=WINDOWSIZE / SOURCERATE,\n noverlap=(WINDOWSIZE - TARGETRATE) / SOURCERATE,\n fbank_mx=fbank_mx,\n _0='first',\n NUMCEPS=NUMCEPS,\n RAWENERGY=RAWENERGY,\n PREEMCOEF=PREEMCOEF,\n CEPLIFTER=CEPLIFTER,\n ZMEANSOURCE=ZMEANSOURCE,\n ENORMALISE=ENORMALISE,\n ESCALE=0.1,\n SILFLOOR=50.0,\n USEHAMMING=True)\n\n# loginfo('[wav2ivec.get_mfccs] Adding derivatives ...')\n fea = features.add_deriv(fea, (deltawindow, accwindow))\n\n# loginfo('[wav2ivec.get_mfccs] Reshaping to SFeaCat conventions ...')\n return fea.reshape(fea.shape[0], 3, -1).transpose((0, 2, 1)).reshape(fea.shape[0], -1)",
"def cross_power(self, fbins=None, psdslope=0.):\n f, ft = self.ft()\n cross = f**-psdslope * np.abs(ft)\n if fbins is not None:\n cross = fbins.bin(f, cross)\n freq = fbins.bin_cent\n else:\n freq = f\n return freq, cross",
"def isotropic_crossspectrum(*args, **kwargs): # pragma: no cover\n import warnings\n msg = \"This function has been renamed and will disappear in the future.\"\\\n +\" Please use isotropic_cross_spectrum instead\"\n warnings.warn(msg, Warning)\n return isotropic_cross_spectrum(*args, **kwargs)",
"def _make_crossspectrum(self, lc1, lc2):\n\n # chop light curves into segments\n if isinstance(lc1, Lightcurve) and \\\n isinstance(lc2, Lightcurve):\n\n if self.type == \"crossspectrum\":\n self.cs_all, nphots1_all, nphots2_all = \\\n self._make_segment_spectrum(lc1, lc2, self.segment_size)\n\n elif self.type == \"powerspectrum\":\n self.cs_all, nphots1_all = \\\n self._make_segment_spectrum(lc1, self.segment_size)\n\n else:\n raise ValueError(\"Type of spectrum not recognized!\")\n\n else:\n self.cs_all, nphots1_all, nphots2_all = [], [], []\n\n for lc1_seg, lc2_seg in zip(lc1, lc2):\n\n if self.type == \"crossspectrum\":\n cs_sep, nphots1_sep, nphots2_sep = \\\n self._make_segment_spectrum(lc1_seg, lc2_seg,\n self.segment_size)\n nphots2_all.append(nphots2_sep)\n elif self.type == \"powerspectrum\":\n cs_sep, nphots1_sep = \\\n self._make_segment_spectrum(lc1_seg, self.segment_size)\n\n else:\n raise ValueError(\"Type of spectrum not recognized!\")\n\n self.cs_all.append(cs_sep)\n nphots1_all.append(nphots1_sep)\n\n self.cs_all = np.hstack(self.cs_all)\n nphots1_all = np.hstack(nphots1_all)\n\n if self.type == \"crossspectrum\":\n nphots2_all = np.hstack(nphots2_all)\n\n m = len(self.cs_all)\n nphots1 = np.mean(nphots1_all)\n\n power_avg = np.zeros_like(self.cs_all[0].power)\n power_err_avg = np.zeros_like(self.cs_all[0].power_err)\n unnorm_power_avg = np.zeros_like(self.cs_all[0].unnorm_power)\n for cs in self.cs_all:\n power_avg += cs.power\n unnorm_power_avg += cs.unnorm_power\n power_err_avg += (cs.power_err) ** 2\n\n power_avg /= np.float(m)\n power_err_avg = np.sqrt(power_err_avg) / m\n unnorm_power_avg /= np.float(m)\n\n self.freq = self.cs_all[0].freq\n self.power = power_avg\n self.unnorm_power = unnorm_power_avg\n self.m = m\n self.power_err = power_err_avg\n self.df = self.cs_all[0].df\n self.n = self.cs_all[0].n\n self.nphots1 = nphots1\n\n if self.type == \"crossspectrum\":\n self.nphots1 = nphots1\n nphots2 = np.mean(nphots2_all)\n\n self.nphots2 = nphots2",
"def welch_psd(data, fs, plot=True, window='hanning', overlap=0.5, len_seg=None, axis=-1):\n# data = data.reshape(data.shape[0],)\n if len_seg is None:\n overlap = overlap * 256\n else:\n overlap = overlap * len_seg\n freqs, psd = signal.welch(data, fs=fs, noverlap=overlap, nperseg=len_seg,\n window=window, nfft=None, detrend='constant',\n return_onesided=True, scaling='density', axis=axis)\n if plot:\n plt.plot(freqs, psd)\n plt.show()\n return freqs, psd",
"def bloch_spectrum(C,L,R,Bx=101,By=None):\n if By is None:\n By = Bx\n k_val_vec = []\n\n N = C.shape[0]\n\n if len(L)==1:\n k_a = np.linspace(-np.pi,np.pi,Bx)\n else:\n k_a = np.meshgrid(np.linspace(-np.pi,np.pi,Bx),np.linspace(-np.pi,np.pi,By))\n k_a = np.stack((k_a[0].flatten(),k_a[1].flatten())).T\n\n all_data = []\n for k in k_a:\n M = bloch_mat(C,L,R,k=k)\n nu,w,v = ordev(M,vec=True)\n all_data += [np.vstack((np.outer(k,np.ones(N)),nu.reshape((1,-1)),w,v))]#,w,v))]\n all_data = np.r_[all_data]\n return all_data",
"def MBfilter_CF(st, frequencies,\n CN_HP, CN_LP,\n filter_norm, filter_npoles=2,\n var_w=True,\n CF_type='envelope', CF_decay_win=1.0,\n hos_order=4,\n rosenberger_decay_win=1.0,\n rosenberger_filter_power=1.0,\n rosenberger_filter_threshold=None,\n rosenberger_normalize_each=False,\n wave_type='P',\n hos_sigma=None,\n rec_memory=None,\n full_output=False):\n delta = st[0].stats.delta\n Tn = 1. / frequencies\n Nb = len(frequencies)\n CF_decay_nsmps = CF_decay_win / delta\n rosenberger_decay_nsmps = rosenberger_decay_win / delta\n\n if hos_sigma is None:\n hos_sigma = -1.\n\n # Single component analysis\n if len(st) < 2:\n # Use just the first trace in stream\n tr = st[0]\n y = tr.data\n\n YN1 = np.zeros((Nb, len(y)), float)\n CF1 = np.zeros((Nb, len(y)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem = rec_memory[(tr.id, wave_type)][n]\n else:\n rmem = None\n\n YN1[n] = recursive_filter(y, CN_HP[n], CN_LP[n],\n filter_npoles, rmem)\n YN1[n] /= filter_norm[n]\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem)\n\n # 2 (horizontal) components analysis\n elif len(st) == 2:\n # Assumes that 2 horizontal components are used\n tr1 = st.select(channel='*[E,W,1]')[0]\n tr2 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n\n # Initializing arrays\n YN_E = np.zeros((Nb, len(y1)), float)\n YN_N = np.zeros((Nb, len(y1)), float)\n YN1 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n\n YN_E[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN_E[n] /= filter_norm[n]\n YN_N[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN_N[n] /= filter_norm[n]\n # Combining horizontal components\n YN1[n] = np.sqrt(np.power(YN_E[n], 2) + np.power(YN_N[n], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n] / delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem1)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n\n # 3 components analysis, includes polarization P and S decomposition\n else:\n # Vertical\n tr1 = st.select(channel='*[Z,U,D]')[0]\n # Horizontals\n tr2 = st.select(channel='*[E,W,1]')[0]\n tr3 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n y3 = tr3.data\n\n # Initializing arrays\n YN1 = np.zeros((Nb, len(y1)), float)\n YN2 = np.zeros((Nb, len(y1)), float)\n YN3 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n filteredDataP = np.zeros((Nb, len(y1)), float)\n filteredDataS = np.zeros((Nb, len(y1)), float)\n if full_output:\n CF2 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, 
wave_type)][n]\n rmem3 = rec_memory[(tr3.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n rmem3 = None\n\n YN1[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN1[n] /= filter_norm[n]\n YN2[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN2[n] /= filter_norm[n]\n YN3[n] = recursive_filter(y3, CN_HP[n], CN_LP[n],\n filter_npoles, rmem3)\n YN3[n] /= filter_norm[n]\n\n # Define the decay constant\n rosenberger_decay_constant = 1 / rosenberger_decay_nsmps\n\n # print('Rosenberger in process {}/{}\\r'.format(n+1, Nb),\n # sys.stdout.flush())\n\n # third value returned by rosenberger() is the polarizaion filter,\n # which we do not use here\n filt_dataP, filt_dataS, _ =\\\n rosenberger(YN2[n], YN3[n], YN1[n],\n rosenberger_decay_constant,\n pol_filter_power=rosenberger_filter_power,\n pol_filter_threshold=rosenberger_filter_threshold,\n normalize_each=rosenberger_normalize_each)\n\n # Use vertical component for P data\n filteredDataP[n] = filt_dataP[0, :]\n # Use vector composition of the two horizontal component for S data\n filteredDataS[n] = np.sqrt(np.power(filt_dataS[1, :], 2) +\n np.power(filt_dataS[2, :], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n if CF_type == 'envelope':\n if wave_type == 'P':\n CF1[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem2)\n else:\n CF1[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem2)\n\n if CF_type == 'kurtosis':\n if wave_type == 'P':\n CF1[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n else:\n CF1[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n\n if full_output:\n return YN1, CF1, CF2, Tn, Nb, filteredDataP, filteredDataS\n else:\n return YN1, CF1, Tn, Nb",
"def coarseSpectrum():\n\n\t\t# Get powers\n\t\txpol = [row[0] for table in ccpwrs for row in table]\n\t\typol = [row[1] for table in ccpwrs for row in table]\n\n\t\t# Plot histogram\n\t\tplt.figure()\n\t\tplt.plot(xpol[0:512], '-r', label = 'XPOL')\n\t\tplt.plot(ypol[0:512], '-b', label = 'YPOL')\n\t\tplt.title('Coarse Spectrum')\n\t\tplt.legend(loc = 'center right')\n\t\tplt.xlabel('Coarse Bin Number')\n\t\tplt.autoscale(enable=True, axis='x', tight=True)\n\t\t# plt.xlim([0,511])\n\t\tplt.ylabel('Power')\n\t\tplt.yscale('log')\n\t\t# plt.show(block = False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|