query: string (length 9 to 9.05k characters)
document: string (length 10 to 222k characters)
negatives: list (19 to 20 items)
metadata: dict
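Each row pairs a natural-language query with one positive document (a code snippet) and roughly 19-20 hard-negative snippets; the metadata marks the (query, document, negatives) triplet objective. Below is a minimal sketch of turning rows into training triplets with the Hugging Face `datasets` library; the repository id is a placeholder, not this dataset's actual name.

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the dataset's real name on the Hub.
ds = load_dataset("org/code-retrieval-triplets", split="train")

for row in ds.select(range(2)):
    anchor = row["query"]         # natural-language description
    positive = row["document"]    # the matching code snippet
    negatives = row["negatives"]  # list of non-matching snippets
    triplets = [(anchor, positive, neg) for neg in negatives]
```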
Reduce the mean of a tensor across processes during distributed training.
def reduce_mean(tensor):
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor
[ "def vm_impl_reduce_mean(self):\n\n def vm_impl(x, axis):\n x = x.asnumpy()\n out = vm.mean(x, axis)\n return Tensor(out)\n\n return vm_impl", "def average_all(tensor):\n require_init()\n if not using_deepspeed:\n return tensor\n\n require_torch_distributed_init()\n # We copy because modification happens in-place\n averaged = tensor.detach().clone()\n # We use `all_reduce` because it is better supported than `reduce`\n torch.distributed.all_reduce(averaged, torch.distributed.ReduceOp.SUM)\n return averaged / get_world_size()", "def cluster_mean(cluster):\r\n # print(cluster.shape)\r\n return(1/cluster.shape[1])*np.sum(cluster, axis=1)", "def update_mean(X):\n\n return X.sum(axis=0) / X.shape[0]", "def calculate_mean(self):\n\t\t\t\t\t\n avg = 1.0 * sum(self.data) / len(self.data)\n\t\t\n self.mean = avg\n \n return self.mean", "def global_mean(self, data):\r\n local_sum = np.sum(data)\r\n local_size = data.size\r\n global_sum = self.reduce_scalar(local_sum, MPI.SUM)\r\n global_size = self.reduce_scalar(local_size, MPI.SUM)\r\n return global_sum / global_size", "def _XModelWeightedMean(self):\n raise NotImplementedError", "def cross_replica_average(inputs, num_shards, distributed_group_size):\n group_assignment = None\n if num_shards is not None and distributed_group_size != num_shards:\n group_size = distributed_group_size\n group_assignment = []\n for g in range(num_shards // group_size):\n replica_ids = [g * group_size + i for i in range(group_size)]\n group_assignment.append(replica_ids)\n\n return tpu_ops.cross_replica_sum(inputs, group_assignment) / tf.cast(\n distributed_group_size, inputs.dtype)", "def mean_score(self):\n pass", "def mean(self, dim=None, keepdim=False): # real signature unknown; restored from __doc__\n pass", "def mean_allcnnc():\n from backpack.core.layers import Flatten\n return nn.Sequential(nn.AvgPool2d(kernel_size=(6, 6)), Flatten())", "def get_mean_map(self):\n return sum(self.evaluations) / self.folds", "def normalize_by_mean(tensor):\n return tf.divide(tensor, tf.reduce_mean(tensor))", "def mean(self):\r\n\t\treturn np.mean(self.dataset)", "def avg_pool(self, embeddings):\n return torch.mean(embeddings, dim=0)", "def poolMean(inObj):\n\n inObj.gs()", "def update_average(self, batch_index, loss):\n if math.isfinite(loss):\n self.avg_train_loss -= self.avg_train_loss / (batch_index + 1)\n self.avg_train_loss += loss / (batch_index + 1)\n else:\n self.loss_isfinite = False\n logger.info('loss:{} in Nan or inf, error'.format(loss))", "def mean_avoid_0(inp, dim=-1, epsilon=1e-10):\n x = torch.sum(inp, dim=dim)\n y = torch.sum(inp != 0, dim=dim).float()\n return x/(y+epsilon)", "def mean(data_set):\n return reduce(lambda x, y: x + y, data_set) / len(data_set)", "def mean(self) -> float:\n return mean(self.iterable)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GoldsteinPrice's objective function. Only takes two dimensions and has a global minimum at
def goldstein_func(x):
    if not x.shape[1] == 2:
        raise IndexError('Goldstein function only takes two-dimensional '
                         'input.')
    if not np.logical_and(x >= -2, x <= 2).all():
        raise ValueError('Input for Goldstein-Price function must be within '
                         '[-2, 2].')
    x_ = x[:, 0]
    y_ = x[:, 1]
    j = ((1 + (x_ + y_ + 1)**2.0
          * (19 - 14*x_ + 3*x_**2.0 - 14*y_ + 6*x_*y_ + 3*y_**2.0))
         * (30 + (2*x_ - 3 * y_)**2.0
            * (18 - 32*x_ + 12*x_**2.0 + 48*y_ - 36*x_*y_ + 27*y_**2.0)))
    return j
[ "def objective2(x1, x2, x3, x4, x5, x6):\n return x1 ** 2 + x2 ** 2 + x3 ** 2 + x4 ** 2 + x5 ** 2 + x6 ** 2", "def add_minimize(self, co, var):", "def _objective_fn(\n pair: gaussian_mixture_pair.GaussianMixturePair,\n obs0: Observations,\n obs1: Observations,\n ) -> jnp.ndarray:\n q0 = get_q(gmm=pair.gmm0, obs=obs0)\n q1 = get_q(gmm=pair.gmm1, obs=obs1)\n cost_matrix = pair.get_cost_matrix()\n sinkhorn_output = pair.get_sinkhorn(cost_matrix=cost_matrix)\n transport_penalty = sinkhorn_output.reg_ot_cost\n return q0 + q1 - weight_transport * transport_penalty", "def minimise_objective_function_BFGS(self):\r\n result = scipy.optimize.minimize(fun=self.objective_function,\r\n jac=self.gradient,\r\n method=\"BFGS\")\r\n self.best_guess = result.x", "def _objective_function(ndim, voxel_size, sigma_z, sigma_yx, amplitude):\n # define objective gaussian function\n if ndim == 3:\n f = _objective_function_3d(\n voxel_size_z=voxel_size[0],\n voxel_size_yx=voxel_size[-1],\n sigma_z=sigma_z,\n sigma_yx=sigma_yx,\n amplitude=amplitude)\n else:\n f = _objective_function_2d(\n voxel_size_yx=voxel_size[-1],\n sigma_yx=sigma_yx,\n amplitude=amplitude)\n\n return f", "def objective_function(self):\r\n objective_value = 0\r\n for point in self.POINTS:\r\n objective_value += self.deviance(point)**2\r\n\r\n return objective_value", "def objective1(x1, x2, x3, x4, x5, x6):\n return -1 * ((25 * pow(x1 - 2, 2)) +\n pow((x2 - 2), 2) +\n pow(x3 - 1, 2) * pow(x4 - 4, 2) +\n pow(x5 - 1, 2))", "def objective(params):\n\t\t\t# hyperopt casts as float\n\t\t\tparams['num_boost_round'] = int(params['num_boost_round'])\n\t\t\tparams['num_leaves'] = int(params['num_leaves'])\n\n\t\t\t# need to be passed as parameter\n\t\t\tif self.is_unbalance:\n\t\t\t\tparams['is_unbalance'] = True\n\t\t\tparams['verbose'] = -1\n\t\t\tparams['seed'] = 1\n\n\t\t\tif self.with_focal_loss:\n\t\t\t\tfocal_loss = lambda x,y: focal_loss_lgb(x, y,\n\t\t\t\t\tparams['alpha'], params['gamma'])\n\t\t\t\tcv_result = lgb.cv(\n\t\t\t\t\tparams,\n\t\t\t\t\ttrain,\n\t\t\t\t\tnum_boost_round=params['num_boost_round'],\n\t\t\t\t\tfobj = focal_loss,\n\t\t\t\t\tfeval = lgb_focal_f1_score,\n\t\t\t\t\tnfold=3,\n\t\t\t\t\tstratified=True,\n\t\t\t\t\tearly_stopping_rounds=20)\n\t\t\telse:\n\t\t\t\tcv_result = lgb.cv(\n\t\t\t\t\tparams,\n\t\t\t\t\ttrain,\n\t\t\t\t\tnum_boost_round=params['num_boost_round'],\n\t\t\t\t\tmetrics='binary_logloss',\n\t\t\t\t\tfeval = lgb_f1_score,\n\t\t\t\t\tnfold=3,\n\t\t\t\t\tstratified=True,\n\t\t\t\t\tearly_stopping_rounds=20)\n\t\t\tself.early_stop_dict[objective.i] = len(cv_result['f1-mean'])\n\t\t\tscore = round(cv_result['f1-mean'][-1], 4)\n\t\t\tobjective.i+=1\n\t\t\treturn -score", "def optimizer(self, temperature):\n G_V = [] # G for each volume\n # G = E(V) + PV + A_vib(V, T)\n for i, v in enumerate(self.volumes):\n G_V.append(self.energies[i] +\n self.pressure * v * self.gpa_to_ev_ang +\n self.vibrational_free_energy(temperature, v))\n\n # fit equation of state, G(V, T, P)\n eos_fit = self.eos.fit(self.volumes, G_V)\n # minimize the fit eos wrt volume\n # Note: the ref energy and the ref volume(E0 and V0) not necessarily\n # the same as minimum energy and min volume.\n volume_guess = eos_fit.volumes[np.argmin(eos_fit.energies)]\n min_wrt_vol = minimize(eos_fit.func, volume_guess)\n # G_opt=G(V_opt, T, P), V_opt\n return min_wrt_vol.fun, min_wrt_vol.x[0]", "def l1Min(A, b):\n n = np.shape(A)\n zero = np.zeros(n[1]).astype(float)\n ones = np.ones_like(zero)\n\n I = np.eye(n[1])\n top = np.hstack((-I, I))\n bottom = 
np.hstack((-I,-I))\n I = np.vstack((top,bottom))\n\n z = np.zeros_like(A).astype(float)\n a = np.hstack((z,A))\n\n z2 = np.zeros(2*n[1]).astype(float)\n # z2 = np.reshape(z2,(2*n[1],1))\n\n # print \"z2\\n\", z2, \"\\nb\\n\", b\n\n c = np.array(np.hstack((ones, zero)))\n G = np.array(np.vstack((I,a,-a)))\n h = np.array(np.append(np.append(z2,b),-b))\n\n # print \"c\\n\", c, \"\\nG\\n\", G, \"\\nh\\n\", h, \"\\n\"\n\n c = matrix(c)\n G = matrix(G)\n h = matrix(h)\n\n sol = solvers.lp(c, G, h)\n # print \"sol['x']\", np.ravel(sol['x']), type(np.ravel(sol['x'])), \"\\nn\", n\n # print \"\\n\\n\", \"sol['primal objective']\",sol['primal objective']\n return np.ravel(sol['x'])[n[1]:], sol['primal objective']", "def der_cost_func_p1(es_x, gt_y, p1):\n s = 0\n for ex, gy in zip(es_x, gt_y):\n ey = ex * p1\n s += ((ey - gy) * ex)\n m = len(es_x)\n # gradiente\n g = s / m\n print(g)\n return g", "def _objective_function_2d(voxel_size_yx, sigma_yx, amplitude):\n # sigma is known, we fit mu, amplitude and background\n if sigma_yx is not None and amplitude is None:\n def f(grid, mu_y, mu_x, amplitude, background):\n values = gaussian_2d(\n grid=grid,\n mu_y=mu_y,\n mu_x=mu_x,\n sigma_yx=sigma_yx,\n voxel_size_yx=voxel_size_yx,\n amplitude=amplitude,\n background=background)\n return values\n\n # amplitude is known, we fit sigma, mu and background\n elif amplitude is not None and sigma_yx is None:\n def f(grid, mu_y, mu_x, sigma_yx, background):\n values = gaussian_2d(\n grid=grid,\n mu_y=mu_y,\n mu_x=mu_x,\n sigma_yx=sigma_yx,\n voxel_size_yx=voxel_size_yx,\n amplitude=amplitude,\n background=background)\n return values\n\n # amplitude and sigma are known, we fit mu and background\n elif amplitude is not None and sigma_yx is not None:\n def f(grid, mu_y, mu_x, background):\n values = gaussian_2d(\n grid=grid,\n mu_y=mu_y,\n mu_x=mu_x,\n sigma_yx=sigma_yx,\n voxel_size_yx=voxel_size_yx,\n amplitude=amplitude,\n background=background)\n return values\n\n # we fit mu, sigma, amplitude and background\n else:\n def f(grid, mu_y, mu_x, sigma_yx, amplitude, background):\n values = gaussian_2d(\n grid=grid,\n mu_y=mu_y,\n mu_x=mu_x,\n sigma_yx=sigma_yx,\n voxel_size_yx=voxel_size_yx,\n amplitude=amplitude,\n background=background)\n return values\n\n return f", "def objective(self,data):\r\n F = -0.5*self.lbda*(np.sum(self.U*self.U)+np.sum(self.V*self.V))\r\n for i in xrange(len(self.U)):\r\n f = self.precompute_f(data,i)\r\n for j in f:\r\n F += log(g(f[j]))\r\n for k in f:\r\n F += log(1-g(f[k]-f[j]))\r\n return F", "def __objective_fcn(self, y_true, y_pred, **kwargs):\n obj1 = kwargs['P'](y_true,y_pred) #objective 1\n obj2 = kwargs['ratio_selected_features'] #is objective 2\n \n particle_value = self.obj_function_equation(obj1,obj2, kwargs['alpha'])\n \n return particle_value", "def _minimum_kriging_variance_objective(x, z, mean, corr, args=()):\r\n\r\n # n, m = x.shape\r\n #\r\n # def obj(parameters):\r\n #\r\n # theta = parameters[:m] # parameters used in fitting the kernel function.\r\n # beta = parameters[m:] # parameters used in fitting the trend function\r\n #\r\n # # calculate trend\r\n # mu = mean(x, beta)\r\n #\r\n # # calculate the correlation matrix and standard deviation\r\n # sigma, R, dRd = _calculate_sigma_and_R(x, z, mu, corr, theta, args=())\r\n #\r\n # # calculate the minimum kriging variance [1]\r\n # kv = np.log(dRd) + np.log(la.norm(R))\r\n #\r\n # return kv\r\n #\r\n # return obj\r", "def vm_impl_minimum(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n out = 
vm.minimum(x, y)\n return Tensor(out)\n\n return vm_impl", "def cost_function(x, svh, svv, theta, gamma, prior_mean, prior_unc, unc=0.8):\n # Fit to the observations\n cost1, dcost1 = cost_obs(x, svh, svv, theta, unc=unc)\n # Fit to the prior\n cost2, dcost2 = cost_prior(x, svh, svv, theta, prior_mean, prior_unc)\n # Smooth evolution of LAI\n n_obs = len(svv)\n lai = x[(6 + n_obs) :]\n cost3, dcost3 = cost_smooth(lai, gamma)\n tmp = np.zeros_like(dcost1)\n tmp[(7 + n_obs) : -1] = dcost3\n return cost1 + cost2 + cost3, dcost1 + dcost2 + tmp", "def vxquad(self):\n\n alpeps = 1e-12 # limit accuracy for convex regularization\n\n # get slopes\n da1 = (self.f1 - self.fbest) / self.a1\n da2 = (self.f2 - self.fbest) / self.a2\n\n # get interpolating quadratic model\n # f(xbest+alp*p)=fbest-alp*kappa+alp^2*lambda\n\n fbest = self.fbest\n a1 = self.a1\n a2 = self.a2\n f1 = self.f1\n f2 = self.f2\n\n try:\n alpf = max(self.falist) - fbest + eps * np.abs(fbest) / \\\n max(abs(self.a1), abs(self.a2)) ** 2\n except:\n # required info not present -- replace by random step\n alp = a1 + np.random.random() * (a2 - a1)\n return\n\n lambdamin = alpeps * alpf\n lambda_ = (da2 - da1) / (a2 - a1)\n kappa = a1 * lambda_ - da1\n kappa2 = kappa / 2\n convex = self.bracket or (lambda_ > lambdamin)\n if False:\n condinv = np.min([(f1 - fbest) / (abs(f1) + abs(fbest)),\n (f2 - fbest) / (abs(f2) + abs(fbest)),\n (da2 - da1) / (abs(da2) + abs(da1))])\n\n if np.isfinite(self.linetarget):\n # get maximal step with predicted gain <= linetarget\n discr = kappa2 ** 2 - lambda_ * self.linetarget\n if discr > 0:\n if kappa2 < 0:\n denom = kappa2 - np.sqrt(discr)\n else:\n denom = kappa2 + np.sqrt(discr)\n\n alp = self.linetarget / denom\n elif lambda_ > 0:\n alp = kappa2 / lambda_\n else:\n alp = 0 # flat function\n\n # alp hier weiter\n else:\n # unrestricted case\n # get safeguarded convex quadratic model\n lambda_ = max(lambda_, lambdamin)\n kappa = a1 * lambda_ - da1\n # predicted optimal step size\n alp = kappa / (2 * lambda_)\n\n oldrep = alp == 0 or alp == a1 or alp == a2\n if oldrep:\n # replace by random step\n alp = a1 + np.random.random() * (a2 - a1)\n\n self.alp = alp", "def bellman_operator(V, cp, return_policy=False):\n # === Simplify names, set up arrays === #\n R, w, Lambda_H, Lambda_E, Pi, beta, u, b = cp.R, cp.w, cp.Lambda_H,cp.Lambda_E, cp.Pi, cp.beta, cp.u, cp.b\n asset_grid, z_vals = cp.asset_grid, cp.z_vals\n new_V = np.empty(V.shape)\n new_h = np.empty(V.shape)\n new_l = np.empty(V.shape)\n z_idx = list(range(len(z_vals)))\n\n\n # === Linear interpolation of V along the asset grid === #\n #vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])\n vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])\n\n # === Solve r.h.s. 
of Bellman equation === #\n\n def do_bell(i_a):\n a = asset_grid[i_a]\n #print(a)\n for i_z, z in enumerate(z_vals):\n def obj(x): # objective function to be *minimized*\n y = sum(vf(x[0], j) * Pi[i_z, j] for j in z_idx)\n return - u(R*a +w*z*(1-x[1]) - x[0],x[1]) -x[0]*Lambda_H + z*x[1]*Lambda_E - beta * y \n bnds = ((b, cp.grid_max ),(0+1e-4,1- 1e-4))\n cons = ({'type': 'ineq', 'fun': lambda x: R * a + w*z*(1-x[1])-b -x[0]}, {'type': 'ineq', 'fun': lambda x: x[0]})\n h0 = [b, .438]\n #print(h0)\n h_star = optimize.minimize(obj, h0, bounds = bnds,constraints=cons)\n #h_star3= fminbound(obj, b, R * a + w*z + b)\n #print(obj(h_star.x[0]), obj(h_star3))\n if h_star.success != True:\n h_star = optimize.minimize(obj, h0, bounds = bnds,constraints=cons, options={'eps': 1.4901161193847656e-02, 'maxiter': 100, 'ftol': 1e-05})\n if h_star.success != True:\n print(h_star.message)\n #print(h_star.x[1],h_star.x[0])\n if h_star.x[1] == .4328:\n print(a)\n new_h[i_a, i_z],new_l[i_a, i_z], new_V[i_a, i_z] = h_star.x[0],h_star.x[1], -obj(h_star.x)\n if return_policy:\n return new_h[i_a,:], new_l[i_a, :]\n else:\n return new_V[i_a,:]\n\n rang = np.arange(len(asset_grid))\n Pool = ProcessingPool(96)\n new = Pool.map(do_bell, rang)\n #Pool.clear\n return np.asarray(new)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does message equalization work for simple, scalar messages in weighted mean and information matrix parameterization?
def test_GaussianWeightedMeanInfoScalar(self):
    msg_a = GaussianWeightedMeanInfoMessage([[0]], [[2]])
    msg_b = GaussianWeightedMeanInfoMessage([[2]], [[2]])
    msg_c = msg_a.combine(msg_b)
    self.assertEqual(msg_c.weighted_mean, [[2]])
    self.assertEqual(msg_c.info, [[4]])

    msg_a = GaussianWeightedMeanInfoMessage([[3]], [[3]])
    msg_b = GaussianWeightedMeanInfoMessage([[-12]], [[6]])
    msg_c = msg_a.combine(msg_b)
    self.assertEqual(msg_c.weighted_mean, [[-9]])
    self.assertEqual(msg_c.info, [[9]])
    msg_c = msg_c.convert(GaussianMeanCovMessage)
[ "def test_message_weighting(self):\n self._test(\n weights=self.instance(source=self.source, target=self.target, message=self.message, x_e=self.x_e),\n shape=self.message.shape,\n )", "def test_GaussianWeightedMeanInfoVector(self):\n\n msg_a = GaussianWeightedMeanInfoMessage(weighted_mean=[[1], [0]], info=[[2, 0], [0, 3]])\n msg_b = GaussianWeightedMeanInfoMessage(weighted_mean=[[2], [0]], info=[[3, 0], [0, 1]])\n msg_c = msg_a.combine(msg_b)\n np.testing.assert_allclose(msg_c.weighted_mean, [[3], [0]])\n np.testing.assert_allclose(msg_c.info, [[5, 0], [0, 4]])", "def normalize_msg(msgU,msgD,msgL,msgR):\r\n\r\n avg=np.mean(msgU,axis=2)\r\n msgU -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgD,axis=2)\r\n msgD -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgL,axis=2)\r\n msgL -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgR,axis=2)\r\n msgR -= avg[:,:,np.newaxis]\r\n\r\n return msgU,msgD,msgL,msgR", "def test_expectation_weighted(self):\n self._test_expectation(weights=self._generate_weights())", "def test_NormInvWish():\n\n # Test sample() method:\n mu_0 = np.arange(3.0)\n kappa_0 = 3.0\n Lam_0 = np.eye(3) + 0.01*np.arange(9).reshape(3,3)\n Lam_0 += Lam_0.T # To make symmetric\n nu_0 = 3\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n arr = prior.sample()\n assert isinstance(arr, np.void)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=1)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (1,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=(1,))\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (1,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=10)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (10,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=(10, 20))\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (10, 20)\n assert arr.dtype == prior.model_dtype\n\n # Test like1() method:\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n x = np.arange(3.0)\n mu = np.arange(3.0)+1.0\n Sig = np.eye(3) + 0.03*np.arange(9).reshape(3, 3)\n Sig += Sig.T\n arr = prior.like1(x, mu, Sig)\n assert isinstance(arr, float)\n\n # If trailing axis of x is not dim 3 (for these prior parameters), should get and AssertionError\n xbad = np.arange(2.0)\n np.testing.assert_raises(AssertionError, prior.like1, xbad, mu, Sig)\n\n # And similar checks for mu and Sig\n mubad = np.arange(4.0)\n np.testing.assert_raises(AssertionError, prior.like1, x, mubad, Sig)\n\n Sigbad = np.eye(2)\n np.testing.assert_raises(AssertionError, prior.like1, x, mu, Sigbad)\n\n # Try some non-trival broadcasts\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior.like1(x, mu, Sig)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n for i, r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i], Sig)\n\n theta = np.zeros((2,), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for i, r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i])\n\n mu = np.empty((3, 4, 3), dtype=float)\n Sig = np.empty((3, 4, 3, 3), dtype=float)\n for i in range(3):\n for j in range(4):\n mu[i, j] = np.arange(3.0)\n Sig[i, j] = np.eye(3)+0.1*i+0.2*j\n arr = prior.like1(x, mu, Sig)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i, j], Sig[i, j])\n\n theta = np.empty((3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i, j])\n\n mu 
= np.arange(6.0).reshape(2, 3)\n arr = prior.like1(x, mu[:, np.newaxis, np.newaxis, :], Sig)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i], Sig[j, k])\n\n theta = np.empty((2, 3, 4), dtype=prior.model_dtype)\n theta['mu'] = (np.arange(6.0).reshape(2, 3))[:, np.newaxis, np.newaxis, :]\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i, j, k])\n\n # Test __call__() method:\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n mu = np.arange(3.0)\n Sig = np.eye(3)\n arr = prior(mu, Sig)\n assert isinstance(arr, float)\n\n theta = np.zeros(1, dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta[0])\n assert isinstance(arr, float)\n assert arr == prior(mu, Sig)\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior(mu, Sig)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior(mu[i], Sig)\n\n theta = np.zeros(2, dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior(theta[i])\n\n mu = np.empty((3, 4, 3), dtype=float)\n Sig = np.empty((3, 4, 3, 3), dtype=float)\n for i in range(3):\n for j in range(4):\n mu[i, j] = np.arange(3.0)\n Sig[i, j] = np.eye(3)+0.1*i+0.2*j\n arr = prior(mu, Sig)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior(mu[i, j], Sig[i, j])\n\n theta = np.zeros((3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior(theta[i, j])\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior(mu[:, np.newaxis, np.newaxis, :], Sig)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior(mu[i], Sig[j, k])\n\n theta = np.zeros((2, 3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu[:, np.newaxis, np.newaxis, :]\n theta['Sig'] = Sig\n arr = prior(theta)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior(theta[i, j, k])\n\n # Should _post_params method do any broadcasting?\n\n # Test pred method():\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n x = np.arange(3.0)+1\n arr = prior.pred(x)\n assert isinstance(arr, float)\n\n x = np.arange(6.0).reshape(2, 3)\n arr = prior.pred(x)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior.pred(x[i])\n\n x = np.arange(24.0).reshape(2, 4, 3)\n arr = prior.pred(x)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2, 4)\n assert arr.dtype == float\n for (i, j), r in np.ndenumerate(arr):\n np.testing.assert_almost_equal(r, prior.pred(x[i, j]))", "def F(epsilon,FM,FC,BM,BC,y,MessageIn):\n\n A=[]\n for i in range(len(MessageIn)):\n A.append((pair(1,1)*np.exp(MessageIn[i])+pair(1,0))/(pair(0,1)*np.exp(MessageIn[i])+pair(0,0)))\n MessageOut = np.log(single(1,y,epsilon,FM,FC,BM,BC)/single(0,y,epsilon,FM,FC,BM,BC)) + sum(np.log(A))\n return MessageOut", "def test_get_prop_samples_broadcasts_weights_correctly():\n desired_data = [{\n \"solver\": {\n \"sublattice_site_ratios\": [1],\n \"sublattice_occupancies\": [[[0, 0]], [[1, 1]]],\n \"sublattice_configurations\": [[[\"CU\", \"MG\"]], [[\"CU\", \"MG\"]]],\n \"mode\": \"manual\"\n },\n \"conditions\": {\n \"P\": [0, 1], \"T\": [0, 1, 2, 3]},\n \"values\": [[[0, 1], [2, 3], [4, 
5], [6, 7]], [[8, 9], [10, 11], [12, 13], [14, 15]]],\n }]\n\n # No weight\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n assert np.all(np.isclose(np.asarray(calculate_dict[\"weights\"]), 1.0))\n\n # Scalar weight\n desired_data[0][\"weight\"] = 5.0\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n assert np.all(np.isclose(np.asarray(calculate_dict[\"weights\"]), 5.0))\n\n # 1D weights aligned in...\n # ... P\n desired_data[0][\"weight\"] = [[[1]], [[2]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"P\",calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # ... T\n desired_data[0][\"weight\"] = [[[1], [2], [3], [4]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"T\",calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # ... configs\n desired_data[0][\"weight\"] = [[[3, 4]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"CONFIGS\", calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # 3D weights aligned\n num_P = 2\n num_T = 4\n prescribed = [[(np.array([1, 2])*i*j).tolist() for j in range(1, num_T+1)] for i in range(1, num_P+1)]\n desired_data[0][\"weight\"] = prescribed\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])", "def test_message_weighting_no_message(self):\n if self.instance.needs_message:\n raise SkipTest(f\"{self.cls} needs messages for weighting them.\")\n self._test(weights=self.instance(source=self.source, target=self.target), shape=self.source.shape)", "def test_topic_weights(self):\n assert self.state.topic_weights == (10, 36, 6, 45)", "def _msgmsd(\n x: torch.Tensor,\n y: torch.Tensor,\n kernel: torch.Tensor,\n weights: torch.Tensor,\n alpha: float = 0.5,\n **kwargs,\n) -> torch.Tensor:\n\n gmsds = []\n\n for i in range(weights.numel()):\n if i > 0:\n x = F.avg_pool2d(x, kernel_size=2, ceil_mode=True)\n y = F.avg_pool2d(y, kernel_size=2, ceil_mode=True)\n\n gmsds.append(_gmsd(x, y, kernel, alpha=alpha, **kwargs))\n\n msgmsd = torch.stack(gmsds, dim=-1) ** 2\n msgmsd = torch.sqrt((msgmsd * weights).sum(dim=-1))\n\n return msgmsd", "def test_binary_average_precision_weighted():\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([0.1, 0.2, 0.3, 4])\n weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test1 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test2 failed\"\n\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([4, 3, 2, 1])\n weight = torch.Tensor([1, 2, 3, 4])\n ap = 
binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test3 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test4 failed\"\n\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([1, 4, 2, 3])\n weight = torch.Tensor([1, 2, 3, 4])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test5 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test6 failed\"\n\n target = torch.Tensor([0, 0, 0, 0])\n output = torch.Tensor([1, 4, 2, 3])\n weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = 0.0\n assert math.fabs(ap - val) < 0.01, \"ap test7 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = 0.0\n assert math.fabs(ap - val) < 0.01, \"ap test8 failed\"\n\n target = torch.Tensor([1, 1, 0])\n output = torch.Tensor([3, 1, 2])\n weight = torch.Tensor([1, 0.1, 3])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test9 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test10 failed\"\n\n # Test multiple K's\n target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)\n output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)\n weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n assert (\n math.fabs(\n ap.sum()\n - torch.Tensor(\n [\n (1 * 3.0 / 3.0 + 0 * 3.0 / 5.0 + 3.5 * 1 / 5.5 + 0 * 3.5 / 6.5)\n / 2.0,\n (0 * 1.0 / 1.0 + 1 * 0.5 / 1.5 + 0 * 0.5 / 3.5 + 1 * 3.5 / 6.5)\n / 2.0,\n ]\n ).sum()\n )\n < 0.01\n ), \"ap test11 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n assert (\n math.fabs(\n ap.sum()\n - torch.Tensor(\n [\n (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0) / 2.0,\n (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0) / 2.0,\n ]\n ).sum()\n )\n < 0.01\n ), \"ap test12 failed\"", "def compute_matrix_scores(ppi_matrix, training_ids, params):\n # building weighting vector \n if not hasattr(params, 'weighting') or params.weighting == \"uniform\":\n weights = np.ones(len(training_ids))\n weights /= np.sum(weights)\n scores = np.dot(ppi_matrix[:, training_ids], weights) \n\n elif params.weighting == \"sup\":\n # compute supervised weights\n weights = np.sum(ppi_matrix[training_ids, :][:, training_ids], axis=1)\n \n # normalize \n weights -= np.min(weights)\n weights /= np.sum(weights)\n weights += 1.0 / len(weights)\n weights = weights ** (-1)\n\n weights /= np.sum(weights)\n scores = np.dot(ppi_matrix[:, training_ids], weights) \n\n elif params.weighting == \"mle\":\n #train_pos = training_ids\n #X = ppi_matrix[:, train_pos]\n #N, D = 
X.shape\n\n #Y = np.zeros(N)\n #Y[train_pos] = 1\n\n #train_neg = get_negatives(Y, params.neg_examples*len(train_pos))\n #train_nodes = np.concatenate((train_pos, train_neg))\n #Y_train = Y[train_nodes]\n #X_train = X[train_nodes, :]\n #model = LogisticRegression(C = 1.0 / params.reg_L2, \n # fit_intercept = False, \n # class_weight = 'balanced')\n #model.fit(X_train, Y_train)\n #weights = np.array(model.coef_).reshape(-1)\n \n #Apply ReLU to Weights\n #weights += np.ones(len(training_ids))\n #weights /= np.sum(weights)\n #scores = np.dot(ppi_matrix[:, training_ids], weights) \n pass\n elif params.weighting == \"pca\":\n logging.error(\"Not Implemented\")\n \n elif params.weighting == \"max\":\n scores = np.max(ppi_matrix[:, training_ids], axis = 1)\n\n else: \n logging.error(\"Weighting scheme not recognized\")\n\n # compute scores \n return scores", "def translate_weights(self):\n pass", "def compute_similarity_transform(S1: torch.Tensor, S2: torch.Tensor) -> torch.Tensor:\n\n batch_size = S1.shape[0]\n S1 = S1.permute(0, 2, 1)\n S2 = S2.permute(0, 2, 1)\n # 1. Remove mean.\n mu1 = S1.mean(dim=2, keepdim=True)\n mu2 = S2.mean(dim=2, keepdim=True)\n X1 = S1 - mu1\n X2 = S2 - mu2\n\n # 2. Compute variance of X1 used for scale.\n var1 = (X1**2).sum(dim=(1,2))\n\n # 3. The outer product of X1 and X2.\n K = torch.matmul(X1, X2.permute(0, 2, 1))\n\n # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are singular vectors of K.\n U, s, V = torch.svd(K)\n Vh = V.permute(0, 2, 1)\n\n # Construct Z that fixes the orientation of R to get det(R)=1.\n Z = torch.eye(U.shape[1]).unsqueeze(0).repeat(batch_size, 1, 1)\n Z[:, -1, -1] *= torch.sign(torch.linalg.det(torch.matmul(U, Vh)))\n\n # Construct R.\n R = torch.matmul(torch.matmul(V, Z), U.permute(0, 2, 1))\n\n # 5. Recover scale.\n trace = torch.matmul(R, K).diagonal(offset=0, dim1=-1, dim2=-2).sum(dim=-1)\n scale = (trace / var1).unsqueeze(dim=-1).unsqueeze(dim=-1)\n\n # 6. Recover translation.\n t = mu2 - scale*torch.matmul(R, mu1)\n\n # 7. 
Error:\n S1_hat = scale*torch.matmul(R, S1) + t\n\n return S1_hat.permute(0, 2, 1)", "def test_weight_quantizer_ls1_modes():\n torch.manual_seed(1234)\n quantizer_ls1 = weight_quantization.WeightQuantizerLS1(32)\n w = torch.ones(32, 16, 3, 3) * 2\n\n quantizer_ls1.train()\n w_q_train = quantizer_ls1(w) # v1 should be 2 for all channels\n assert torch.all(w_q_train == 2.0)\n\n quantizer_ls1.eval()\n w = torch.rand(32, 16, 3, 3) # some random, but all positive tensor\n w_q_eval = quantizer_ls1(w)\n\n # since every element of matrix is quantized to +1, and scaling factor is 2\n assert torch.all(w_q_train.eq(w_q_eval))", "def mpn(self, hnode, hmess, agraph, bgraph, depth, W_m, W_n):\r\n messages = MPNN(hmess, bgraph, W_m, depth, self.hidden_size)\r\n mess_nei = index_select_ND(messages, 0, agraph)\r\n node_vecs = torch.cat((hnode, mess_nei.sum(dim=1)), dim=-1)\r\n node_vecs = W_n(node_vecs)\r\n return node_vecs, messages", "def test_score_broker_weight(self):\n assert self.score_lt({\n ('T0', 0): ['0'],\n ('T1', 0): ['0'],\n }, {\n ('T0', 0): ['0'],\n ('T1', 0): ['1'],\n })", "def weightedTransformUsing(*args, **kwargs):\n \n pass", "def test_model_matrix_structure(self):\n loader = ImageLoader(10, 10)\n\n train_dir = os.path.join(os.path.dirname(__file__), 'train_data')\n\n images, models, labels, names = loader.load_all_images_and_labels(train_dir, 2, 1)\n\n model = models[0]\n\n model.create_matrices()\n\n for edge, i in model.message_index.items():\n from_index = model.var_index[edge[0]]\n to_index = model.var_index[edge[1]]\n assert model.message_from[i] == from_index, \"Message sender index is wrong\"\n assert model.message_to[i] == to_index, \"Message receiver index is wrong\"\n assert model.message_to_map.getrow(i).getcol(to_index) == 1, \"Message receiver matrix map is wrong\"\n\n assert np.all(np.sum(model.message_to_map.todense(), axis=1) == 1), \\\n \"Message sender map has a row that doesn't sum to 1.0\"\n\n assert np.allclose(model.edge_pot_tensor[:, :, :model.num_edges],\n model.edge_pot_tensor[:, :, model.num_edges:]), \"Edge tensor structure is wrong\"\n assert np.allclose(model.edge_pot_tensor[:, :, :model.num_edges],\n model.edge_pot_tensor[:, :, model.num_edges:].transpose(1, 0, 2)), \\\n \"Edge tensor is not symmetric\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does message equalization work for simple, vector messages in weighted mean and information matrix parameterization?
def test_GaussianWeightedMeanInfoVector(self):
    msg_a = GaussianWeightedMeanInfoMessage(weighted_mean=[[1], [0]], info=[[2, 0], [0, 3]])
    msg_b = GaussianWeightedMeanInfoMessage(weighted_mean=[[2], [0]], info=[[3, 0], [0, 1]])
    msg_c = msg_a.combine(msg_b)
    np.testing.assert_allclose(msg_c.weighted_mean, [[3], [0]])
    np.testing.assert_allclose(msg_c.info, [[5, 0], [0, 4]])
[ "def test_GaussianWeightedMeanInfoScalar(self):\n\n msg_a = GaussianWeightedMeanInfoMessage([[0]], [[2]])\n msg_b = GaussianWeightedMeanInfoMessage([[2]], [[2]])\n msg_c = msg_a.combine(msg_b)\n self.assertEqual(msg_c.weighted_mean, [[2]])\n self.assertEqual(msg_c.info, [[4]])\n\n msg_a = GaussianWeightedMeanInfoMessage([[3]], [[3]])\n msg_b = GaussianWeightedMeanInfoMessage([[-12]], [[6]])\n msg_c = msg_a.combine(msg_b)\n self.assertEqual(msg_c.weighted_mean, [[-9]])\n self.assertEqual(msg_c.info, [[9]])\n msg_c = msg_c.convert(GaussianMeanCovMessage)", "def test_message_weighting(self):\n self._test(\n weights=self.instance(source=self.source, target=self.target, message=self.message, x_e=self.x_e),\n shape=self.message.shape,\n )", "def normalize_msg(msgU,msgD,msgL,msgR):\r\n\r\n avg=np.mean(msgU,axis=2)\r\n msgU -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgD,axis=2)\r\n msgD -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgL,axis=2)\r\n msgL -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgR,axis=2)\r\n msgR -= avg[:,:,np.newaxis]\r\n\r\n return msgU,msgD,msgL,msgR", "def F(epsilon,FM,FC,BM,BC,y,MessageIn):\n\n A=[]\n for i in range(len(MessageIn)):\n A.append((pair(1,1)*np.exp(MessageIn[i])+pair(1,0))/(pair(0,1)*np.exp(MessageIn[i])+pair(0,0)))\n MessageOut = np.log(single(1,y,epsilon,FM,FC,BM,BC)/single(0,y,epsilon,FM,FC,BM,BC)) + sum(np.log(A))\n return MessageOut", "def test_expectation_weighted(self):\n self._test_expectation(weights=self._generate_weights())", "def test_NormInvWish():\n\n # Test sample() method:\n mu_0 = np.arange(3.0)\n kappa_0 = 3.0\n Lam_0 = np.eye(3) + 0.01*np.arange(9).reshape(3,3)\n Lam_0 += Lam_0.T # To make symmetric\n nu_0 = 3\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n arr = prior.sample()\n assert isinstance(arr, np.void)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=1)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (1,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=(1,))\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (1,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=10)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (10,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=(10, 20))\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (10, 20)\n assert arr.dtype == prior.model_dtype\n\n # Test like1() method:\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n x = np.arange(3.0)\n mu = np.arange(3.0)+1.0\n Sig = np.eye(3) + 0.03*np.arange(9).reshape(3, 3)\n Sig += Sig.T\n arr = prior.like1(x, mu, Sig)\n assert isinstance(arr, float)\n\n # If trailing axis of x is not dim 3 (for these prior parameters), should get and AssertionError\n xbad = np.arange(2.0)\n np.testing.assert_raises(AssertionError, prior.like1, xbad, mu, Sig)\n\n # And similar checks for mu and Sig\n mubad = np.arange(4.0)\n np.testing.assert_raises(AssertionError, prior.like1, x, mubad, Sig)\n\n Sigbad = np.eye(2)\n np.testing.assert_raises(AssertionError, prior.like1, x, mu, Sigbad)\n\n # Try some non-trival broadcasts\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior.like1(x, mu, Sig)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n for i, r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i], Sig)\n\n theta = np.zeros((2,), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for i, r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i])\n\n mu = np.empty((3, 4, 3), dtype=float)\n 
Sig = np.empty((3, 4, 3, 3), dtype=float)\n for i in range(3):\n for j in range(4):\n mu[i, j] = np.arange(3.0)\n Sig[i, j] = np.eye(3)+0.1*i+0.2*j\n arr = prior.like1(x, mu, Sig)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i, j], Sig[i, j])\n\n theta = np.empty((3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i, j])\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior.like1(x, mu[:, np.newaxis, np.newaxis, :], Sig)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i], Sig[j, k])\n\n theta = np.empty((2, 3, 4), dtype=prior.model_dtype)\n theta['mu'] = (np.arange(6.0).reshape(2, 3))[:, np.newaxis, np.newaxis, :]\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i, j, k])\n\n # Test __call__() method:\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n mu = np.arange(3.0)\n Sig = np.eye(3)\n arr = prior(mu, Sig)\n assert isinstance(arr, float)\n\n theta = np.zeros(1, dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta[0])\n assert isinstance(arr, float)\n assert arr == prior(mu, Sig)\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior(mu, Sig)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior(mu[i], Sig)\n\n theta = np.zeros(2, dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior(theta[i])\n\n mu = np.empty((3, 4, 3), dtype=float)\n Sig = np.empty((3, 4, 3, 3), dtype=float)\n for i in range(3):\n for j in range(4):\n mu[i, j] = np.arange(3.0)\n Sig[i, j] = np.eye(3)+0.1*i+0.2*j\n arr = prior(mu, Sig)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior(mu[i, j], Sig[i, j])\n\n theta = np.zeros((3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior(theta[i, j])\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior(mu[:, np.newaxis, np.newaxis, :], Sig)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior(mu[i], Sig[j, k])\n\n theta = np.zeros((2, 3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu[:, np.newaxis, np.newaxis, :]\n theta['Sig'] = Sig\n arr = prior(theta)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior(theta[i, j, k])\n\n # Should _post_params method do any broadcasting?\n\n # Test pred method():\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n x = np.arange(3.0)+1\n arr = prior.pred(x)\n assert isinstance(arr, float)\n\n x = np.arange(6.0).reshape(2, 3)\n arr = prior.pred(x)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior.pred(x[i])\n\n x = np.arange(24.0).reshape(2, 4, 3)\n arr = prior.pred(x)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2, 4)\n assert arr.dtype == float\n for (i, j), r in np.ndenumerate(arr):\n np.testing.assert_almost_equal(r, prior.pred(x[i, j]))", "def compute_matrix_scores(ppi_matrix, training_ids, params):\n # building weighting vector \n if not hasattr(params, 'weighting') or params.weighting == \"uniform\":\n weights = 
np.ones(len(training_ids))\n weights /= np.sum(weights)\n scores = np.dot(ppi_matrix[:, training_ids], weights) \n\n elif params.weighting == \"sup\":\n # compute supervised weights\n weights = np.sum(ppi_matrix[training_ids, :][:, training_ids], axis=1)\n \n # normalize \n weights -= np.min(weights)\n weights /= np.sum(weights)\n weights += 1.0 / len(weights)\n weights = weights ** (-1)\n\n weights /= np.sum(weights)\n scores = np.dot(ppi_matrix[:, training_ids], weights) \n\n elif params.weighting == \"mle\":\n #train_pos = training_ids\n #X = ppi_matrix[:, train_pos]\n #N, D = X.shape\n\n #Y = np.zeros(N)\n #Y[train_pos] = 1\n\n #train_neg = get_negatives(Y, params.neg_examples*len(train_pos))\n #train_nodes = np.concatenate((train_pos, train_neg))\n #Y_train = Y[train_nodes]\n #X_train = X[train_nodes, :]\n #model = LogisticRegression(C = 1.0 / params.reg_L2, \n # fit_intercept = False, \n # class_weight = 'balanced')\n #model.fit(X_train, Y_train)\n #weights = np.array(model.coef_).reshape(-1)\n \n #Apply ReLU to Weights\n #weights += np.ones(len(training_ids))\n #weights /= np.sum(weights)\n #scores = np.dot(ppi_matrix[:, training_ids], weights) \n pass\n elif params.weighting == \"pca\":\n logging.error(\"Not Implemented\")\n \n elif params.weighting == \"max\":\n scores = np.max(ppi_matrix[:, training_ids], axis = 1)\n\n else: \n logging.error(\"Weighting scheme not recognized\")\n\n # compute scores \n return scores", "def _msgmsd(\n x: torch.Tensor,\n y: torch.Tensor,\n kernel: torch.Tensor,\n weights: torch.Tensor,\n alpha: float = 0.5,\n **kwargs,\n) -> torch.Tensor:\n\n gmsds = []\n\n for i in range(weights.numel()):\n if i > 0:\n x = F.avg_pool2d(x, kernel_size=2, ceil_mode=True)\n y = F.avg_pool2d(y, kernel_size=2, ceil_mode=True)\n\n gmsds.append(_gmsd(x, y, kernel, alpha=alpha, **kwargs))\n\n msgmsd = torch.stack(gmsds, dim=-1) ** 2\n msgmsd = torch.sqrt((msgmsd * weights).sum(dim=-1))\n\n return msgmsd", "def test_get_prop_samples_broadcasts_weights_correctly():\n desired_data = [{\n \"solver\": {\n \"sublattice_site_ratios\": [1],\n \"sublattice_occupancies\": [[[0, 0]], [[1, 1]]],\n \"sublattice_configurations\": [[[\"CU\", \"MG\"]], [[\"CU\", \"MG\"]]],\n \"mode\": \"manual\"\n },\n \"conditions\": {\n \"P\": [0, 1], \"T\": [0, 1, 2, 3]},\n \"values\": [[[0, 1], [2, 3], [4, 5], [6, 7]], [[8, 9], [10, 11], [12, 13], [14, 15]]],\n }]\n\n # No weight\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n assert np.all(np.isclose(np.asarray(calculate_dict[\"weights\"]), 1.0))\n\n # Scalar weight\n desired_data[0][\"weight\"] = 5.0\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n assert np.all(np.isclose(np.asarray(calculate_dict[\"weights\"]), 5.0))\n\n # 1D weights aligned in...\n # ... P\n desired_data[0][\"weight\"] = [[[1]], [[2]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"P\",calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # ... 
T\n desired_data[0][\"weight\"] = [[[1], [2], [3], [4]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"T\",calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # ... configs\n desired_data[0][\"weight\"] = [[[3, 4]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"CONFIGS\", calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # 3D weights aligned\n num_P = 2\n num_T = 4\n prescribed = [[(np.array([1, 2])*i*j).tolist() for j in range(1, num_T+1)] for i in range(1, num_P+1)]\n desired_data[0][\"weight\"] = prescribed\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])", "def update_feature_vectors(self, h, edge_index, messages):\n\n row, col = edge_index\n #message_aggregate = unsorted_segment_sum(messages, row, num_segments = h.size(0), device = self.device)\n message_aggregate = unsorted_segment_sum(messages, row, num_segments = h.size(1), device = self.device)\n feature_inputs = torch.cat([h, message_aggregate], dim = 2)\n #feature_inputs = torch.cat([h, message_aggregate], dim = 1)\n feature_inputs = feature_inputs.to(self.device)\n out = self.feature_mlp(feature_inputs)\n out = out.to(self.device)\n return out, message_aggregate", "def test_message_weighting_no_message(self):\n if self.instance.needs_message:\n raise SkipTest(f\"{self.cls} needs messages for weighting them.\")\n self._test(weights=self.instance(source=self.source, target=self.target), shape=self.source.shape)", "def compute_similarity_transform(S1: torch.Tensor, S2: torch.Tensor) -> torch.Tensor:\n\n batch_size = S1.shape[0]\n S1 = S1.permute(0, 2, 1)\n S2 = S2.permute(0, 2, 1)\n # 1. Remove mean.\n mu1 = S1.mean(dim=2, keepdim=True)\n mu2 = S2.mean(dim=2, keepdim=True)\n X1 = S1 - mu1\n X2 = S2 - mu2\n\n # 2. Compute variance of X1 used for scale.\n var1 = (X1**2).sum(dim=(1,2))\n\n # 3. The outer product of X1 and X2.\n K = torch.matmul(X1, X2.permute(0, 2, 1))\n\n # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are singular vectors of K.\n U, s, V = torch.svd(K)\n Vh = V.permute(0, 2, 1)\n\n # Construct Z that fixes the orientation of R to get det(R)=1.\n Z = torch.eye(U.shape[1]).unsqueeze(0).repeat(batch_size, 1, 1)\n Z[:, -1, -1] *= torch.sign(torch.linalg.det(torch.matmul(U, Vh)))\n\n # Construct R.\n R = torch.matmul(torch.matmul(V, Z), U.permute(0, 2, 1))\n\n # 5. Recover scale.\n trace = torch.matmul(R, K).diagonal(offset=0, dim1=-1, dim2=-2).sum(dim=-1)\n scale = (trace / var1).unsqueeze(dim=-1).unsqueeze(dim=-1)\n\n # 6. Recover translation.\n t = mu2 - scale*torch.matmul(R, mu1)\n\n # 7. 
Error:\n S1_hat = scale*torch.matmul(R, S1) + t\n\n return S1_hat.permute(0, 2, 1)", "def translate_weights(self):\n pass", "def mpn(self, hnode, hmess, agraph, bgraph, depth, W_m, W_n):\r\n messages = MPNN(hmess, bgraph, W_m, depth, self.hidden_size)\r\n mess_nei = index_select_ND(messages, 0, agraph)\r\n node_vecs = torch.cat((hnode, mess_nei.sum(dim=1)), dim=-1)\r\n node_vecs = W_n(node_vecs)\r\n return node_vecs, messages", "def expectation_value(self, vec1, ham, vec2):", "def similarity(self, embedded, w, b, center=None):\n N = self.opt.speaker_num\n M = self.opt.utter_num \n P = self.opt.embedding_size\n ##S = opt.segment_num\n '''if self.opt.train_type == 'multi_attention' or self.opt.train_type == 'divide_attention':\n P = self.opt.embedding_size * self.opt.attention_head_num \n else: \n P = self.opt.embedding_size'''\n ##embedded_mean = torch.cat([torch.mean(embedded[i*S:(i+1)*S,:], dim=0, keepdim=True) for i in range(N*M)], dim=0)\n embedded_split = torch.reshape(embedded, (N, M, P))\n \n if center is None:\n center = self.normalize(torch.mean(embedded_split, dim=1)) # [N,P] normalized center vectors eq.(1)\n center_except = self.normalize(torch.reshape(torch.sum(embedded_split, dim=1, keepdim=True)\n - embedded_split, (N*M,P))) # [NM,P] center vectors eq.(8)\n # make similarity matrix eq.(9)\n S = torch.cat(\n [torch.cat([torch.sum(center_except[i*M:(i+1)*M,:]*embedded_split[j,:,:], dim=1, keepdim=True) if i==j\n else torch.sum(center[i:(i+1),:]*embedded_split[j,:,:], dim=1, keepdim=True) for i in range(N)],\n dim=1) for j in range(N)], dim=0)\n else :\n # If center(enrollment) exist, use it.\n S = torch.cat(\n [torch.cat([torch.sum(center[i:(i + 1), :] * embedded_split[j, :, :], dim=1, keepdim=True) for i\n in range(N)], dim=1) for j in range(N)], dim=0)\n \n if self.opt.loss_type.split('_')[1] == 'softmax' or self.opt.loss_type.split('_')[1] == 'contrast':\n S = torch.abs(w)*S + b # rescaling\n \n return S", "def _create_m_objective(w, X):\n clusters, cells = w.shape\n genes = X.shape[0]\n w_sum = w.sum(1)\n def objective(m):\n m = m.reshape((X.shape[0], w.shape[0]))\n d = m.dot(w)+eps\n temp = X/d\n w2 = w.dot(temp.T)\n deriv = w_sum - w2.T\n return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes\n return objective", "def test_topic_weights(self):\n assert self.state.topic_weights == (10, 36, 6, 45)", "def test_binary_average_precision_weighted():\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([0.1, 0.2, 0.3, 4])\n weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test1 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test2 failed\"\n\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([4, 3, 2, 1])\n weight = torch.Tensor([1, 2, 3, 4])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test3 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test4 failed\"\n\n target = torch.Tensor([0, 1, 0, 1])\n 
output = torch.Tensor([1, 4, 2, 3])\n weight = torch.Tensor([1, 2, 3, 4])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test5 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test6 failed\"\n\n target = torch.Tensor([0, 0, 0, 0])\n output = torch.Tensor([1, 4, 2, 3])\n weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = 0.0\n assert math.fabs(ap - val) < 0.01, \"ap test7 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = 0.0\n assert math.fabs(ap - val) < 0.01, \"ap test8 failed\"\n\n target = torch.Tensor([1, 1, 0])\n output = torch.Tensor([3, 1, 2])\n weight = torch.Tensor([1, 0.1, 3])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test9 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test10 failed\"\n\n # Test multiple K's\n target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)\n output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)\n weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n assert (\n math.fabs(\n ap.sum()\n - torch.Tensor(\n [\n (1 * 3.0 / 3.0 + 0 * 3.0 / 5.0 + 3.5 * 1 / 5.5 + 0 * 3.5 / 6.5)\n / 2.0,\n (0 * 1.0 / 1.0 + 1 * 0.5 / 1.5 + 0 * 0.5 / 3.5 + 1 * 3.5 / 6.5)\n / 2.0,\n ]\n ).sum()\n )\n < 0.01\n ), \"ap test11 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n assert (\n math.fabs(\n ap.sum()\n - torch.Tensor(\n [\n (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0) / 2.0,\n (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0) / 2.0,\n ]\n ).sum()\n )\n < 0.01\n ), \"ap test12 failed\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a KML file with multiple Documents into geojson to store in an output file
def kml_multiple_to_geojson(infile_path, outdir_path, geojson_properties={}):
    data = __read_file(infile_path)
    coord_dict = __get_all_coords(data)
    if not os.path.exists(outdir_path):
        os.makedirs(outdir_path)
    for section_id, coords in list(coord_dict.items()):
        filename = "%s.json" % section_id
        path = os.path.join(outdir_path, filename)
        outfile = file(path, 'w')
        dump = __to_geojson(coords)
        outfile.write(dump)
        outfile.close()
[ "def kml_to_geojson(infile_path, outfile_path, geojson_properties={}):\n data = __read_file(infile_path) \n \tcoords = __get_coords(data)\n\toutfile = open(outfile_path, 'w')\n\toutfile.write(__to_geojson(coords))\n\toutfile.close()", "def kml_to_geometry(cls, filename):\n # open local\n with open(filename) as f:\n kml = bf.data(fromstring(f.read()))['kml']['Document']['Folder']['GroundOverlay']\n kml = kml['{http://www.google.com/kml/ext/2.2}LatLonQuad']['coordinates']['$']\n coordinates = [list(map(float, pair.split(','))) for pair in kml.split(' ')]\n coordinates.append(coordinates[0])\n return cls.coordinates_to_geometry(coordinates)", "def convert_json_to_geojson(input_json, output_geojson):\n \n with open(input_json) as f:\n gj = json.load(f)\n \n ### new geojson created from json\n gj1 = {}\n gj1['type'] = 'FeatureCollection'\n gj1['generator'] = gj['generator']\n gj1['copyright'] = gj['osm3s']['copyright']\n gj1['timestamp'] = gj['osm3s']['timestamp_osm_base']\n gj1['features'] = []\n\n for i, f in enumerate(gj['elements']):\n\n if f['type'] == 'way':\n pixels = []\n for p in f['geometry']:\n pixels.append([p['lon'], p['lat']])\n f1 = {}\n f1['type'] = 'Feature'\n f1['properties'] = f['tags']\n f1['bbox'] = [f['bounds']['minlon'], f['bounds']['minlat'], f['bounds']['maxlon'], f['bounds']['maxlat']]\n f1['geometry'] = {'type': 'LineString',\n 'coordinates': pixels}\n f1['id'] = f['type'] + '/' + str(f['id'])\n gj1['features'].append(f1)\n else: # f['type'] == 'node' or 'relation\n continue\n\n # write geojson to new file\n with open(output_geojson, 'w') as f:\n # method 1: f.write(json.dumps(gj))\n json.dump(gj1, f, indent = 2)", "def extractshapes(shape_path, wkt_or_json=\"JSON\"):\n l=[]\n driver = ogr.GetDriverByName('ESRI Shapefile')\n ds = driver.Open(shape_path, 0)\n if ds is None:\n print 'Can not open', ds\n sys.exit(1)\n lyr = ds.GetLayer()\n totfeats = lyr.GetFeatureCount()\n lyr.SetAttributeFilter('')\n print 'Starting to load %s of %s features in shapefile %s...' % (lyr.GetFeatureCount(), totfeats, lyr.GetName())\n pbar = ProgressBar(maxval=lyr.GetFeatureCount()).start()\n k=0\n # iterate the features and access its attributes (including geometry) to store them in MongoDb\n feat = lyr.GetNextFeature()\n while feat:\n geom = feat.GetGeometryRef()\n #mongogeom = geom.ExportToWkt()\n #mongogeom = geom.ExportToJson()\n #print geom.ExportToJson()\n if wkt_or_json.upper()==\"JSON\":\n g = geom.ExportToJson()\n elif wkt_or_json.upper()==\"WKT\":\n g = geom.ExportToWkt()\n \n # iterate the feature's fields to get its values and store them in MongoDb\n feat_defn = lyr.GetLayerDefn()\n for i in range(feat_defn.GetFieldCount()):\n value = feat.GetField(i)\n if isinstance(value, str):\n value = unicode(value, 'latin-1')\n field = feat.GetFieldDefnRef(i)\n fieldname = field.GetName()\n d[fieldname] = value\n l.append(g) \n feat.Destroy()\n feat = lyr.GetNextFeature()\n k = k + 1\n pbar.update(k)\n pbar.finish()\n return l", "def read_and_write_json(output_geojson):\n with open('us_sample.geojson', 'r') as sample_geojson_file:\n data = json.load(sample_geojson_file)\n\n for feature in data['features']:\n # Get state name to query. 
Overwrite the existing properties\n # with new properties needed to generate data on the map.\n state = feature['properties']['name']\n new_properties = generate_replacement_properties(state)\n feature['properties'] = new_properties\n\n fema_geojson_file = open(output_geojson, 'w')\n fema_geojson_file.write(json.dumps(data, indent=2))\n\n sample_geojson_file.close()\n fema_geojson_file.close()", "def write_geojson_file(self, file_path):\n with open(file_path, 'w') as f:\n f.write(format_to_geojson(self.all))", "def read_geojson_features(filename):\n # ogr2ogr has a ?bug that the JSON file isn't utf8. not sure what's going on.\n json_data = open(filename).read().decode('utf8', 'replace')\n json_data = json.loads(json_data)\n assert json_data['type'] == 'FeatureCollection'\n for feat in json_data['features']:\n shape = shapely.geometry.shape(feat['geometry'])\n feat['shapely'] = shape\n del feat['geometry']\n return json_data['features']", "def toc_example(layer_cnt,infiles,out_file_name):\r\n # \"toc\" is the dictionary that will be encoded to GeoJSON\r\n toc = {}\r\n toc[\"name\"] = \"NewFeatureType\"\r\n toc[\"type\"] = \"FeatureCollection\"\r\n toc[\"crs\"] = {\"type\":\"name\",\r\n # \"properties\" : {\"name\":\"urn:ogc:def:crs:OGC:1.3:CRS83\"}\r\n # FixMe: Get CRS from data.\r\n # This example uses the GeoJSON default: EPSG:4326\r\n }\r\n \r\n # \"features\" is the list that holds all of the features in the GeoJSON\r\n features = []\r\n\r\n for cnt in range(len(infiles)): \r\n \r\n # file name management and \"path\" determination\r\n head, tail = os.path.split(infiles[cnt])\r\n base, ext = os.path.splitext(tail)\r\n \r\n path = \"./\" + tail\r\n path = path.replace(\"\\\\\",\"/\")\r\n print path\r\n \r\n # get the shapefile's \"amigos\"\r\n amigo_extensions = get_amigo_extensions(infiles[cnt])\r\n \r\n # get the extents of the data\r\n driver = ogr.GetDriverByName('ESRI Shapefile') #FixMe: could be any Vector file type\r\n datasource = driver.Open(infiles[cnt], 0)\r\n layer = datasource.GetLayer()\r\n extent = layer.GetExtent()\r\n \r\n # create a GeoJSON feature for the file\r\n features.append({\r\n \"type\":\"Feature\",\r\n \"geometry\":{\"type\": \"Polygon\",\r\n \"coordinates\":[[\r\n [extent[0],extent[3]], #UL X,Y\r\n [extent[1],extent[3]], #UR X,Y \r\n [extent[1],extent[2]], #LR X,Y\r\n [extent[0],extent[2]], #LL X,Y\r\n [extent[0],extent[3]] #UL X,Y\r\n ]]},\r\n \"properties\":{\r\n \"PATH\": path,\r\n \"EXTS\": amigo_extensions,\r\n \"LAYERS\":layer_cnt[cnt], \r\n \"WEO_MISCELLANEOUS_FILE\":\"No\",\r\n \"WEO_TYPE\":\"WEO_FEATURE\"\r\n }\r\n })\r\n \r\n # Create WeoGeo's LOOK_UP_TABLE Feature\r\n layers_properties = {}\r\n layers_properties[\"WEO_TYPE\"] = \"LOOK_UP_TABLE\"\r\n for cnt in range(len(layer_cnt)+1):\r\n layers_properties[str(cnt)] = \"WEOALL=WEOALL\" \r\n # Example:\r\n # 0 : \"WEOALL=WEOALL\"\r\n # 1 : \"WEOALL=WEOALL\"\r\n # etc.\r\n \r\n # Add the LOOK_UP_TABLE Feature to the features list\r\n features.append(\r\n {\r\n \"type\":\"Feature\",\r\n \"geometry\": None,\r\n \"properties\": layers_properties\r\n }\r\n )\r\n \r\n # add the features list to the ToC dictionary\r\n toc[\"features\"] = features\r\n \r\n # create a JSON object\r\n e = json.JSONEncoder()\r\n \r\n # encode the ToC dictionary as (Geo)JSON\r\n # and write the results to a text file\r\n out = open(out_file_name, \"w\")\r\n out.write(e.encode(toc))\r\n out.close()", "def write_geojson(self, outfile):\n\n logging.info('Writing GeoJSON: %s' % outfile)\n\n with open(outfile, 'wb') as f:\n 
f.write(json.dumps(self.geojson, indent=4))", "def output_geojson(jsn):\n geoj = jsn[0]\n geoj2 = json.dumps(geoj)\n geoj3 = geojson.loads(geoj2)\n return (geoj3, geoj)", "def ljson_exporter(lmk_points, filepath, **kwargs):\n\n lmk_points[np.isnan(lmk_points)] = None\n\n lmk_points = [list(_tmp) for _tmp in lmk_points]\n\n ljson = {\n 'version': 2,\n 'labels': [],\n 'landmarks': {\n 'points': lmk_points\n }\n }\n\n with open(filepath, \"w\") as file_handle:\n\n return json.dump(ljson, file_handle, indent=4, separators=(',', ': '),\n sort_keys=True, allow_nan=False, ensure_ascii=False)", "def index_geojson(self, path):\n\n datadir = f'{path}/data'\n logging.debug(\"Indexing %s\", datadir)\n\n for root, _, files in os.walk(datadir):\n for file in files:\n if '.geojson' in file or '.json' in file:\n srcfile = os.path.join(root, file)\n match = re.search(r'(whereonearth-[^/]*)', srcfile)\n if not match:\n logging.warning(\n 'This file doesn\\'t smell like WhereOnEarth GeoJSON: %s',\n srcfile\n )\n else:\n self.source = match.group(1)\n with open(srcfile, encoding='UTF-8') as ifh:\n src = json.load(ifh)\n\n if 'type' in src and src['type'] == 'Feature':\n self.index_feature(src, srcfile)\n\n elif 'type' in src and src['type'] == 'FeatureCollection':\n for feature in src['features']:\n self.index_feature(feature, srcfile)\n\n else:\n logging.warning('This file doesn\\t smell like GeoJSON: %s', srcfile)\n\n if self.docs:\n logging.info(\"places %s final counter @ %s\", self.version, self.counter)\n self.counter += len(self.docs)\n self._add(self.docs)\n self.docs = []\n\n logging.info(\"Added %s GeoJSON features\", self.counter)\n logging.info(\"Finished indexing %s\", datadir)", "def write_geojson(way_results, node_results, outfp):\n feats = way_results\n\n for node in node_results:\n if not node['properties']['dead_end']:\n node['properties']['intersection'] = 1\n if node['properties']['highway'] == 'traffic_signals':\n node['properties']['signal'] = 1\n feats.append(geojson.Feature(\n geometry=geojson.Point(node['geometry']['coordinates']),\n properties=node['properties'])\n )\n\n feat_collection = geojson.FeatureCollection(feats)\n with open(outfp, 'w') as outfile:\n geojson.dump(feat_collection, outfile)", "def to_kml(path):\n out = (\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<kml xmlns=\"http://earth.google.com/kml/2.1\">\n <Document>\n <Placemark>\n <LineString>\n <extrude>1</extrude>\n <tessellate>1</tessellate>\n <coordinates>\"\"\")\n out += \" \".join(\"%f,%f\" % (loc[::-1]) for loc in path)\n return out + (\"\"\"</coordinates>\n </LineString>\n </Placemark>\n <Placemark>\n <name>Start</name>\n <Point>\n <coordinates>%f,%f,0</coordinates>\n </Point>\n </Placemark>\n <Placemark>\n <name>End</name>\n <Point>\n <coordinates>%f,%f,0</coordinates>\n </Point>\n </Placemark>\n </Document>\n</kml>\"\"\" % ((path[0][::-1]) + (path[-1][::-1])))", "def save_geometric_features(features_dict, output_path, file_name):\n output_path += \"/\"\n path = join(output_path, file_name + \"_\" +\n SAVE_GEOMETRICAL_FEATURES)\n with open(path, 'w') as output_file:\n for k, v in features_dict.items():\n output_file.write(k + \": \" + v + \"\\n\")", "def test_read_and_write_KGML_files(self):\n for p in self.data:\n # Test opening file\n with open(p.infilename, 'rU') as f:\n pathway = read(f)\n # Do we have the correct number of elements of each type\n self.assertEqual((len(pathway.entries), \n len(pathway.orthologs),\n len(pathway.compounds),\n len(pathway.maps)),\n p.element_counts)\n # Test writing 
file\n with open(p.outfilename, 'w') as f:\n f.write(pathway.get_KGML())\n # Can we read the file we wrote?\n with open(p.outfilename, 'rU') as f:\n pathway = read(f)\n # Do we have the correct number of elements of each type\n self.assertEqual((len(pathway.entries), \n len(pathway.orthologs),\n len(pathway.compounds),\n len(pathway.maps)),\n p.element_counts)", "def shp_to_json(base_path, shp_path, name):\n print \" -- Projecting shapefile to WGS-84 and converting to JSON\"\n\n # define ogr drivers\n shp_driver = ogr.GetDriverByName('ESRI Shapefile')\n json_driver = ogr.GetDriverByName('GeoJSON')\n\n # define the input layer\n shp = shp_driver.Open(shp_path)\n shp_lyr = shp.GetLayer()\n\n # create the output layer\n json_path = os.path.join(base_path, name + \".geojson\")\n if os.path.exists(json_path):\n json_driver.DeleteDataSource(json_path)\n json = json_driver.CreateDataSource(json_path)\n json_lyr = json.CreateLayer(json_path, geom_type=ogr.wkbMultiPolygon)\n json_lyr_defn = json_lyr.GetLayerDefn()\n\n # create the CoordinateTransformation\n json_ref = osr.SpatialReference()\n json_ref.ImportFromEPSG(4326)\n coord_trans = osr.CoordinateTransformation(\n shp_lyr.GetSpatialRef(), json_ref)\n\n # add fields to output layer\n shp_lyr_defn = shp_lyr.GetLayerDefn()\n for i in range(0, shp_lyr_defn.GetFieldCount()):\n field_defn = shp_lyr_defn.GetFieldDefn(i)\n json_lyr.CreateField(field_defn)\n\n # loop through the input features\n shp_feat = shp_lyr.GetNextFeature()\n while shp_feat:\n # reproject the input geometry\n geom = shp_feat.GetGeometryRef()\n geom.Transform(coord_trans)\n # create a new feature\n json_feat = ogr.Feature(json_lyr_defn)\n # set the feature's geometry and attributes\n json_feat.SetGeometry(geom)\n for i in range(0, json_lyr_defn.GetFieldCount()):\n json_feat.SetField(\n json_lyr_defn.GetFieldDefn(i).GetNameRef(),\n shp_feat.GetField(i))\n # add new feature to output Layer\n json_lyr.CreateFeature(json_feat)\n # destroy the features and get the next input feature\n json_feat.Destroy()\n shp_feat.Destroy()\n shp_feat = shp_lyr.GetNextFeature()\n\n # close the datasets\n shp.Destroy()\n json.Destroy()\n\n return json_path", "def run(input_shapefile: \"Input Shapefile\" =\"counties/ctygeom.shp\"):\n # Ceate outline geojson structure\n geojson = {\"type\": \"FeatureCollection\", \"features\": [], \"crs\": {\"type\": \"EPSG\", \"properties\": {\"code\": None}}, \"bbox\": []}\n\n num_ticks = 60\n # input_shapefile = input(\"Enter the path (if necessary) and name fo the input shapefile: \")\n\n # print(\"{}\".format(\"=\" * num_ticks))\n # print(\"Getting information for '{}'\".format(input_shapefile))\n # print(\"{}\\n\".format(\"-\" * num_ticks))\n logging.info(\"Getting information for '{}'\".format(input_shapefile))\n\n try:\n with fiona.open(input_shapefile, \"r\") as fh:\n logging.info(\"Driver: \\t{}\".format(fh.driver))\n logging.info(\"Encoding:\\t{}\".format(fh.encoding))\n logging.info(\"Geometry:\\t{}\".format(fh.schema[\"geometry\"]))\n logging.info(\"CRS: \\t{}\".format(fh.crs[\"init\"].upper()))\n logging.info(\"Bounds: \\t{}\".format(fh.bounds))\n logging.info(\"Features \\t{}\".format(len(fh)))\n\n print(\"Attribute Types\")\n\n # Add crs and bbox properties to the geojson structure\n geojson[\"crs\"][\"properties\"][\"code\"] = int(fh.crs[\"init\"].split(\":\")[1])\n geojson[\"bbox\"] = fh.bounds\n\n header_string = \"\"\n csv_header = \"\"\n for k, v in fh.schema[\"properties\"].items():\n print(\"\\t{:10}\\t{}\".format(k, v))\n header_string += 
\"\\t{:>30}\".format(k)\n csv_header += \"{}\\t\".format(k)\n print(\"\\n\"+header_string)\n\n with open(input_shapefile.split(\".\")[0]+\".csv\", \"w\") as fh_csv:\n fh_csv.write(\"{}\\n\".format(csv_header[:-1]))\n for feature in fh:\n # add each feature to geojson structure, Fiona gives it to us in a suitable format so no further processing\n # required\n geojson[\"features\"].append(feature)\n\n data_string = \"\"\n csv_data = \"\"\n for k,v in feature[\"properties\"].items():\n data_string+= \"\\t{:>30}\".format(v)\n csv_data += \"{}\\t\".format(v)\n print(data_string)\n fh_csv.write(\"{}\\n\".format(csv_data[:-1]))\n\n # Create output geojson file and convert geojson python stucture to json\n with open(input_shapefile.split(\".\")[0]+\".json\", \"w\") as fh:\n fh.write(json.dumps(geojson))\n\n except Exception as e:\n print(e)\n quit()\n finally:\n print(\"{}\".format(\"=\" * num_ticks))", "def to_geojson(path: Path) -> Any:\n temp = Path.cwd() / \"temp.geojson\"\n\n # Transform shapefile to GeoJSON\n shp_file = geopandas.read_file(path)\n shp_file.to_file(temp, driver=\"GeoJSON\")\n\n # Open the GeoJSON file\n with open(temp, \"r\") as f:\n geojson = json.load(f)\n\n temp.unlink(missing_ok=True)\n return geojson" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a KML file to geojson to store in an output file.
def kml_to_geojson(infile_path, outfile_path, geojson_properties={}):
    data = __read_file(infile_path)
    coords = __get_coords(data)
    outfile = open(outfile_path, 'w')
    outfile.write(__to_geojson(coords))
    outfile.close()
[ "def kml_multiple_to_geojson(infile_path, outdir_path, geojson_properties={}):\n data = __read_file(infile_path)\n coord_dict = __get_all_coords(data)\n if not os.path.exists(outdir_path):\n os.makedirs(outdir_path) \n for section_id, coords in list(coord_dict.items()):\n filename = \"%s.json\" % section_id\n path = os.path.join(outdir_path, filename)\n outfile = file(path,'w')\n dump = __to_geojson(coords)\n outfile.write(dump)\n outfile.close()", "def kml_to_geometry(cls, filename):\n # open local\n with open(filename) as f:\n kml = bf.data(fromstring(f.read()))['kml']['Document']['Folder']['GroundOverlay']\n kml = kml['{http://www.google.com/kml/ext/2.2}LatLonQuad']['coordinates']['$']\n coordinates = [list(map(float, pair.split(','))) for pair in kml.split(' ')]\n coordinates.append(coordinates[0])\n return cls.coordinates_to_geometry(coordinates)", "def convert_json_to_geojson(input_json, output_geojson):\n \n with open(input_json) as f:\n gj = json.load(f)\n \n ### new geojson created from json\n gj1 = {}\n gj1['type'] = 'FeatureCollection'\n gj1['generator'] = gj['generator']\n gj1['copyright'] = gj['osm3s']['copyright']\n gj1['timestamp'] = gj['osm3s']['timestamp_osm_base']\n gj1['features'] = []\n\n for i, f in enumerate(gj['elements']):\n\n if f['type'] == 'way':\n pixels = []\n for p in f['geometry']:\n pixels.append([p['lon'], p['lat']])\n f1 = {}\n f1['type'] = 'Feature'\n f1['properties'] = f['tags']\n f1['bbox'] = [f['bounds']['minlon'], f['bounds']['minlat'], f['bounds']['maxlon'], f['bounds']['maxlat']]\n f1['geometry'] = {'type': 'LineString',\n 'coordinates': pixels}\n f1['id'] = f['type'] + '/' + str(f['id'])\n gj1['features'].append(f1)\n else: # f['type'] == 'node' or 'relation\n continue\n\n # write geojson to new file\n with open(output_geojson, 'w') as f:\n # method 1: f.write(json.dumps(gj))\n json.dump(gj1, f, indent = 2)", "def read_and_write_json(output_geojson):\n with open('us_sample.geojson', 'r') as sample_geojson_file:\n data = json.load(sample_geojson_file)\n\n for feature in data['features']:\n # Get state name to query. 
Overwrite the existing properties\n # with new properties needed to generate data on the map.\n state = feature['properties']['name']\n new_properties = generate_replacement_properties(state)\n feature['properties'] = new_properties\n\n fema_geojson_file = open(output_geojson, 'w')\n fema_geojson_file.write(json.dumps(data, indent=2))\n\n sample_geojson_file.close()\n fema_geojson_file.close()", "def to_geojson(path: Path) -> Any:\n temp = Path.cwd() / \"temp.geojson\"\n\n # Transform shapefile to GeoJSON\n shp_file = geopandas.read_file(path)\n shp_file.to_file(temp, driver=\"GeoJSON\")\n\n # Open the GeoJSON file\n with open(temp, \"r\") as f:\n geojson = json.load(f)\n\n temp.unlink(missing_ok=True)\n return geojson", "def write_geojson_file(self, file_path):\n with open(file_path, 'w') as f:\n f.write(format_to_geojson(self.all))", "def ljson_exporter(lmk_points, filepath, **kwargs):\n\n lmk_points[np.isnan(lmk_points)] = None\n\n lmk_points = [list(_tmp) for _tmp in lmk_points]\n\n ljson = {\n 'version': 2,\n 'labels': [],\n 'landmarks': {\n 'points': lmk_points\n }\n }\n\n with open(filepath, \"w\") as file_handle:\n\n return json.dump(ljson, file_handle, indent=4, separators=(',', ': '),\n sort_keys=True, allow_nan=False, ensure_ascii=False)", "def write_geojson(self, outfile):\n\n logging.info('Writing GeoJSON: %s' % outfile)\n\n with open(outfile, 'wb') as f:\n f.write(json.dumps(self.geojson, indent=4))", "def to_kml(path):\n out = (\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<kml xmlns=\"http://earth.google.com/kml/2.1\">\n <Document>\n <Placemark>\n <LineString>\n <extrude>1</extrude>\n <tessellate>1</tessellate>\n <coordinates>\"\"\")\n out += \" \".join(\"%f,%f\" % (loc[::-1]) for loc in path)\n return out + (\"\"\"</coordinates>\n </LineString>\n </Placemark>\n <Placemark>\n <name>Start</name>\n <Point>\n <coordinates>%f,%f,0</coordinates>\n </Point>\n </Placemark>\n <Placemark>\n <name>End</name>\n <Point>\n <coordinates>%f,%f,0</coordinates>\n </Point>\n </Placemark>\n </Document>\n</kml>\"\"\" % ((path[0][::-1]) + (path[-1][::-1])))", "def write_features_to_json_file(input_image_path, data, features_model_path):\n object_id = get_object_id(input_image_path)\n features_file_path = get_features_json_file_path(input_image_path)\n features_data = {\n \"id\": object_id,\n \"model\": features_model_path.split('/')[-1],\n \"data\": data\n }\n with open(features_file_path, 'w') as outfile:\n json.dump(features_data, outfile, sort_keys=True, indent=2)\n return features_data", "def read_geojson_features(filename):\n # ogr2ogr has a ?bug that the JSON file isn't utf8. 
not sure what's going on.\n json_data = open(filename).read().decode('utf8', 'replace')\n json_data = json.loads(json_data)\n assert json_data['type'] == 'FeatureCollection'\n for feat in json_data['features']:\n shape = shapely.geometry.shape(feat['geometry'])\n feat['shapely'] = shape\n del feat['geometry']\n return json_data['features']", "def pbf2geojson(pbf_file, geojson_file):\n\n command_success = False\n\n if not bash_command_exists(\"osmium\"):\n raise Exception(\"'osmium' is not detected\" )\n\n osmium_call = [\n \"osmium\",\n \"export\",\n pbf_file,\n \"-o\",\n geojson_file,\n \"-u\",\n \"type_id\",\n \"--overwrite\",\n \"--output-format\",\n \"geojson\"\n ]\n\n try:\n print(' '.join(osmium_call) )\n with open(geojson_file, \"w\") as sp_log:\n with subprocess.Popen(osmium_call, stdout=sp_log, stderr=subprocess.PIPE) as sp:\n sp.wait()\n sp_stdout, sp_stderr = sp.communicate()\n\n sp_log.flush()\n\n if sp_stderr:\n logging.warning(sp_stderr)\n command_success = True\n except CalledProcessError as ee:\n #logging.exception(\"Could not execute command! stderr:\\n{}\".format(sp_stderr) )\n raise Exception(ee)\n\n\n return {\n \"pbf_file\": pbf_file,\n \"geojson_file\": geojson_file,\n \"success\": command_success\n }", "def to_geojson(cls, geopandas_dataframe, name='object.geojson', in_utm=False):\n\n # Convert to utm\n if in_utm is True:\n geopandas_dataframe = geopandas_dataframe.to_crs(epsg=3395)\n\n print('Saving as a geoJSON file as {name}'.format(name=name))\n with open(name, 'w') as file:\n file.write(geopandas_dataframe.to_json())", "def shp_to_json(base_path, shp_path, name):\n print \" -- Projecting shapefile to WGS-84 and converting to JSON\"\n\n # define ogr drivers\n shp_driver = ogr.GetDriverByName('ESRI Shapefile')\n json_driver = ogr.GetDriverByName('GeoJSON')\n\n # define the input layer\n shp = shp_driver.Open(shp_path)\n shp_lyr = shp.GetLayer()\n\n # create the output layer\n json_path = os.path.join(base_path, name + \".geojson\")\n if os.path.exists(json_path):\n json_driver.DeleteDataSource(json_path)\n json = json_driver.CreateDataSource(json_path)\n json_lyr = json.CreateLayer(json_path, geom_type=ogr.wkbMultiPolygon)\n json_lyr_defn = json_lyr.GetLayerDefn()\n\n # create the CoordinateTransformation\n json_ref = osr.SpatialReference()\n json_ref.ImportFromEPSG(4326)\n coord_trans = osr.CoordinateTransformation(\n shp_lyr.GetSpatialRef(), json_ref)\n\n # add fields to output layer\n shp_lyr_defn = shp_lyr.GetLayerDefn()\n for i in range(0, shp_lyr_defn.GetFieldCount()):\n field_defn = shp_lyr_defn.GetFieldDefn(i)\n json_lyr.CreateField(field_defn)\n\n # loop through the input features\n shp_feat = shp_lyr.GetNextFeature()\n while shp_feat:\n # reproject the input geometry\n geom = shp_feat.GetGeometryRef()\n geom.Transform(coord_trans)\n # create a new feature\n json_feat = ogr.Feature(json_lyr_defn)\n # set the feature's geometry and attributes\n json_feat.SetGeometry(geom)\n for i in range(0, json_lyr_defn.GetFieldCount()):\n json_feat.SetField(\n json_lyr_defn.GetFieldDefn(i).GetNameRef(),\n shp_feat.GetField(i))\n # add new feature to output Layer\n json_lyr.CreateFeature(json_feat)\n # destroy the features and get the next input feature\n json_feat.Destroy()\n shp_feat.Destroy()\n shp_feat = shp_lyr.GetNextFeature()\n\n # close the datasets\n shp.Destroy()\n json.Destroy()\n\n return json_path", "def output_geojson(jsn):\n geoj = jsn[0]\n geoj2 = json.dumps(geoj)\n geoj3 = geojson.loads(geoj2)\n return (geoj3, geoj)", "def 
create_geojson(csv_file_open_path, file_save_path_name):\n data = []\n with open(csv_file_open_path,'r') as f:\n reader = csv.reader(f)\n for record in reader:\n data.append(record)\n\n data = data[1::]\n \n for record in data:\n # shorten latitude to 4 decimal places\n lat = record[1].split('.')\n lat_diff = len(lat[1])-4\n lat[1] = lat[1][0:len(lat[1])-lat_diff]\n record[1] = float(lat[0] + '.' + lat[1])\n \n # shorten longitude to 4 decimal places\n long = record[2].split('.')\n long_diff = len(long[1])-4\n long[1] = long[1][0:len(long[1])-long_diff]\n record[2] = float(long[0] + '.' + long[1])\n \n record[4] = record[4].replace(',', ', ')\n record[7] = record[7].replace(',', ', ')\n \n data_file = {\"type\": \"FeatureCollection\"}\n data_file[\"crs\"] = {\"type\":\"name\",\"properties\":{\"name\":\"urn:ogc:def:crs:OGC:1.3:CRS84\"}}\n features_list = []\n \n for coop in data:\n features_list.append(create_feature_dict(coop))\n \n data_file[\"features\"] = features_list\n \n with open(file_save_path_name, 'w') as ff:\n ff.write(json.dumps(data_file))", "def save_geometric_features(features_dict, output_path, file_name):\n output_path += \"/\"\n path = join(output_path, file_name + \"_\" +\n SAVE_GEOMETRICAL_FEATURES)\n with open(path, 'w') as output_file:\n for k, v in features_dict.items():\n output_file.write(k + \": \" + v + \"\\n\")", "def geojson_to_shp(infile,outfile):\n cmd = \"ogr2ogr\"\n driver = \"ESRI Shapefile\"\n\n st,r = sp.getstatusoutput(cmd + \" --version\")\n\n if st == 0:\n process = sp.Popen([cmd, \"-f\", driver, outfile, infile])\n else:\n print(\"Couldn't find {}, please install GDAL\".format(cmd))", "def csv_to_geojson(in_csv, out_geojson, x=\"longitude\", y=\"latitude\"):\n import pandas as pd\n import geopandas as gpd\n\n if not os.path.exists(in_csv):\n raise FileNotFoundError(\"The input csv does not exist.\")\n\n if not out_geojson.lower().endswith(\".geojson\"):\n raise ValueError(\"out_geojson must have the .geojson file extension.\")\n\n out_dir = os.path.dirname(out_geojson)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n df = pd.read_csv(in_csv)\n col_names = df.columns.values.tolist()\n\n if x not in col_names:\n raise ValueError(f\"x must be one of the following: {', '.join(col_names)}\")\n\n if y not in col_names:\n raise ValueError(f\"y must be one of the following: {', '.join(col_names)}\")\n\n gdf = gpd.GeoDataFrame(\n df, crs=\"epsg:4326\", geometry=gpd.points_from_xy(df[x], df[y])\n )\n gdf.to_file(out_geojson, driver=\"GeoJSON\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates kml located at filepath.
def validate_kml(filepath, schema = "https://developers.google.com/kml/schema/kml21.xsd"):
    schema_gomaps = Schema(schema)
    data = ""
    with open(filepath, "r") as temp:
        data = temp.read()
    assert(data != "")
    doc = parser.fromstring(data)
    return schema_gomaps.assertValid(doc)
[ "def validate(self):\n file_content = filesystem_utils.get_content(self._file_path)\n self.parse_yaml_file(file_content)", "def validate_file(file_path):\n if not os.path.exists(file_path):\n raise FileNotFoundError(\"[Error] File not found!\")", "def __validateFile(self):\n\n xml_schema_doc = etree.parse(TemplateReader.TemplateXSD)\n xmlSchema = etree.XMLSchema(xml_schema_doc)\n \n return xmlSchema.validate(self.template_xml)", "def validate_file(filename, keywords, dripconf=False):\n if not isinstance(filename, str):\n log.error(\"Filename must be a string, received %s\" % type(filename))\n return False\n if not isinstance(keywords, DataFrame):\n log.error(\"Keywords must be %s, received %s\" %\n (DataFrame, type(keywords)))\n return False\n if not os.path.isfile(filename):\n log.error(\"Not a file: %s\" % filename)\n return False\n header = fits.getheader(filename)\n return validate_header(header, keywords, dripconf=dripconf)", "def check_labels_file(labels_file):\n test=True\n # existence of file\n if not os.path.isfile(labels_file):\n test=False\n # reading\n try:\n ssp.load_npz(labels_file)\n except ValueError:\n test=False\n\n # return\n return test", "def _check_input_path(self, input_path):", "def file_valid(self,\n file_path: Path):\n if file_path.exists():\n return True\n else:\n logger.critical('File directory or file name is incorrect. Aborted')\n quit()", "def validate(self):\n print(\"Validating {f}\".format(f=self.file))\n\n with open(self.file, 'r') as xmlfile:\n xml = xmlfile.read()\n # see if it is valid XML first\n try:\n etree.XML(xml)\n except etree.XMLSyntaxError:\n self.valid = False\n return\n # create an instance of the Validator\n xmlValidator = XmlValidator(open(specpath, 'rt').read())\n try:\n xmlValidator.validate(xml)\n self.valid = True\n except XmlValidationError as err:\n print(err)\n self.valid = False", "def __validate_tree(file, log):\n global num_nodes, words, errors\n\n log.write(\"loading tree\\n\")\n __load_tree(file, log)\n log.write(\"finished loading tree\\n\")\n if len(words.keys()) != (num_nodes):\n diff = abs(len(words.keys()) - num_nodes ) # exclude *root*\n log.write(\"validation error: missing nodes \"+str(diff)+\"\\n\")\n errors += 1", "def validate(self, in_filename, file_format, entrypoint_name=None):\n\n if file_format == FileFormat.JSON:\n self.from_JSON(in_filename, entrypoint_name)\n else:\n self.from_XML(in_filename, entrypoint_name)", "def _valid_metadata_file_path(path: str) -> bool:\n return METADATA_FILE_NAME in path and CONNECTORS_PATH in path and \"-scaffold-\" not in path", "def validate_path(self, path: str) -> bool:\n pass", "def validate_view(path):\n print(f\" validating {path}...\")\n\n # JSON schema for /views\n view_schema_file = _VALID_SCHEMA_TYPES[\"view\"][\"file\"]\n data = run_validator(data_file=path, schema_file=view_schema_file)\n namecheck_schema(path, data)\n\n print(f\"✓ {path} is valid.\")\n return data", "def _validate_certificatefile(self, name, filename):\n\n filename = sanatizefilename(filename)\n\n if not os.path.isfile(filename):\n print_error(\"{} at {} does not exist\".format(name, os.path.realpath(filename)))\n return False\n elif not self._tryload_certificatefile(filename):\n print_error(\"Could not load {}\".format(name))\n return False\n else:\n return True", "def should_check_file(self, filename):\n raise NotImplementedError", "def validate_cms_file_exist(filename, sample_id, data_path):\n return validate_file_exist(filename=filename.format(sample_id), data_path=data_path)", "def 
validateyaml(ctx, yamllintrc='.yamllint', filename='strings.yml'):\n ctx.run('yamllint --config-file %s %s' % (yamllintrc, filename))", "def test_loads_from_file_is_searching_in_rkd_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\nversion: org.riotkit.rkd/yaml/v1\nimports: []\ntasks: \n :join:iwa-ait:\n description: Subscribe to any local section of IWA-AIT, workers have common interest\n arguments:\n - not a list\n ''')\n\n try:\n self.assertRaises(YAMLFileValidationError,\n lambda: yaml_loader.load_from_file('makefile.yml', 'org.riotkit.rkd/yaml/v1'))\n finally:\n d.cleanup()\n os.environ['RKD_PATH'] = ''", "def validate_file_input(input_file: Path):\n assert input_file.exists(), \"The path doesn't exist\"\n assert input_file.is_file(), \"The path isn't a file\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a section into a kml file
def section_to_kml(section, color, outfile_path="", write=True):
    line_style_id = "line-%s-5" % color
    red = "FF1212"
    green = "00B80C"
    start_icon_style_id = "icon-%s" % color
    end_icon_style_id = "icon-%s" % color
    make_coord = lambda p: (",".join([str(x) for x in p["track_location"]["coordinates"]] + ["0.0"]))
    make_coord_point = lambda p: (",".join([str(x) for x in p["coordinates"]] + ["0.0"]))
    style_id = "style-%s" % section['section_start_time']
    pm = KML.Placemark(
        KML.styleUrl("#%s" % line_style_id),
        KML.name(section['_id']),
        KML.description(section["section_id"]),
        KML.LineString(
            KML.tessellate(1),
            KML.coordinates(" ".join(
                [make_coord(track_point) for track_point in section['track_points']]))
        )
    )
    start_point = section['section_start_point']
    end_point = section['section_end_point']
    start_time = mongodate_to_datetime(section["section_start_time"])
    end_time = mongodate_to_datetime(section["section_end_time"])
    start_point = KML.Placemark(
        KML.styleUrl("#%s" % start_icon_style_id),
        KML.name("Start: %s" % start_time),
        KML.description("Starting point"),
        KML.Point(KML.coordinates(make_coord_point(start_point)))
    )
    end_point = KML.Placemark(
        KML.styleUrl("#%s" % end_icon_style_id),
        KML.name("End: %s" % end_time),
        KML.description("Ending point"),
        KML.Point(KML.coordinates(make_coord_point(end_point)))
    )
    line_style = KML.Style(
        KML.LineStyle(
            KML.color("ff%s" % color),
            KML.width("5")
        )
    )
    line_style.set("id", line_style_id)
    start_icon_style = KML.Style(
        KML.IconStyle(
            KML.color("ff%s" % color),
            KML.scale("1.1"),
            KML.Icon(
                KML.href("http://www.gstatic.com/mapspro/images/stock/503-wht-blank_maps.png")
            )
        )
    )
    start_icon_style.set("id", start_icon_style_id)
    end_icon_style = KML.Style(
        KML.IconStyle(
            KML.color("ff%s" % color),
            KML.scale("1.1"),
            KML.Icon(
                KML.href("http://www.gstatic.com/mapspro/images/stock/503-wht-blank_maps.png")
            )
        )
    )
    end_icon_style.set("id", end_icon_style_id)
    fld = KML.Folder(
        KML.name(section['_id']),
        KML.description("From %s \nto %s" % (start_time, end_time)),
        pm,
        start_point,
        end_point
    )
    if write:
        kml = KML.kml(KML.Document(fld, section["user_id"]))
        path = os.path.join(outfile_path, str(section['user_id']) +'.kml')
        outfile = file(path,'w')
        outfile.write(etree.tostring(kml, pretty_print=True))
    else:
        return fld, line_style, start_icon_style, end_icon_style
[ "def section(c32, name):\n\n entries = documents.entries\n\n if 'document' == name:\n return c32.template('2.16.840.1.113883.3.88.11.32.1')\n if 'allergies' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.102')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.2')\n\n el.entries = entries\n return el\n if 'demographics' == name:\n return c32.template('2.16.840.1.113883.3.88.11.32.1')\n if 'encounters' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.127')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.3')\n\n el.entries = entries\n return el\n if 'immunizations' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.117')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.6')\n\n el.entries = entries\n return el\n if 'results' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.122')\n el.entries = entries\n return el\n if 'medications' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.112')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.8')\n\n el.entries = entries\n return el\n if 'problems' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.103')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.11')\n\n el.entries = entries\n return el\n if 'procedures' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.108')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.12')\n\n el.entries = entries\n return el\n if 'vitals' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.119')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.16')\n\n el.entries = entries\n return el\n\n\n return None", "def write_kml(self, outfile):\n\n logging.info('Writing KML: %s' % outfile)\n raise ValueError('Writing KML not implemented')", "def writeSegmentDetailsKml(outPath,singleSimulation,nodes):", "def kmlConvert(sitelist):\n\n kml = simplekml.Kml()\n\n\n style_dict = {\"Red\": \"http://maps.google.com/mapfiles/kml/paddle/red-blank.png\", \"Yellow\": \"http://maps.google.com/mapfiles/kml/paddle/ylw-blank.png\", \"Green\": \"http://maps.google.com/mapfiles/kml/paddle/grn-blank.png\", \"New\": \"http://maps.google.com/mapfiles/kml/paddle/blu-blank.png\"}\n\n if args.internal:\n pass\n else:\n if tandemEdSite:\n site_style.iconstyle.icon.href = \"http://www.tandembayarea.org/wp-content/uploads/2015/08/tandemEduMarker.png\"\n else:\n site_style.iconstyle.icon.href = \"http://www.tandembayarea.org/wp-content/uploads/2015/08/tandemPartMarker.png\"\n\n for site in sitelist:\n pnt = kml.newpoint()\n site_style = simplekml.Style()\n pnt.name = site.name\n site_style.iconstyle.icon.href = style_dict[site.status]\n\n if args.internal:\n pnt.description = \"%s \\n\\n %s\" % (site.staffLead, site.address)\n else:\n if tandemEdSite:\n pnt.description = \"For questions about this site please contact %s. 
\\n %s\" % (site.staffLead, site.email)\n else:\n pnt.description = \"A Tandem Partner Site\"\n\n pnt.coords = [(site.longitude, site.latitude)]\n pnt.style = site_style\n\n outputfile = raw_input(\"Please select output filename: \") + \".kml\"\n kml.save(outputfile)\n print \"File saved!\"\n\n return", "def writeSection(self):\n p = os.path.join(self.basePath, \"section/index.html\")\n self.writeLayout(\n p, body=self.sectionsIndex(), basePath=\"..\", title=\"Sections Index\"\n )\n for sName, s in sorted(self.schema.sections.items()):\n basePath = \"../..\"\n p = os.path.join(self.basePath, f\"section/{sName}/index.html\")\n sec = s.section\n body = self.metaDesc(sec, basePath=basePath)\n self.writeLayout(\n p,\n body=body,\n title=f\"Section {sName}\",\n basePath=basePath,\n extra=self.mathExtra(basePath),\n )\n for vName, v in s.valueEntries.items():\n basePath = \"../../..\"\n p = os.path.join(self.basePath, f\"section/{sName}/value/{vName}.html\")\n body = self.metaDesc(v, basePath=basePath)\n self.writeLayout(\n p,\n body=body,\n title=f\"Value {sName}\",\n basePath=basePath,\n extra=self.mathExtra(basePath),\n )\n for dName, d in s.dimensions.items():\n basePath = \"../../..\"\n p = os.path.join(\n self.basePath, f\"section/{sName}/dimension/{dName}.html\"\n )\n body = self.metaDesc(d, basePath=basePath)\n self.writeLayout(\n p,\n body=body,\n title=f\"Dimension {sName}\",\n basePath=basePath,\n extra=self.mathExtra(basePath),\n )", "def generateKML(self):\n k = kml.KML()\n k.append(self.generateDocument())\n return k.to_string(prettyprint=True)", "def write(self):\n output = open(self.filename, \"w\")\n output.write(self.generateKML())\n output.close()\n self.logger.info(\"KML File Written: {}\".format(self.filename))", "def get_section(section):", "def _recipe_create_stakkr_config(config: dict):\n with open('stakkr.yml', 'w') as outfile:\n dump(config, outfile, default_flow_style=False)", "def to_kml(path):\n out = (\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<kml xmlns=\"http://earth.google.com/kml/2.1\">\n <Document>\n <Placemark>\n <LineString>\n <extrude>1</extrude>\n <tessellate>1</tessellate>\n <coordinates>\"\"\")\n out += \" \".join(\"%f,%f\" % (loc[::-1]) for loc in path)\n return out + (\"\"\"</coordinates>\n </LineString>\n </Placemark>\n <Placemark>\n <name>Start</name>\n <Point>\n <coordinates>%f,%f,0</coordinates>\n </Point>\n </Placemark>\n <Placemark>\n <name>End</name>\n <Point>\n <coordinates>%f,%f,0</coordinates>\n </Point>\n </Placemark>\n </Document>\n</kml>\"\"\" % ((path[0][::-1]) + (path[-1][::-1])))", "def prepare_files_in_section(spittal_sub_instance, section):\n\n # With s being config section and p being Oasis profile type.\n for skey, sitems in section.iteritems():\n if skey in ['dict', 'version']:\n for pkey, pitems in sitems.iteritems():\n data_name = skey + \"_\" + pkey\n filepath = os.path.join(\n section['directory_path'],\n pitems['filename']\n )\n spittal_sub_instance.prepare_file(\n data_name,\n filepath,\n pitems['module_supplier_id'],\n section['do_timestamps']\n )", "def create_section(self, level: int, section: str) -> None:\n self.add_output(section)\n self.add_output(self.sections[level] * len(section.rstrip()), line_breaks=2)", "def getcfpsectionvalue(cfpfilename: str, sectionname: str):\n cfpin, cfpinpath = getcfp(cfpfilename)\n if not cfpin.has_section(sectionname):\n print(f\"seticon {sectionname} is not exists. 
Then creating it now ...\")\n cfpin.add_section(sectionname)\n cfpin.write(open(cfpinpath, 'w', encoding='utf-8'))\n return\n else:\n return cfpin.items(f'{sectionname}')", "def section_to_config(section):\n new_conf = ConfigObj()\n for (key, value) in section.items():\n new_conf[key] = value\n return new_conf", "def translate_section(data):\n sect_str = \"\"\n elements = data.get(\"Elements\", [])\n for elem in elements:\n print(\" Translating \" + elem[\"Type\"])\n sect_str += translate_map[elem[\"Type\"]](elem)\n return sect_str", "def parse(self, section='main'):\n self.parser = ConfigParser()\n self.parser.readfp(self.file_object)\n root_recipe = self.parse_section(section)\n return root_recipe", "def _l2t_section(label, include_section, include_marker):\n if include_marker:\n marker = u'§ '\n else:\n marker = ''\n\n if include_section:\n # Regulation Text with section number\n if len(label) == 2: # e.g. 225-2\n return marker + '.'.join(label)\n else: # e.g. 225-2-b-4-i-A\n return marker + '%s.%s(%s)' % (label[0], label[1],\n ')('.join(label[2:]))\n else:\n # Regulation Text without section number\n if len(label) == 2: # e.g. 225-2\n return marker + label[1]\n else: # e.g. 225-2-b-4-i-A\n return marker + '%s(%s)' % (label[1], ')('.join(label[2:]))", "def export2kml(self):\n\n print 'export data to kml'\n self.mymodel = self.ui.tableView.model()\n\n # check for valid model\n if not self.check_valid_model(self.mymodel):\n return\n\n fn = QtGui.QFileDialog.getSaveFileName(None, 'Save File', os.getenv('HOME'), u\"Αρχεία kml (*.kml)\")\n\n if not fn:\n return\n\n self.kmlfilename = myfunctions.fix_file_extension(fn, '.kml')\n\n # set state of widgets\n self.set_widgets_state()\n\n # generic thread using signal\n self.genericThread2 = GenericThread(self.generate_kml_stigmata, self.mymodel)\n self.disconnect(self, QtCore.SIGNAL(\"sendpositions\"), self.write_kml)\n\n # connect signal \"sendpositions\" with method write_kml\n self.connect(self, QtCore.SIGNAL(\"sendpositions\"), self.write_kml)\n\n self.connect(self, QtCore.SIGNAL(\"updateprogressbar\"), lambda i: self.progressbar.setValue(i))\n self.genericThread2.start()", "def find_section_text(lines, section, go_to_end=False, section2=\"\"):\n if len(lines) == 0:\n return \"\"\n n = 0\n for line in lines:\n line_mod = line.replace(\" \", \"\")\n if line_mod.startswith(\"==%s\" % section) \\\n or (section2 != \"\" and line_mod.startswith(\"==%s\" % section2)):\n # Section started\n n += 1\n doc = \"\"\n # collect the documents till next section or the end \n newline = lines[n]\n while (go_to_end or not newline.strip().startswith('==')) \\\n and not newline.strip().startswith('[[Category'):\n doc += newline + '\\n'\n n += 1\n if n < len(lines):\n newline = lines[n]\n else:\n break\n return doc\n n += 1\n \n return \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads in UUIDs from the file user_uuid.secret
def read_uuids():
    from uuid import UUID
    f = open("user_uuid.secret","r")
    user_uuid = {}
    for line in f:
        user, uuid = [c.strip() for c in line.split(":")]
        user_uuid[user] = UUID(uuid)
    return user_uuid
[ "def read_secrets_file( secrets_file ):\n try:\n sf = open( secrets_file, \"r\" )\n except Exception, e:\n raise MDMethodFailed( \"read_secrets_file\", e )\n\n users = []\n lineno = 0\n while True:\n line = sf.readline()\n lineno += 1\n \n if len(line) == 0:\n break\n\n line = line.strip()\n if len(line) == 0:\n continue\n\n parts = line.split(\":\")\n if len(parts) != 3:\n logger.warn(\"Invalid line %s in %s\" % (lineno, secret_line))\n continue\n \n users.append( user_entry( int(parts[0]), parts[1], parts[2] ) )\n\n return users", "def get_secret(filename):\n def creator():\n length = 10\n return b64_encode(os.urandom(length))\n return read_unique(filename, creator)", "def read():\n global accounts, users\n try:\n f = open(\"accounts.dat\", \"rb\")\n except FileNotFoundError:\n return None\n rd = f.read()\n users, accounts = json.loads(base64.b64decode(zlib.decompress(rd[65:])).decode())\n f.close()", "def read_user_credentials(file_path):\n return read_file(file_path)", "def read_uuids_below(relative_path):\n with open(os.path.join(git_repo_abs_dir(), \n relative_path, \n 'uuid_dict.json'), 'r') as fp:\n uuid_dict = json.load(fp)\n return dict(uuid_dict)", "def get_existing_users():\n with open(users_credentials, \"r\") as f:\n next(f)\n for line in f:\n line = line.strip()\n user, password, title = line.split(\"|\")\n yield user, password, title\n f.close()", "def readAccounts():\n try:\n with open(USERFILE, 'r') as usersFile:\n return json.load(usersFile)\n except FileNotFoundError:\n print(\"User file not found. Exiting.\")\n exit(1)\n except json.JSONDecodeError:\n print(\"No entries in user account file.\"\n \" Please add an account and try again.\")\n exit(1)", "def read_id(file):\n return file.read(4)", "def _remove_uuid_from_list(self, uuid: str, file_name: str):\n\n check_if_file_exists(file_name)\n \n with open(file_name, \"r\") as f:\n lines = f.readlines()\n with open(file_name, \"w\") as f:\n for line in lines:\n if line.strip(\"\\n\") != uuid:\n f.write(line)", "def generate_uuid(self):\n try:\n uuid_file = open(PATHS.UUID_FILE, \"rb\")\n self.uuid = pickle.load(uuid_file)\n LOG.log('info', 'UUID FOUND! Your UUID is ', self.uuid)\n except FileNotFoundError:\n self.uuid = uuid.uuid1().int\n uuid_file = open(PATHS.UUID_FILE, \"wb\")\n pickle.dump(self.uuid, uuid_file)\n uuid_file.close()\n LOG.log('info', \"UUID was not found, generated a new one. 
Your new UUID is \", self.uuid)", "def read_password():\n with open(passwordfile,'r') as handle:\n read = handle.read()\n return read", "def _read_pw_file(self):\n import codecs\n\n with open(self.password_file, \"r\") as f:\n pwstring = codecs.decode(f.read(), \"rot_13\")\n (username, password) = pwstring.split(\",\", 2)\n return (username, password)", "def get_noname_uids():\n #\n uids = []\n with open('../data/guids_no_name.dat', 'r') as datain:\n for ln in datain:\n ln = ln.strip()\n if not isinstance(ln, int) and ln.isnumeric:\n ln = int(ln)\n uids.append(ln)\n uids.sort()\n\n with open('../data/uvaids_no_name.dat', 'w') as outfile:\n for uid in uids:\n usr = loaduser(uid)\n outfile.write(\"{}|{}\\n\".format(uid, usr['name']))\n print(\"Wrote {} guids with UVA IDs to ../data/uvaids_no_name.dat\".format(len(uids)))", "def testReadFileObjectUUID(self):\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n\n definitions_file = self._GetTestFilePath([u'uuid.yaml'])\n with open(definitions_file, 'rb') as file_object:\n definitions_reader.ReadFileObject(definitions_registry, file_object)\n\n self.assertEqual(len(definitions_registry._definitions), 1)\n\n data_type_definition = definitions_registry.GetDefinitionByName(u'uuid')\n self.assertIsInstance(\n data_type_definition, data_types.UUIDDefinition)\n self.assertEqual(data_type_definition.name, u'uuid')\n self.assertEqual(\n data_type_definition.byte_order, definitions.BYTE_ORDER_LITTLE_ENDIAN)\n self.assertEqual(data_type_definition.size, 16)\n self.assertEqual(data_type_definition.units, u'bytes')\n\n byte_size = data_type_definition.GetByteSize()\n self.assertEqual(byte_size, 16)", "def read_all_seed_ids():\n raw_file = open('./data/raw/raw_data.txt', 'r').read().splitlines()\n raw_file = [seq.split(' ') for seq in raw_file]\n seed_id_list = [seq[1] for seq in raw_file]\n return seed_id_list", "def read_users(users_file: Path) -> List[Dict[str, str]]:\n with open(users_file, 'r') as handler:\n xs = handler.read()\n\n # Generate username dictionary\n users = xs.split()\n logger.info(f\"Adding users to database:\\n{users}\")\n return [{\"username\": u} for u in users]", "def parse_vmx_file(self):\n\t\tfd = open(self.vmx_path,\"r\")\n\t\tdata = fd.readlines()\n\t\tfor line in data:\n\t\t\tif \"uuid.bios\" in line:\n\t\t\t\t# Convert this into a UUID\n\t\t\t\ttemp = line.split(\" = \")[1].lstrip().strip().replace(\" \",\"-\").replace(\"\\\"\",\"\")\n\t\t\t\t#print(temp)\n\t\t\t\tuuid_obj = uuid.UUID(temp)\n\t\t\t\tprint(uuid_obj)\n\t\t\t\tself.uuid = uuid_obj\n\t\tfd.close()", "def get_secrets():\n secret_fn = os.getenv(\"ARTIFACT_TRACKER_SECRETS\")\n secret_file_name = secret_fn or os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"secrets\", \"secrets\")\n\n if not os.path.isfile(secret_file_name):\n from artifact_tracker.utils.secrets import make_secrets\n make_secrets()\n\n with open(secret_file_name) as sf:\n key = sf.readline().strip()\n salt = sf.readline().strip()\n if not key or not salt:\n raise ValueError(f\"Error, unexpected data in the\"\n f\" secrets file: {secret_file_name}.\"\n f\"If you are starting for the first time,\"\n f\"please run the create_secrets file.\")\n return key, salt", "def import_unique_passwords(self, storage, infile):\n self.import_passwords(storage, infile, unique_check=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run art_illumina read simulator
def sim_reads_art(workdir, coverage=30, readlen=150, meanfrag=400, insertsd=50, instrument="HS25"):
    ret = cmd_exe("which art_illumina")
    if ret.ret_code != 0:
        logging.error("Cannot find art_illumina executable in the environment")
        exit(ret.ret_code)
    try:
        os.chdir(workdir)
    except OSError:
        logging.error(f"Cannot change into {workdir} directory")
        exit(1)
    alt_ref = 'svteaser.altered.fa'
    ret = cmd_exe((f"art_illumina -ss {instrument} -sam -na -i {alt_ref} -p "
                   f"-l {readlen} -m {meanfrag} -s {insertsd} -f {coverage} -o art_illumina.simReads"))
    if ret.ret_code != 0:
        logging.error("Problem running art_illumina")
        logging.error(ret.stderr)
        logging.error(ret.stdout)
        exit(ret.ret_code)
[ "def sim_reads_art(workdir, coverage=30, readlen=150, meanfrag=400, insertsd=50, instrument=\"HS25\", keep_bam=False):\n ret = cmd_exe(\"which art_illumina\")\n if ret.ret_code != 0:\n logging.error(\"Cannot find art_illumina executable in the environment\")\n exit(ret.ret_code)\n try:\n os.chdir(workdir)\n except OSError:\n logging.error(f\"Cannot change into {workdir} directory\")\n exit(1)\n alt_ref = 'svteaser.altered.fa'\n\n outdir = \"sim_reads_{}_{}_{}_{}_{}\".format(coverage, readlen, meanfrag, insertsd, instrument)\n os.mkdir(outdir)\n # Useful when running on same altered reference but different parameters\n out_path = os.path.join(outdir, \"art_illumina.simReads\")\n ret = cmd_exe((f\"art_illumina -ss {instrument} -sam -na -i {alt_ref} -p \"\n f\"-l {readlen} -m {meanfrag} -s {insertsd} -f {coverage} -o {out_path}\"))\n if ret.ret_code != 0:\n logging.error(\"Problem running art_illumina\")\n logging.error(ret.stderr)\n logging.error(ret.stdout)\n exit(ret.ret_code)\n\n # Optionally compress fq\n if check_gzip():\n ret = cmd_exe((f\"gzip {out_path}1.fq\"))\n if ret.ret_code != 0:\n logging.info(f\"Could not compress {out_path}1.fq\")\n ret = cmd_exe((f\"gzip {out_path}2.fq\"))\n if ret.ret_code != 0:\n logging.info(f\"Could not compress {out_path}2.fq\")\n if keep_bam:\n if check_samtools():\n ret = cmd_exe((f\"samtools view -S -b {out_path}.sam > {out_path}.bam\"))\n if ret.ret_code != 0:\n logging.info(f\"Could not compress {out_path}.sam\")\n else:\n os.remove(f\"{out_path}.sam\")\n else:\n os.remove(f\"{out_path}.sam\")", "def test_illumina_faked(self) :\n write_read(os.path.join(\"Quality\", \"illumina_faked.fastq\"), \"fastq-illumina\", \"fasta\")\n write_read(os.path.join(\"Quality\", \"illumina_faked.fastq\"), \"fastq-illumina\", \"fastq-sanger\")\n write_read(os.path.join(\"Quality\", \"illumina_faked.fastq\"), \"fastq-illumina\", \"fastq-solexa\")\n write_read(os.path.join(\"Quality\", \"illumina_faked.fastq\"), \"fastq-illumina\", \"fastq-illumina\")\n write_read(os.path.join(\"Quality\", \"illumina_faked.fastq\"), \"fastq-illumina\", \"qual\")", "def main():\n if os.name != 'posix':\n print 'runs only on posix systems'\n return\n\n #parse arguments\n parser = argparse.ArgumentParser(description='''A simple Metagenome Illumina read simulator that wraps pIRS''',\n epilog='''''')\n\n parser.add_argument('-c', '--config', nargs=1, type=file, required=True,\n help='configuration file of the simulator', metavar='configMetagenome.cfg',\n dest='config')\n\n parser.add_argument('-p', '--pIRS-param', action='store', nargs='+',\n help='parameters of the pIRS simulator, e.g. \"-Q 64 -E 1\"',\n dest='p')\n\n args = parser.parse_args()\n config = Config(args.config[0], 'Sim')\n\n pirsParam = ''\n if args.p:\n pirsParam = args.p[0]\n\n #reads configuration\n workingDir = config.get('workingDir')\n referenceSeq = config.get('referenceSeq')\n frequenciesInfo = config.get('frequenciesInfo')\n coverageFrequencyMultiplier = float(config.get('coverageFrequencyMultiplier'))\n pirsInstallDir = config.get('pirsInstallDir')\n insertSizeMean = int(config.get('insertSizeMean'))\n insertSizeSd = int(config.get('insertSizeSd'))\n readLength = int(config.get('readLength'))\n\n #check whether the pIRS optional parameters doesn`t contain those predefined elsewhere (e.g. 
in the config)\n if (string.count(pirsParam,'-m') != 0 or string.count(pirsParam,'-v') != 0 or string.count(pirsParam,'-l') != 0\n or string.count(pirsParam,'-x') != 0 or string.count(pirsParam,'-i') != 0 or string.count(pirsParam,'-o') != 0):\n print 'pIRS parameters -m -v -l (-x) must be set in the configuration file, parameters -i -o cannot be set '\n return\n\n #check working directory, create temporary directory\n tmpDir = os.path.join(workingDir,'tmp')\n if not os.path.isdir(workingDir):\n print str('The working directory does not exists, create it! (' + str(workingDir) + ')')\n return\n if not os.path.isdir(tmpDir):\n os.mkdir(tmpDir)\n\n seqNameToSeq = fastaFileToDict(referenceSeq)\n seqNameToFreq = getMapping(frequenciesInfo, 0, 1, sep='\\t', comment = '#')\n\n outReads1Merged = OutFileBuffer(os.path.join(workingDir,'reads_1.fq'))\n outReads2Merged = OutFileBuffer(os.path.join(workingDir,'reads_2.fq'))\n\n for seqName in seqNameToFreq:\n seq = seqNameToSeq[seqName]\n coverage = float(seqNameToFreq[seqName][0])*coverageFrequencyMultiplier\n\n fastaFile = os.path.join(tmpDir,str(seqName + '.fna'))\n outBuffer = OutFileBuffer(fastaFile)\n outBuffer.writeText(str('>' + seqName + '\\n' + seq + '\\n'))\n outBuffer.close()\n\n cmd = str(os.path.join(pirsInstallDir,'pirs') + ' simulate -i ' + fastaFile + ' -x ' + str(coverage) +\n ' -m ' + str(insertSizeMean) + ' -v ' + str(insertSizeSd) + ' -l ' + str(readLength)\n + ' -o ' + seqName + ' ' + pirsParam)\n #print cmd\n proc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=tmpDir)# stdout=subprocess.STDOUT, stderr=subprocess.STDOUT)\n proc.wait()\n if proc.returncode != 0:\n sys.stderr.write(str('command failed: ' + cmd))\n\n #append generated reads to the merged files\n reads1 = gzip.open(os.path.join(tmpDir, str(seqName + '_' + str(readLength) + '_' + str(insertSizeMean) + '_1.fq.gz')), 'rb')\n file1Content = reads1.read()\n outReads1Merged.writeText(str(file1Content.replace('@read_',str('@read_' + seqName + '_')) + '\\n'))\n reads1.close()\n\n reads2 = gzip.open(os.path.join(tmpDir, str(seqName + '_' + str(readLength) + '_' + str(insertSizeMean) + '_2.fq.gz')), 'rb')\n file2Content = reads2.read()\n outReads2Merged.writeText(str(file2Content.replace('@read_',str('@read_' + seqName + '_')) + '\\n'))\n reads2.close()\n\n outReads1Merged.close()\n outReads2Merged.close()", "def run(self):\n\n # Reset clock and memory/responses\n self.time = 0\n self.short_memory = []\n self.long_memory = ({}, {})\n self.fixations = []\n self.responses = []\n self.typed_output = \"\"\n\n eye = self.eye\n last_line = self.text_buffer.shape[0] - 1\n\n fovea_idx = eye.retina.fovea_slice()\n fovea_start, fovea_stop = fovea_idx.start, fovea_idx.stop\n fovea_len = fovea_stop - fovea_start\n\n # Loop until last line is read\n state = DummyModel.STATE_READING_LINE_PART\n while state != DummyModel.STATE_END:\n if state == DummyModel.STATE_READING_LINES:\n line_idx = eye.pos[1]\n\n # Check if we're beyond the last line\n if line_idx < last_line:\n state = DummyModel.STATE_READING_LINE_PART\n else:\n # End of program\n state = DummyModel.STATE_END\n\n elif state == DummyModel.STATE_READING_LINE_PART:\n # Look at current position\n code_str = eye.view()\n\n # Fixation starts now\n fix_start = self.time\n\n # Time taken to encode current sensor contents (fixed)\n self.time += DummyModel.ENCODING_TIME\n\n # Add fovea contents to STM\n self.short_memory.append(code_str[fovea_idx])\n\n # Record fixation\n self.fixations.append([eye.pos[0], eye.pos[1],\n fix_start, 
self.time - fix_start])\n\n # Check right of fovea for whitespace\n right_of_fovea = code_str[(fovea_stop + 1):].strip()\n\n if len(right_of_fovea) > 0:\n # More to read on this line. Move eye to the right.\n next_pos = (eye.pos[0] + fovea_len, eye.pos[1])\n self.time += eye.move_to(next_pos)\n else:\n # Execute line in STM\n state = DummyModel.STATE_EXECUTING_LINE\n\n elif state == DummyModel.STATE_READING_BLOCK:\n pass\n\n elif state == DummyModel.STATE_READING_BLOCK_PART:\n pass\n\n elif state == DummyModel.STATE_EXECUTING_LINE:\n self.__execute_line()\n\n # Move eye to next line, all the way to the left\n next_pos = (0, eye.pos[1] + 1)\n self.time += eye.move_to(next_pos)\n\n state = DummyModel.STATE_READING_LINES\n\n elif state == DummyModel.STATE_EXECUTING_BLOCK:\n pass\n\n # --------------------------------------------------------------------\n # Model is done running. Package fixations and responses as DataFrames.\n\n fix_cols = [\"fix_x\", \"fix_y\", \"start_ms\", \"duration_ms\"]\n fixes_df = pandas.DataFrame(self.fixations, columns=fix_cols)\n fixes_df[\"end_ms\"] = fixes_df[\"start_ms\"] + fixes_df[\"duration_ms\"]\n\n resp_cols = [\"time_ms\", \"response\"]\n resps_df = pandas.DataFrame(self.responses, columns=resp_cols)\n\n return fixes_df, resps_df", "def createSimulatedFiles((rLength, fCov, reference)):\n os.chdir(path)\n # Create a new folder(if necessary) at the appropriate location\n newPath = \"%s/tmp/rL%s/rL%s_fC%s\" % (path, rLength, rLength, fCov)\n newFile = \"%s/%s_%s\" % (newPath, rLength, fCov)\n # The adjusted coverage keeps the number of reads constant for each readLength value supplied.\n # a modelling experiment with a readLength of 20 will have a adjCov that is 40% the value of\n # one with a readLength of 50\n adjCov = float(fCov) * float(rLength)/float(maxRL)\n # If using Sakai as the reference, then multiplying the foldCoverage by the constant below\n # will allow for the use of precise read lengths - using a foldCoverage value of 5 will yield almost\n # exactly 500 000 reads\n # adjCov = float(fCov) * 0.90935049 * float(rLength)/float(maxRL)\n\n # Call art_illumina to simulate the reads into the appropriate folders - general format of system call:\n # art_illumina -i /path-to-file/Escherichia_coli_O157_H7_str_Sakai.fas -l \"readLength\" -f \"foldCoverage\" \\\n # -m 225 -s 60 -o /path-to-folder/Appropriate_name\n artIlluminaCall = \"art_illumina -i %s -l %s -f %s -o %s\" % (reference, rLength, adjCov, newFile)\n # If not using an adjusted coverage value, then uncomment the line below\n # artIlluminaCall = \"art_illumina -i %s -l %s -f %s -o %s\" % (reference, rLength, float(fCov), newFile)\n\n make_path(newPath)\n\n if not os.path.isfile(\"%s.fq\" % newFile):\n sys.stdout.write('.')\n # Subprocess.call requires that the command be finished before the loop can continue\n # this ensures that processes will not be started, and continue running, while the\n # script believes that it is \"safe\" to start more processes, eventually leading to problems\n subprocess.call(artIlluminaCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))\n else:\n print sys.stdout.write('.')", "def run(self):\n midis = self.parse_files(chdir=True)\n total_time = sum([m.get_end_time() for m in midis])\n print(\"\\n{} midis read, or {:.1f} minutes of music\"\\\n .format(len(midis), total_time/60))\n\n note_sequences = self.get_note_sequences(midis)\n del midis\n #vectorize note sequences\n note_sequences = [vectorize(ns) for ns in note_sequences]\n 
print(\"{} note sequences extracted\\n\".format(len(note_sequences)))\n self.note_sequences = self.partition(note_sequences)\n for mode, sequences in self.note_sequences.items():\n print(f\"Processing {mode} data...\")\n print(f\"{len(sequences):,} note sequences\")\n if mode == \"training\":\n sequences = self.stretch_note_sequences(sequences)\n print(f\"{len(sequences):,} stretched note sequences\")\n samples = self.split_sequences(sequences)\n self.quantize(samples)\n print(f\"{len(samples):,} quantized, split samples\")\n if mode == \"training\":\n samples = self.transpose_samples(samples)\n print(f\"{len(samples):,} transposed samples\")\n self.split_samples[mode] = samples\n self.encoded_sequences[mode] = self.encoder.encode_sequences(samples)\n print(f\"Encoded {mode} sequences!\\n\")", "def main():\n\n # Load available detection modes and timecode formats.\n scene_detectors = scenedetect.detectors.get_available()\n timecode_formats = scenedetect.timecodes.get_available()\n # Parse CLI arguments.\n args = scenedetect.cli.get_cli_parser(\n scene_detectors.keys(), timecode_formats.keys()).parse_args()\n # Use above to initialize scene manager.\n smgr = scenedetect.manager.SceneManager(args, scene_detectors)\n\n # Perform scene detection using specified mode.\n start_time = time.time()\n if not args.quiet_mode:\n print('[PySceneDetect] Detecting scenes (%s mode)...' % smgr.detection_method)\n video_fps, frames_read, frames_processed = detect_scenes_file(\n path = args.input.name, scene_manager = smgr)\n elapsed_time = time.time() - start_time\n perf_fps = float(frames_read) / elapsed_time\n\n # Create new list with scene cuts in milliseconds (original uses exact\n # frame numbers) based on the video's framerate, and then timecodes.\n scene_list_msec = [(1000.0 * x) / float(video_fps) for x in smgr.scene_list]\n scene_list_tc = [scenedetect.timecodes.get_string(x) for x in scene_list_msec]\n # Create new lists with scene cuts in seconds, and the length of each scene.\n scene_start_sec = [(1.0 * x) / float(video_fps) for x in smgr.scene_list]\n scene_len_sec = []\n if len(smgr.scene_list) > 0:\n scene_len_sec = smgr.scene_list + [frames_read]\n scene_len_sec = [(1.0 * x) / float(video_fps) for x in scene_len_sec]\n scene_len_sec = [(y - x) for x, y in zip(scene_len_sec[:-1], scene_len_sec[1:])]\n\n if frames_read >= 0:\n # Print performance (average framerate), and scene list if requested.\n if not args.quiet_mode:\n print('[PySceneDetect] Processing complete, found %d scenes in video.' % (\n len(smgr.scene_list)))\n print('[PySceneDetect] Processed %d / %d frames read in %3.1f secs (avg %3.1f FPS).' 
% (\n frames_processed, frames_read, elapsed_time, perf_fps))\n if len(smgr.scene_list) > 0:\n if args.list_scenes:\n print('[PySceneDetect] List of detected scenes:')\n print ('-------------------------------------------')\n print (' Scene # | Frame # | Timecode ')\n print ('-------------------------------------------')\n for scene_idx, frame_num in enumerate(smgr.scene_list):\n print (' %3d | %9d | %s' % (\n scene_idx+1, frame_num, scene_list_tc[scene_idx]))\n print ('-------------------------------------------')\n print('[PySceneDetect] Comma-separated timecode output:')\n\n # Print CSV separated timecode output for use in other programs.\n print(','.join(scene_list_tc))\n\n # Output timecodes to CSV file if required (and scenes were found).\n if args.output and len(smgr.scene_list) > 0:\n csv_writer = csv.writer(args.output)\n # Output timecode scene list\n csv_writer.writerow(scene_list_tc)\n # Output detailed, human-readable scene list.\n csv_writer.writerow([\"Scene Number\", \"Frame Number (Start)\",\n \"Timecode\", \"Start Time (seconds)\", \"Length (seconds)\"])\n for i, _ in enumerate(smgr.scene_list):\n csv_writer.writerow([str(i+1), str(smgr.scene_list[i]),\n scene_list_tc[i], str(scene_start_sec[i]),\n str(scene_len_sec[i])])\n\n # Cleanup, release all objects and close file handles.\n if args.stats_file:\n args.stats_file.close()\n if args.output:\n args.output.close()\n return", "def test_sanger_to_illumina(self):\n seq = \"N\"*94\n qual = \"\".join(chr(33+q) for q in range(0,94))\n expected_phred = [min(62,q) for q in range(0,94)]\n in_handle = StringIO(\"@Test\\n%s\\n+\\n%s\" % (seq,qual))\n out_handle = StringIO(\"\")\n #Want to ignore the data loss warning\n #(on Python 2.6 we could check for it!)\n warnings.simplefilter('ignore', UserWarning)\n SeqIO.write(SeqIO.parse(in_handle, \"fastq-sanger\"),\n out_handle, \"fastq-illumina\")\n warnings.resetwarnings()\n out_handle.seek(0)\n record = SeqIO.read(out_handle, \"fastq-illumina\")\n self.assertEqual(str(record.seq), seq)\n self.assertEqual(record.letter_annotations[\"phred_quality\"],\n expected_phred)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified num ber of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n a.sumUp()", "def main():\n env = os.environ.copy()\n db_root = util.get_db_root()\n assert db_root\n part = util.get_part()\n assert part\n\n information = util.get_part_information(db_root, part)\n\n valid_devices = []\n for name, device in util.get_devices(db_root).items():\n if device['fabric'] == information['device']:\n valid_devices.append(name)\n\n for part, data in util.get_parts(db_root).items():\n if data['device'] in valid_devices:\n command = \"make roi_only\"\n env['XRAY_PART'] = part\n cwd = os.getenv('XRAY_FUZZERS_DIR')\n subprocess.run(command.split(' '), check=True, env=env, cwd=cwd)", "def run(self):\n self.open_image()\n self.parse_boot_sector()\n self.parse_mft()\n self.parse_entry()", "def 
main():\r\n\r\n options, args = parseOptions()\r\n\r\n if len(sys.argv) < 3:\r\n print help()\r\n sys.exit()\r\n\r\n\r\n # As we keep similarity to hredner, \r\n # last argument of command line is a hip file. \r\n scene_file = sys.argv[-1]\r\n\r\n print options, args\r\n #sys.exit()\r\n\r\n # Catch errors:\r\n if not os.path.isfile(scene_file):\r\n print \"Can't find %s scene file.\" % scene_file\r\n return 1\r\n try:\r\n hou.hipFile.load(scene_file, True, True)\r\n except:\r\n print \"Can't open %s\" % scene_file\r\n sys.exit()\r\n try:\r\n driver = hou.node(options.driver)\r\n except:\r\n print \"Can't find %s rop\" % options.driver\r\n sys.exit()\r\n\r\n # Ignoring tiling:\r\n if options.ignore_tiles and driver.parm(\"vm_tile_render\"):\r\n driver.parm(\"vm_tile_render\").set(0)\r\n\r\n # Change ROP to save IFD to disk:\r\n if driver.type().name() == 'ifd' and options.generate_ifds:\r\n driver.parm(\"soho_outputmode\").set(1)\r\n scene_path, scene_name = os.path.split(hou.hipFile.name())\r\n scene_name, ext = os.path.splitext(scene_name)\r\n ifd_name = os.path.join(options.ifd_path, scene_name + \".$F.ifd\")\r\n driver.parm('soho_diskfile').set(ifd_name)\r\n\r\n\r\n # Render with all details specified in a hip file:\r\n if not options.frame_list:\r\n frame_range = tuple(options.frame_range + (options.increment,))\r\n driver.render(frame_range=frame_range, ignore_inputs=True, verbose=True)\r\n # Or render from a list of random frames:\r\n else:\r\n for frame in fileseq.FrameSet(options.frame_list):\r\n driver.render(frame_range=(frame, frame), ignore_inputs=True, verbose=True)", "def main():\n\n\n ###################################################################################################\n # The following code is sample code to get you aquainted with read_num() and generator functions. #\n # It's your choice if you want to use Numpy or just use regular Python lists. #\n ###################################################################################################\n\n\n # This is the model used for processing.\n # 0 will be stored in the 0th index, 1 will be stored in the 1st index and so on\n model = np.zeros([10, 28, 28])\n # This will accumulate the total\n total = np.zeros(10)\n\n # Example code\n # Create a generator to begin reading numbers\n generator_func = read_num('training')\n\n # You can call generator functions in two ways:\n # Method 1. Use the next() function.\n arr_and_num = generator_func.next()\n\n # Read_num is a tuple. The 0th index will return a numpy array\n arr_like = arr_and_num[0]\n # The 1st will return the number the image is supposed to represent\n num = arr_and_num[1]\n\n # Using the pretty print function to visualize the number\n pprint_num(arr_like)\n # Confirm the image is our number\n print(num)\n\n # Method 2. Using a for loop\n for arr_and_num in generator_func:\n # Won't go through everything, but the logic is the same.\n # For the first one there is 60000 data sets. 
So you probably\n # Don't want to print them all...\n break", "def main():\n pv_simulator = PVSimulator()\n pv_simulator.consume()", "def main():\n # Load the UnityMLAgents environment\n env, state_size, action_size, brain_name = load_environment()\n # Instantiate the agent\n agent = Agent(state_size=state_size, action_size=action_size, seed=0)\n # Use Double-DQN to train the agent\n scores = ddqn(env, agent)\n # Persist the weights of the learned model\n save_weights(agent)\n # Plot the scores from the training episodes.\n plot_scores(scores)\n # Clean up the workspace once finished.\n env.close()", "def run():\n args = get_args()\n cfg = open(\"PlantVillage_cfg.yaml\", 'r')\n cfg_dict = yaml.load(cfg)\n single_image_pred = False\n if args.test_data_path is None:\n args.test_data_path = cfg_dict['output_dataset']['test_path']\n if os.path.isfile(args.test_data_path):\n single_image_pred = True\n cat = os.path.basename(os.path.dirname(args.test_data_path))\n\n test_data_transform = PlantVillageSubSet.default_input_transform(False, (224,224))\n plant_ds_test = PlantVillageSubSet.default_plant_ds(\n root=args.test_data_path, transform=test_data_transform)\n\n model_evaluator = MobileNetV2Eval(\n model=cfg_dict['checkpoint_path'],\n test_loader=PlantVillageSubSet(\n plant_ds_test,\n transform=test_data_transform\n ).get_data_loader(),\n batches_per_iter = cfg_dict['batches_per_iter'],\n num_classes = cfg_dict['num_classes']\n )\n\n print(\"\\n******** FEDERATED LEARNING MODEL EVAL ********\")\n print(f\"\\n\\tInitiating model\")\n print(\"\\n***********************************************\\n\")\n\n model_evaluator.load_model(cfg_dict['checkpoint_path'])\n if single_image_pred:\n model_evaluator.predict(args.test_data_path, cat, plant_ds_test.class_to_idx)\n else:\n model_evaluator.test()", "def main():\n # set up the screen display\n screen = pygame.display.set_mode((screen_width, screen_height))\n pygame.display.set_caption(\"Interactive Drum Machine\")\n\n # initialize done to false\n done = False\n\n # create objects\n view = Display()\n sounds = SoundObjects()\n controller = Controller()\n\n # display the screen background\n view.display_background(screen)\n\n while not done:\n # play metronome continuously\n sounds.play_sound(sounds.metronome)\n\n # process events\n done = controller.process_events(sounds.notes, screen)\n\n # exit the window\n pygame.quit()", "def main():\n sound_path = []\n textgrid_path = []\n # change the data to your own recorded sounds, make sure you have wav file and TextGrid file which\n # mark every vowel with some character, and every word end must be marked with \"wordend\"\n # theses recordings are only [a], [e], [i], [o], [u] sequence to test the pipeline flow,\n # the phonological rule learner tests are in the \"phonological_learner\" py file.\n for i in range(1, 5):\n sound_path.append(\"recordings\\\\aeiou{}.wav\".format(str(i)))\n textgrid_path.append(\"recordings\\\\aeiou{}.TextGrid\".format(str(i)))\n data = signal_parser.parse_input_sound(sound_path, textgrid_path) # from sound to vowel objects with f1 and f2\n clustered_data = mdl_clustering.mdl_cluster(data) # cluster into main values\n final_tagged_data = phonology_learner.extract_features(clustered_data.cluster) # add phonological features\n update_data(data, final_tagged_data) # update the input data with the phonological features\n lexicon = find_lexicon(data) # get lexicon from data\n data = separate_data_into_words(data)\n model = phonology_learner.MdlPhonology(final_tagged_data.keys(), 
lexicon, data, POSSIBLE_FEATURES)\n print model\n model = phonology_learner.mdl_phonology_learner(model)\n print model", "def config_armies(filename: str) -> None:\n game = Game()\n reader = Reader()\n armies = reader.read(filename)\n game.start_step = reader.start_from_step\n for army in armies:\n game.add_army(army)\n game.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
KLUE-RE micro f1 (except no_relation)
def klue_re_micro_f1(preds, labels, binary):
    if binary:
        label_list = ['org:top_members/employees', 'org:members', 'org:product', 'per:title',
                      'org:alternate_names', 'per:employee_of', 'org:place_of_headquarters', 'per:product',
                      'org:number_of_employees/members', 'per:children', 'per:place_of_residence',
                      'per:alternate_names', 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings',
                      'per:spouse', 'org:founded', 'org:political/religious_affiliation', 'org:member_of',
                      'per:parents', 'org:dissolved', 'per:schools_attended', 'per:date_of_death',
                      'per:date_of_birth', 'per:place_of_birth', 'per:place_of_death', 'org:founded_by',
                      'per:religion']
        label_indices = list(range(len(label_list)))
    else:
        label_list = ['no_relation', 'org:top_members/employees', 'org:members', 'org:product', 'per:title',
                      'org:alternate_names', 'per:employee_of', 'org:place_of_headquarters', 'per:product',
                      'org:number_of_employees/members', 'per:children', 'per:place_of_residence',
                      'per:alternate_names', 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings',
                      'per:spouse', 'org:founded', 'org:political/religious_affiliation', 'org:member_of',
                      'per:parents', 'org:dissolved', 'per:schools_attended', 'per:date_of_death',
                      'per:date_of_birth', 'per:place_of_birth', 'per:place_of_death', 'org:founded_by',
                      'per:religion']
        no_relation_label_idx = label_list.index("no_relation")
        label_indices = list(range(len(label_list)))
        label_indices.remove(no_relation_label_idx)
    return sklearn.metrics.f1_score(labels, preds, average="micro", labels=label_indices) * 100.0
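A minimal usage sketch for the function above, assuming predictions and gold labels are integer class indices aligned with the label_list ordering (so index 0 is 'no_relation' when binary=False) and that scikit-learn is installed; the preds/labels values here are hypothetical, chosen only to illustrate the call.

import sklearn.metrics  # klue_re_micro_f1 references sklearn.metrics.f1_score directly

# Hypothetical integer class indices, aligned with the label_list ordering above
# (index 0 == 'no_relation' when binary=False).
labels = [0, 1, 4, 6, 6]   # gold labels
preds  = [0, 1, 4, 4, 6]   # model predictions

# With binary=False, the 'no_relation' class is excluded from the micro average,
# so the correctly predicted no_relation example does not inflate the score.
score = klue_re_micro_f1(preds, labels, binary=False)
print(f"micro f1 (excluding no_relation): {score:.2f}")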
[ "def klue_re_micro_f1(preds, labels):\n label_list = ['no_relation', 'org:top_members/employees', 'org:members',\n 'org:product', 'per:title', 'org:alternate_names',\n 'per:employee_of', 'org:place_of_headquarters', 'per:product',\n 'org:number_of_employees/members', 'per:children',\n 'per:place_of_residence', 'per:alternate_names',\n 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings',\n 'per:spouse', 'org:founded', 'org:political/religious_affiliation',\n 'org:member_of', 'per:parents', 'org:dissolved',\n 'per:schools_attended', 'per:date_of_death', 'per:date_of_birth',\n 'per:place_of_birth', 'per:place_of_death', 'org:founded_by',\n 'per:religion'] # 30개\n no_relation_label_idx = label_list.index(\"no_relation\")\n label_indices = list(range(len(label_list)))\n label_indices.remove(no_relation_label_idx) # no_relation인 index 삭제\n return sklearn.metrics.f1_score(labels, preds, average=\"micro\", labels=label_indices) * 100.0", "def microF1(self):\n results = [[0,0,0],[0,0,0],[0,0,0]] # the confusing matrix for all categories\n #rows are actual classes; columns are predicted classes\n for docid in self.test_class:\n row = 3\n col = 3\n for i in xrange(3):\n if self.true_test_class[docid] == self.cat[i]:\n row = i\n if self.test_class[docid] == self.cat[i]:\n col = i\n if row < 3 and col < 3:\n results[row][col] += 1\n else:\n print \"microF1: docid not found: \", docid, \"row: \", \"column: \", col\n TP = [0,0,0]\n FN = [0,0,0]\n FP = [0,0,0]\n for i in xrange(3):\n TP[i] = results[i][i]\n for j in xrange(3):\n if j == i:\n pass\n else:\n FN[i] += results[i][j]\n FP[i] += results[j][i]\n total_TP = sum(TP)\n total_FP = sum(FP)\n total_FN = sum(FN)\n P = total_TP / float(total_TP + total_FP)\n R = total_TP / float(total_TP + total_FN)\n F1 = 2 * P * R / float(P + R)\n #print the results matrix\n print \"-------------------the actual class (row) vs. 
the predicted class (column)---------------\"\n for i in xrange(3):\n print self.cat[i],\"\\t\",\n for j in xrange(3):\n print results[i][j],\"\\t\",\n print \"\\n\"\n print \"----------------TP, FP, FN----------------\"\n for i in xrange(3):\n print self.cat[i],\"\\t\",TP[i], FP[i], FN[i]\n print \"the mircoaveraged F1 is: \", F1\n \n return F1", "def f1(gold_labels, classified_labels, pos_label='1', neg_label='0'):\n # f1 = (2 * pr * re) / (pr + re)\n prec = precision(gold_labels, classified_labels, pos_label, neg_label)\n rec = recall(gold_labels, classified_labels, pos_label, neg_label)\n \n if prec + rec == 0:\n return 0\n \n return (2 * prec * rec) / (prec + rec)", "def test_cr72_full_cluster_one_field_no_rex3(self):\n\n # Parameter reset.\n self.kex = 0.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def use_loss_fn2(first_term_loss, genFGen2, args, model, genFGen3, xData):\n\n #first_term_loss = compute_loss2(genFGen2, args, model)\n #first_term_loss2 = compute_loss2(genFGen2, args, model)\n #first_term_loss = torch.log(first_term_loss2 / (1.0 - first_term_loss2))\n\n #first_term_loss = compute_loss2(genFGen2, args, model)\n\n #first_term_loss = compute_loss2(genFGen2, args, model)\n #first_term_loss = compute_loss2(genFGen2, args, model)\n\n #print('')\n #print(first_term_loss)\n\n #mu = torch.from_numpy(np.array([2.805741, -0.00889241], dtype=\"float32\")).to(device)\n #S = torch.from_numpy(np.array([[pow(0.3442525,2), 0.0], [0.0, pow(0.35358343,2)]], dtype=\"float32\")).to(device)\n\n #mu = torch.from_numpy(np.array([2.8093171, 1.2994107e-03], dtype=\"float32\")).to(device)\n #S = torch.from_numpy(np.array([[pow(0.35840544, 2), 0.0], [0.0, pow(0.34766033, 2)]], dtype=\"float32\")).to(device)\n\n #mu = torch.from_numpy(np.array([0.0, 0.0], dtype=\"float32\")).to(device)\n #S = torch.from_numpy(np.array([[pow(1.0,2), 0.0], [0.0, pow(1.0,2)]], dtype=\"float32\")).to(device)\n\n \"\"\"\n #storeAll = torch.from_numpy(np.array(0.0, dtype=\"float32\")).to(device)\n storeAll = torch.empty(args.batch_size, device=device, requires_grad=False)\n #toUse_storeAll = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=S)\n #for loopIndex_i in range(genFGen2.size()[0]):\n for loopIndex_i in range(args.batch_size):\n #storeAll += torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)))\n #storeAll[loopIndex_i] = torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())\n\n #storeAll[loopIndex_i] = torch.exp(\n # toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())\n\n storeAll[loopIndex_i] = 0.5 * torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())\\\n + 0.5 * torch.exp(toUse_storeAll2.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())\n #storeAll /= genFGen2.size()[0]\n first_term_loss = torch.mean(storeAll)\n \"\"\"\n\n #print(first_term_loss)\n #first_term_loss = compute_loss2(genFGen2, args, model)\n\n #print(genFGen2)\n #dasfasdfs\n\n #first_term_loss = compute_loss2(genFGen2, args, model)\n #first_term_loss = compute_loss2(genFGen2, model)\n\n #print(xData.shape)\n #print(genFGen2.shape)\n\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.image as mpimg\n\n imageStore = xData[0,:,:,:].squeeze().cpu().numpy()\n #imageStore = genFGen2[0, :, :, :].squeeze().cpu().detach().numpy()\n\n plt.imshow(imageStore)\n plt.show()\n \"\"\"\n\n #pilTrans = 
transforms.ToTensor()\n #plt.imshow(xData[1, :])\n\n #first_term_loss = compute_loss2(genFGen2, model)\n #first_term_loss = compute_loss2(xData, model)\n\n #first_term_loss = compute_loss2(genFGen2, model)\n\n #first_term_loss = compute_loss2(genFGen2, model)\n #first_term_loss = compute_loss2(genFGen2, model)\n\n #first_term_loss = compute_loss2(genFGen2, model)\n\n #first_term_loss = compute_loss2(genFGen2, model)\n #first_term_loss = compute_loss2(xData, model)\n\n #print(xData)\n #print(genFGen2)\n\n #print(genFGen2.shape)\n #print(xData.shape)\n\n #print(compute_loss2(genFGen2, model))\n #print(compute_loss2(xData, model))\n\n #print(compute_loss(xData, model))\n #print(compute_loss(xData, model).item())\n\n # (tensor(0.9740, device='cuda:0', grad_fn=<DivBackward0>), tensor([0.], device='cuda:0'),\n # tensor(-1139.7253, device='cuda:0'), tensor(4957.8486, device='cuda:0'))\n\n #print(computeLoss(genFGen2, model))\n #print(computeLoss(xData, model))\n\n #first_term_loss = compute_loss2(genFGen2, model)\n #first_term_loss = compute_loss2(genFGen2, model)\n\n #first_term_loss = compute_loss2(genFGen2, model)\n\n #first_term_loss = compute_loss2(genFGen2, model)\n #first_term_loss = computeLoss(genFGen2, model)\n\n #print(genFGen2.shape)\n #print(first_term_loss)\n\n #first_term_loss.retain_grad()\n\n #first_term_loss.retain_grad()\n #first_term_loss.retain_grad()\n\n # (?)\n #first_term_loss.retain_grad()\n # (?)\n\n #print(first_term_loss)\n #print('')\n\n \"\"\"\n second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)\n for i in range(args.batch_size):\n second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_() ** 2\n second_term_loss32[i] = torch.min(second_term_loss22)\n second_term_loss2 = torch.mean(second_term_loss32)\n \"\"\"\n\n #print(first_term_loss)\n #print('')\n\n #print('')\n #print(compute_loss2(mu.unsqueeze(0), args, model))\n\n #print(torch.exp(toUse_storeAll.log_prob(mu)))\n #print('')\n\n #first_term_loss = storeAll\n\n #xData = toy_data.inf_train_gen(args.data, batch_size=args.batch_size)\n #xData = torch.from_numpy(xData).type(torch.float32).to(device)\n\n #print(xData.shape)\n #print(torch.mean(xData))\n #print(torch.std(xData))\n\n #xData = torch.empty((args.batch_size, 2), device=device)\n #xData[:args.batch_size//2, :] = toUse_storeAll.sample((args.batch_size//2,)) # .sample_n(args.batch_size // 2)\n #xData[args.batch_size//2:, :] = toUse_storeAll2.sample((args.batch_size//2,)) # .sample_n(args.batch_size//2)\n\n \"\"\"\n xData = torch.empty((args.batch_sizeM, 2), device=device)\n xData[:args.batch_sizeM // 2, :] = toUse_storeAll.sample((args.batch_sizeM // 2,)) # .sample_n(args.batch_size // 2)\n xData[args.batch_sizeM // 2:, :] = toUse_storeAll2.sample((args.batch_sizeM // 2,)) # .sample_n(args.batch_size//2)\n \"\"\"\n\n #xData = torch.empty((args.batch_size, 2)).normal_(mean=[2.82507515, 1.92882611e-04 + 0.8], std=0.5)\n #xData[args.batch_size//2:,:] = torch.empty((args.batch_size, 2)).normal_(mean=4, std=0.5)\n\n #mu = torch.from_numpy(np.array([2.82507515, 1.92882611e-04 + 0.8], dtype=\"float32\")).to(device)\n #S = torch.from_numpy(np.array([[pow(0.07166782, 2), 0.0], [0.0, pow(0.06917527, 2)]], dtype=\"float32\")).to(device)\n #mu2 = torch.from_numpy(np.array([2.82507515, 1.92882611e-04 - 0.8], dtype=\"float32\")).to(device)\n #toUse_storeAll = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=S)\n #toUse_storeAll2 = torch.distributions.MultivariateNormal(loc=mu2, 
covariance_matrix=S)\n\n #print(xData.shape)\n #print(torch.mean(xData))\n #print(torch.std(xData))\n\n #var2 = []\n #for i in genFGen2:\n # var1 = []\n # for j in xData:\n # new_stuff = torch.dist(i, j, 2) # this is a tensor\n # var1.append(new_stuff.unsqueeze(0))\n # var1_tensor = torch.cat(var1)\n # second_term_loss2 = torch.min(var1_tensor) / args.batch_size\n # var2.append(second_term_loss2.unsqueeze(0))\n #var2_tensor = torch.cat(var2)\n #second_term_loss = torch.mean(var2_tensor) / args.batch_size\n #second_term_loss *= 100.0\n\n #print('')\n #print(second_term_loss)\n\n # If you know in advance the size of the final tensor, you can allocate\n # an empty tensor beforehand and fill it in the for loop.\n\n #x = torch.empty(size=(len(items), 768))\n #for i in range(len(items)):\n # x[i] = calc_result\n\n #print(len(genFGen2))\n #print(genFGen2.shape[0])\n # len(.) and not .shape[0]\n\n #print(len(xData))\n #print(xData.shape[0])\n # Use len(.) and not .shape[0]\n\n \"\"\"\n #second_term_loss = torch.empty(size=(len(genFGen2), len(xData))).to(device)\n #second_term_loss = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=True)\n #second_term_loss3 = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=True)\n #second_term_loss3 = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=False)\n second_term_loss3 = torch.empty(size=(args.batch_size, args.batch_size), device=device, requires_grad=False)\n #for i in range(len(genFGen2)):\n for i in range(args.batch_size):\n #for j in range(len(xData)):\n for j in range(args.batch_size):\n #second_term_loss[i, j] = torch.dist(genFGen2[i,:], xData[j,:], 2)\n #second_term_loss[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)\n\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)\n\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)\n #second_term_loss3[i, j] = torch.tensor(0.1, requires_grad=True)\n\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1).requires_grad_()\n\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1).requires_grad_()\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2).requires_grad_()**2\n\n #second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2).requires_grad_()**2\n second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2).requires_grad_()\n\n #second_term_loss[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2)**2\n #second_term_loss2, _ = torch.min(second_term_loss, 1)\n second_term_loss2, _ = torch.min(second_term_loss3, 1)\n #second_term_loss = 5000.0 * torch.mean(second_term_loss2) / (args.batch_size**2)\n #second_term_loss = lambda1 * torch.mean(second_term_loss2) / (args.batch_size ** 2)\n #second_term_loss = lambda1 * torch.mean(second_term_loss2)\n second_term_loss = torch.mean(second_term_loss2)\n\n #print(second_term_loss)\n #print('')\n\n print('')\n print(first_term_loss)\n print(second_term_loss)\n\n print('')\n \"\"\"\n\n #args.batch_size = 2\n #genFGen2 = torch.from_numpy(np.array([[3, 0], [2, 0]], dtype=\"float32\")).to(device)\n #xData = torch.from_numpy(np.array([[1, 0], [0, 1]], dtype=\"float32\")).to(device)\n\n #import timeit\n #start = timeit.default_timer()\n #stop = timeit.default_timer()\n 
#print('Time: ', stop - start)\n\n \"\"\"\n second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)\n for i in range(args.batch_size):\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p='fro', dim=1).requires_grad_()\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()\n second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()**2\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()\n #print(second_term_loss22.shape)\n second_term_loss32[i] = torch.min(second_term_loss22)\n #print(second_term_loss32)\n #print(second_term_loss32.shape)\n #print(torch.norm(genFGen2 - xData, p=None, dim=0).shape)\n #second_term_loss22 = torch.min(second_term_loss32)\n #print(second_term_loss22)\n #print(second_term_loss22.shape)\n second_term_loss2 = torch.mean(second_term_loss32)\n #second_term_loss2 = 7.62939453125 * torch.mean(second_term_loss32)\n #print(second_term_loss2)\n #print(second_term_loss2.shape)\n \"\"\"\n\n #import timeit\n #start = timeit.default_timer()\n #stop = timeit.default_timer()\n #print('Time: ', stop - start)\n\n #print('')\n #print(second_term_loss2)\n\n #distances = torch.norm(vertices - point_locs, p=2, dim=1)\n #distances = torch.sqrt((vertices - point_locs).pow(2).sum(1))\n\n #import timeit\n #start = timeit.default_timer()\n #stop = timeit.default_timer()\n #print('Time: ', stop - start)\n\n #xData = xData.view(-1, 28 * 28)\n xData = xData.view(-1, 64 * 64)\n\n #xData = xData.view(-1, 28*28)\n #genFGen2 = genFGen2.view(-1, 28*28)\n\n #genFGen2 = genFGen2.view(-1, 28 * 28)\n genFGen2 = genFGen2.view(-1, 64 * 64)\n\n #genFGen2 = genFGen2.view(-1, 28*28)\n #genFGen3 = genFGen3.view(-1, 28*28)\n\n #genFGen3 = genFGen3.view(-1, 28 * 28)\n genFGen3 = genFGen3.squeeze()\n\n #print(genFGen3.shape)\n #asdfasdf\n\n #print(xData.shape)\n #print(genFGen2.shape)\n\n #print(genFGen3.shape)\n #asdfasdf\n\n device = args.device\n\n #print(device)\n #adfasdfs\n\n #genFGen3 = genFGen3.view(-1, 28 * 28)\n #genFGen3 = genFGen3.view(-1, 64 * 64)\n\n #xData = torch.transpose(xData, 0, 1)\n #genFGen2 = torch.transpose(genFGen2, 0, 1)\n\n #genFGen2 = torch.transpose(genFGen2, 0, 1)\n #genFGen3 = torch.transpose(genFGen3, 0, 1)\n\n #print(genFGen2.shape)\n #print(xData.shape)\n #print(genFGen3.shape)\n\n #print(genFGen2.shape)\n #print(xData.shape)\n\n #print(genFGen3.shape)\n #print(args.batchSize)\n\n #second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)\n #for i in range(args.batchSize):\n # second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n #second_term_loss2 = torch.mean(second_term_loss32)\n\n #xData = xData[:15000,:]\n #xData.requires_grad = True\n\n #print(xData.shape)\n #asdfasfs\n\n #print(xData.shape)\n #adfasdfs\n\n #second_term_loss2 = torch.empty(1, device=device, requires_grad=False)\n second_term_loss2 = torch.zeros(1, device=device, requires_grad=False)\n #print(second_term_loss2)\n #asdfadsfs\n for i in range(args.batchSize):\n # print(i)\n\n # print((torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2))\n # print((torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2).shape)\n\n # asdfasdf\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 
2)\n\n #if i < 6:\n\n #if i < 6:\n #if i < 5:\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n #else:\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n\n #print(i)\n #second_term_loss2 += torch.min(torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_() ** 2)\n\n second_term_loss2 += torch.min(torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_() ** 2)\n\n #if i < 7:\n # second_term_loss2 += torch.min(torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_() ** 2)\n #else:\n # second_term_loss2 += torch.min(torch.norm(genFGen2[i, :].detach() - xData, p=None, dim=1).requires_grad_() ** 2)\n\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n # try:\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n # #break\n # except MemoryError:\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n #second_term_loss2 /= args.batchSize\n\n second_term_loss2 /= args.batchSize\n #second_term_loss2 = max(second_term_loss2, 1e-8)\n\n #print(second_term_loss2)\n #print(second_term_loss2.requires_grad)\n\n #asdfasdfs\n\n #second_term_loss2.backward()\n\n second_term_loss2 = second_term_loss2.squeeze()\n #second_term_loss2 = abs(second_term_loss2.squeeze())\n\n #second_term_loss2 = max(second_term_loss2, torch.tensor(1e-8))\n\n #if torch.isnan(second_term_loss2).any():\n # second_term_loss2\n\n #print(torch.isnan(second_term_loss2).any())\n #asdfasdfs\n\n #print(second_term_loss2)\n #print(second_term_loss2.requires_grad)\n\n #print(second_term_loss2)\n #print(second_term_loss2.requires_grad)\n\n #asdfas\n\n '''\n second_term_loss2 = torch.empty(1, device=device, requires_grad=False)\n for i in range(args.batchSize):\n #print(i)\n\n #print((torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2))\n #print((torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2).shape)\n\n #asdfasdf\n #second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n\n #second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n #second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n\n if i<6:\n second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n else:\n second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n\n #second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n #second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n #try:\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n # #break\n #except MemoryError:\n # second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n second_term_loss2 /= args.batchSize\n\n #second_term_loss2.backward()\n\n print(second_term_loss2)\n print(second_term_loss2.requires_grad)\n '''\n\n '''\n # second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)\n second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)\n # for i in range(args.batch_size):\n for i in range(args.batchSize):\n \"\"\"\n print(torch.mean(torch.sqrt((genFGen2[i, :] - 
xData).view(args.batchSize, -1).pow(2).sum(1))))\n print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1))))\n print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))\n print('')\n\n print(torch.mean(torch.norm((genFGen2[i, :] - xData).view(args.batchSize, -1), p=None, dim=1)))\n print(torch.mean(torch.norm((genFGen2[i, :] - genFGen2).view(args.batchSize, -1), p=None, dim=1)))\n print(torch.mean(torch.norm((genFGen3[i, :] - genFGen3), p=None, dim=1)))\n print('')\n \"\"\"\n\n # print(torch.mean(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1))))\n # print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1))))\n # print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))\n # print('')\n\n # print(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)))\n # print(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1)))\n # print(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1)))\n # print('')\n\n # second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p='fro', dim=1).requires_grad_()\n # second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()\n # second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()**2\n # second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1))**2\n # second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_()**2\n\n # second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_() ** 2\n\n # second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_() ** 2\n # second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)).requires_grad_() ** 2\n\n # second_term_loss22 = torch.sqrt(\n # 1e-17 + (genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)).requires_grad_() ** 2\n\n # second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()**2\n # second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2\n\n # tempVarVar21 = genFGen2[i, :] - xData\n # print(tempVarVar21.shape)\n\n # print(xData.shape)\n # asdfsadf\n\n # second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_() ** 2\n # second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2\n\n # 61562.1641\n # 4.7732\n\n # print(genFGen2[i, :].shape)\n # print(xData.shape)\n\n # tempVarVar21 = genFGen2[i, :] - xData\n # print(tempVarVar21.shape)\n\n # print(second_term_loss22.shape)\n # adsfasfs\n\n # second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()\n # print(second_term_loss22.shape)\n # second_term_loss32[i] = torch.min(second_term_loss22)\n\n # print(i)\n\n # second_term_loss32[i] = torch.min(second_term_loss22)\n #second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n\n #second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n\n if i<6:\n second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n else:\n second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)\n\n # second_term_loss32[i] = torch.min(second_term_loss22)\n # print(second_term_loss32)\n # print(second_term_loss32.shape)\n # 
print(torch.norm(genFGen2 - xData, p=None, dim=0).shape)\n # second_term_loss22 = torch.min(second_term_loss32)\n # print(second_term_loss22)\n # print(second_term_loss22.shape)\n # second_term_loss2 = torch.mean(second_term_loss32)\n # second_term_loss2 = 0.3 * torch.mean(second_term_loss32)\n # second_term_loss2 = 3.0 * torch.mean(second_term_loss32)\n # second_term_loss2 = 7.62939453125 * torch.mean(second_term_loss32)\n # print(second_term_loss2)\n # print(second_term_loss2.shape)\n\n # second_term_loss2 = 0.3 * torch.mean(second_term_loss32)\n\n # second_term_loss2 = 0.3 * torch.mean(second_term_loss32)\n # second_term_loss2 = 0.001 * torch.mean(second_term_loss32)\n\n # second_term_loss2 = 0.001 * torch.mean(second_term_loss32)\n\n # second_term_loss2 = 0.001 * torch.mean(second_term_loss32)\n second_term_loss2 = torch.mean(second_term_loss32)\n\n print(second_term_loss2)\n print(second_term_loss2.requires_grad)\n\n asdfasfs\n '''\n\n # print(second_term_loss2)\n # asdfasfd\n\n #second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)\n #for i in range(args.batchSize):\n # second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n #second_term_loss2 = torch.mean(second_term_loss32)\n\n '''\n second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)\n for i in range(args.batchSize):\n second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n second_term_loss2 = torch.mean(second_term_loss32)\n '''\n\n '''\n #second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)\n second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)\n #for i in range(args.batch_size):\n for i in range(args.batchSize):\n \"\"\"\n print(torch.mean(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1))))\n print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1))))\n print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))\n print('')\n\n print(torch.mean(torch.norm((genFGen2[i, :] - xData).view(args.batchSize, -1), p=None, dim=1)))\n print(torch.mean(torch.norm((genFGen2[i, :] - genFGen2).view(args.batchSize, -1), p=None, dim=1)))\n print(torch.mean(torch.norm((genFGen3[i, :] - genFGen3), p=None, dim=1)))\n print('')\n \"\"\"\n\n #print(torch.mean(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1))))\n #print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1))))\n #print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))\n #print('')\n\n #print(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)))\n #print(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1)))\n #print(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1)))\n #print('')\n\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p='fro', dim=1).requires_grad_()\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()**2\n #second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1))**2\n #second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_()**2\n\n #second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_() ** 2\n\n #second_term_loss22 = torch.sqrt(1e-17 + 
(genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_() ** 2\n #second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)).requires_grad_() ** 2\n\n #second_term_loss22 = torch.sqrt(\n # 1e-17 + (genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)).requires_grad_() ** 2\n\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()**2\n #second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2\n\n #tempVarVar21 = genFGen2[i, :] - xData\n #print(tempVarVar21.shape)\n\n #print(xData.shape)\n #asdfsadf\n\n #second_term_loss22 = torch.sqrt(1e-17 + (genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_() ** 2\n #second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2\n\n # 61562.1641\n # 4.7732\n\n #print(genFGen2[i, :].shape)\n #print(xData.shape)\n\n #tempVarVar21 = genFGen2[i, :] - xData\n #print(tempVarVar21.shape)\n\n #print(second_term_loss22.shape)\n #adsfasfs\n\n #second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()\n #print(second_term_loss22.shape)\n #second_term_loss32[i] = torch.min(second_term_loss22)\n\n #print(i)\n\n #second_term_loss32[i] = torch.min(second_term_loss22)\n second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)\n\n #second_term_loss32[i] = torch.min(second_term_loss22)\n #print(second_term_loss32)\n #print(second_term_loss32.shape)\n #print(torch.norm(genFGen2 - xData, p=None, dim=0).shape)\n #second_term_loss22 = torch.min(second_term_loss32)\n #print(second_term_loss22)\n #print(second_term_loss22.shape)\n #second_term_loss2 = torch.mean(second_term_loss32)\n #second_term_loss2 = 0.3 * torch.mean(second_term_loss32)\n #second_term_loss2 = 3.0 * torch.mean(second_term_loss32)\n #second_term_loss2 = 7.62939453125 * torch.mean(second_term_loss32)\n #print(second_term_loss2)\n #print(second_term_loss2.shape)\n\n #second_term_loss2 = 0.3 * torch.mean(second_term_loss32)\n\n #second_term_loss2 = 0.3 * torch.mean(second_term_loss32)\n #second_term_loss2 = 0.001 * torch.mean(second_term_loss32)\n\n #second_term_loss2 = 0.001 * torch.mean(second_term_loss32)\n\n #second_term_loss2 = 0.001 * torch.mean(second_term_loss32)\n second_term_loss2 = torch.mean(second_term_loss32)\n\n #print(second_term_loss2)\n #asdfasfd\n '''\n\n #second_term_loss2.retain_grad()\n\n #second_term_loss2.retain_grad()\n #second_term_loss2.retain_grad()\n\n # (?)\n #second_term_loss2.retain_grad()\n # (?)\n\n #import timeit\n #start = timeit.default_timer()\n\n #stop = timeit.default_timer()\n #print('Time: ', stop - start)\n\n #print(second_term_loss2)\n #print('')\n\n #print('')\n #print(first_term_loss)\n\n #print(second_term_loss2)\n #print('')\n\n #print(first_term_loss)\n #print(second_term_loss2)\n\n #second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)\n #for i in range(args.batch_size):\n # second_term_loss22 = torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)).requires_grad_()**2\n # second_term_loss32[i] = torch.min(second_term_loss22)\n #second_term_loss2 = torch.mean(second_term_loss32)\n\n #print(genFGen2.shape)\n #print(genFGen3.shape)\n\n #print(xData.shape)\n #print('')\n\n #third_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)\n third_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)\n #for i in range(args.batch_size):\n for i in range(args.batchSize):\n #print(xData.shape)\n 
#print(genFGen2.shape)\n\n #print(genFGen3.shape)\n #print('')\n\n #print(xData.squeeze().shape)\n #print(genFGen2.squeeze().shape)\n #print('')\n\n #print((genFGen2[i, :] - xData).pow(2).sum(1).shape)\n #print((genFGen2[i, :] - genFGen2).pow(2).sum(1).shape)\n\n #print((genFGen2[i, :].squeeze() - xData.squeeze()).pow(2).sum(1).shape)\n #print((genFGen2[i, :].squeeze() - genFGen2.squeeze()).pow(2).sum(1).shape)\n\n #print((genFGen3[i, :] - genFGen3).pow(2).sum(1).shape)\n #print('')\n\n #print(torch.norm(genFGen2[i, :] - xData, p=None, dim=2).shape)\n #print(torch.norm(genFGen2[i, :] - genFGen2, p=None, dim=2).shape)\n\n #print(torch.norm(genFGen2[i, :].squeeze() - xData.squeeze(), p=None, dim=2).shape)\n #print(torch.norm(genFGen2[i, :].squeeze() - genFGen2.squeeze(), p=None, dim=2).shape)\n\n #print(torch.norm(genFGen3[i, :] - genFGen3, p=None, dim=1).shape)\n #print('')\n\n #a = torch.randn(64, 3, 32, 32)\n #a = a.view(64, -1)\n #b = torch.norm(a, p=2, dim=1)\n\n #a = torch.randn(64, 1, 28, 28)\n #a = a.view(64, -1)\n #b = torch.norm(a, p=2, dim=1)\n\n #print((genFGen2[i, :] - xData).view(args.batchSize,-1).pow(2).sum(1).shape)\n #print((genFGen2[i, :] - genFGen2).view(args.batchSize,-1).pow(2).sum(1).shape)\n #print('')\n\n #print(torch.norm((genFGen2[i, :] - xData).view(args.batchSize,-1), p=None, dim=1).shape)\n #print(torch.norm((genFGen2[i, :] - genFGen2).view(args.batchSize,-1), p=None, dim=1).shape)\n #print('')\n\n \"\"\"\n print(torch.mean(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1))))\n print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1))))\n print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))\n print('')\n\n print(torch.mean(torch.norm((genFGen2[i, :] - xData).view(args.batchSize, -1), p=None, dim=1)))\n print(torch.mean(torch.norm((genFGen2[i, :] - genFGen2).view(args.batchSize, -1), p=None, dim=1)))\n print(torch.mean(torch.norm((genFGen3[i, :] - genFGen3), p=None, dim=1)))\n print('')\n \"\"\"\n\n #print(torch.mean(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1))))\n #print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1))))\n #print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))\n #print('')\n\n #print(torch.sqrt((genFGen2[i, :] - xData).view(args.batchSize, -1).pow(2).sum(1)))\n #print(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1)))\n #print(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1)))\n #print('')\n\n #third_term_loss22 = (torch.norm(genFGen3[i, :] - genFGen3, p=None, dim=1).requires_grad_()) / (\n # 1.0e-17 + torch.norm(genFGen2[i, :] - genFGen2, p=None, dim=1).requires_grad_())\n #third_term_loss22 = (torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (\n # 1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).pow(2).sum(1)).requires_grad_())\n\n #third_term_loss22 = (torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (\n # 1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).pow(2).sum(1)).requires_grad_())\n\n #hbdafj = genFGen3[i, :] - genFGen3\n #print(hbdafj.shape)\n\n #adfa = genFGen2[i, :] - xData\n #print(adfa.shape)\n\n #third_term_loss22 = (torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (\n # 1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1)).requires_grad_())\n\n #third_term_loss22 = 
(torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (\n # 1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).view(args.batchSize, -1).pow(2).sum(1)).requires_grad_())\n\n #third_term_loss22 = (torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (\n # 1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).pow(2).sum(1)).requires_grad_())\n\n #third_term_loss22 = (torch.norm(genFGen3[i, :] - genFGen3, p=None, dim=1).requires_grad_()) / (\n # 1.0e-17 + torch.norm(genFGen2[i, :] - genFGen2, p=None, dim=1).requires_grad_())\n\n third_term_loss22 = (torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (\n 1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).pow(2).sum(1)).requires_grad_())\n\n #print(third_term_loss22.shape)\n\n third_term_loss32[i] = torch.mean(third_term_loss22)\n #third_term_loss12 = torch.mean(third_term_loss32)\n #third_term_loss12 = 0.01 * torch.mean(third_term_loss32)\n #third_term_loss12 = 0.025 * torch.mean(third_term_loss32)\n #third_term_loss12 = 0.25 * torch.mean(third_term_loss32)\n #third_term_loss12 = 0.1 * torch.mean(third_term_loss32)\n\n #third_term_loss12 = 0.25 * torch.mean(third_term_loss32)\n\n #third_term_loss12 = 0.25 * torch.mean(third_term_loss32)\n #third_term_loss12 = 0.1 * torch.mean(third_term_loss32)\n\n #third_term_loss12 = 0.1 * torch.mean(third_term_loss32)\n\n #third_term_loss12 = 0.1 * torch.mean(third_term_loss32)\n third_term_loss12 = torch.mean(third_term_loss32)\n\n # (?)\n #third_term_loss12 = torch.zeros(1, device=device, requires_grad=True)\n # (?)\n\n #third_term_loss32 = torch.zeros(1, device=device, requires_grad=True)\n\n #third_term_loss32 = torch.zeros(1, device=device, requires_grad=True)\n #third_term_loss32 = torch.zeros(1, device=device, requires_grad=True)\n\n #print(third_term_loss12)\n #adfdfasc\n\n #third_term_loss12.retain_grad()\n\n #third_term_loss12.retain_grad()\n #third_term_loss12.retain_grad()\n\n # (?)\n #third_term_loss12.retain_grad()\n # (?)\n\n #print(third_term_loss12)\n #print('')\n\n #return first_term_loss + second_term_loss2\n #return first_term_loss + second_term_loss2, xData\n #return first_term_loss + second_term_loss2 + third_term_loss12, xData\n\n #return first_term_loss + second_term_loss2 + third_term_loss12, xData\n #return first_term_loss + second_term_loss2 + third_term_loss12\n\n #print(first_term_loss)\n #print(second_term_loss2)\n\n #print(third_term_loss12)\n #print('')\n\n #torch.set_printoptions(sci_mode=False)\n\n #print(first_term_loss)\n #print('')\n\n \"\"\"\n #print(torch.isnan(first_term_loss))\n if torch.isnan(first_term_loss):\n first_term_loss = 0.0\n \"\"\"\n\n #print(first_term_loss)\n #print('')\n\n #return first_term_loss + second_term_loss2 + third_term_loss12\n #return first_term_loss + second_term_loss2 + third_term_loss12\n\n #print(second_term_loss2)\n #print(third_term_loss12)\n\n #if torch.isnan(first_term_loss):\n # return second_term_loss2 + third_term_loss12\n #else:\n # return first_term_loss + second_term_loss2 + third_term_loss12\n\n #return first_term_loss + second_term_loss2 + third_term_loss12\n\n #return first_term_loss + second_term_loss2 + third_term_loss12\n #return first_term_loss + second_term_loss2 + third_term_loss12\n\n #return first_term_loss + second_term_loss2 + third_term_loss12\n #return first_term_loss + second_term_loss2 + third_term_loss12, first_term_loss, second_term_loss2\n\n #print('')\n #print(first_term_loss.item())\n\n 
#print(second_term_loss2.item())\n #print(third_term_loss12.item())\n\n #print('')\n #print(first_term_loss.grad)\n\n #print(second_term_loss2.grad)\n #print(third_term_loss12.grad)\n\n #print('')\n\n #total_totTotalLoss = first_term_loss * second_term_loss2 * third_term_loss12\n #total_totTotalLoss = first_term_loss + second_term_loss2 + third_term_loss12\n\n #total_totTotalLoss = first_term_loss + second_term_loss2 + third_term_loss12\n\n #total_totTotalLoss = first_term_loss + second_term_loss2 + third_term_loss12\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 0.1 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 0.1 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 0.1 * third_term_loss12\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 10.0 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 0.1 * third_term_loss12\n #total_totTotalLoss = first_term_loss + 10.0 * second_term_loss2 + 0.1 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 0.1 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + 0.1 * third_term_loss12\n #total_totTotalLoss = first_term_loss + 1.0 * second_term_loss2 + 0.1 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 1.0 * second_term_loss2 + 0.1 * third_term_loss12\n\n #print(first_term_loss)\n #print(first_term_loss.requires_grad)\n\n #print(second_term_loss2)\n #print(second_term_loss2.requires_grad)\n\n #print(third_term_loss12)\n #print(third_term_loss12.requires_grad)\n\n #print(first_term_loss.requires_grad)\n #print(second_term_loss2.requires_grad)\n #print(third_term_loss12.requires_grad)\n\n #print(first_term_loss)\n #print(second_term_loss2)\n\n #print(third_term_loss12)\n #asdfasdf\n\n #first_term_loss = first_term_loss * 0.000001\n #second_term_loss2 = second_term_loss2.squeeze()\n\n #second_term_loss2 = second_term_loss2 * 0.001\n #second_term_loss2 = second_term_loss2 * 0.01\n\n #second_term_loss2 = second_term_loss2.squeeze()\n\n #first_term_loss *= 100.0\n #second_term_loss2 *= 0.0001\n\n #print(first_term_loss)\n #print(first_term_loss.requires_grad)\n\n #print(second_term_loss2)\n #print(second_term_loss2.requires_grad)\n\n #print(third_term_loss12)\n #print(third_term_loss12.requires_grad)\n\n #asdfszdf\n\n #total_totTotalLoss = first_term_loss + 1.0 * second_term_loss2 + 0.1 * third_term_loss12\n #total_totTotalLoss = first_term_loss + 0.3 * second_term_loss2 + 0.025 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.001 * second_term_loss2 + third_term_loss12\n #total_totTotalLoss = first_term_loss + second_term_loss2 + third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 100.0*second_term_loss2 + third_term_loss12\n #total_totTotalLoss = first_term_loss + 100.0*second_term_loss2 + 0.1*third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.01 * second_term_loss2 + 0.1 * third_term_loss12\n #total_totTotalLoss = first_term_loss + 0.01 * second_term_loss2 + 0.01 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.01 * second_term_loss2 + 0.01 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.01 * second_term_loss2 + 0.01 * third_term_loss12\n #total_totTotalLoss = first_term_loss + 0.1 * second_term_loss2 + 0.01 * third_term_loss12\n\n #total_totTotalLoss = first_term_loss + 0.1 * second_term_loss2 + 0.01 * third_term_loss12\n 
total_totTotalLoss = first_term_loss + 0.01 * second_term_loss2 + 0.01 * third_term_loss12\n\n #print(total_totTotalLoss)\n #print(total_totTotalLoss.requires_grad)\n\n #asdfsadf\n\n #total_totTotalLoss.retain_grad()\n\n #total_totTotalLoss.retain_grad()\n #total_totTotalLoss.retain_grad()\n\n #total_totTotalLoss.retain_grad()\n #total_totTotalLoss.retain_grad()\n\n #return first_term_loss + second_term_loss2 + third_term_loss12, first_term_loss, second_term_loss2\n #return first_term_loss + second_term_loss2 + third_term_loss12, first_term_loss, second_term_loss2, third_term_loss12\n\n #return first_term_loss + second_term_loss2 + third_term_loss12, first_term_loss, second_term_loss2, third_term_loss12\n return total_totTotalLoss, first_term_loss, second_term_loss2, third_term_loss12", "def test_cr72_full_cluster_one_field_no_rex1(self):\n\n # Parameter reset.\n self.dw = 0.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def test_cr72_full_cluster_one_field_no_rex6(self):\n\n # Parameter reset.\n self.pA = 1.0\n self.kex = 0.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def Custom(trafo: ngsolve.fem.CoefficientFunction, jac: ngsolve.fem.CoefficientFunction) -> PML:", "def joint_f1(evi_precision, evi_recall, lab_precision, lab_recall):\n joint_precision = evi_precision * lab_precision\n joint_recall = evi_recall * lab_recall\n if joint_precision == 0 and joint_recall == 0:\n return 0\n return 2 * joint_precision * joint_recall / (joint_precision + joint_recall)", "def test_cr72_full_cluster_one_field_no_rex5(self):\n\n # Parameter reset.\n self.dw = 0.0\n self.kex = 0.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def cal_f1(self,base,comp):\n\n if type(base)==type(\"string\"):\n base=word_tokenize(base)\n base = [w.lower() for w in base]\n else:\n base = [w.lower() for w in base]\n if type(comp)==type(\"string\"):\n comp=word_tokenize(comp)\n comp = [w.lower() for w in comp]\n else:\n comp = [w.lower() for w in comp]\n precision=0\n for item in comp:\n if item in base:\n precision=precision+1\n precision=precision/len(comp)\n\n recall=0\n for item in base:\n if item in comp:\n recall=recall+1\n recall=recall/len(base)\n\n try:\n F1=2 * (precision * recall) / (precision + recall)\n except ZeroDivisionError:\n F1=0\n\n return F1,precision,recall", "def test_cr72_full_cluster_one_field_no_rex8(self):\n\n # Parameter reset.\n self.kex = 1e7\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def test_f1_micro_loss():\n y_true = torch.tensor([0, 1, 2, 2])\n y_pred_true = torch.tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 1.]])\n y_pred_false = torch.tensor([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.], [1., 0., 0.]])\n y_pred_medium = torch.tensor([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.], [1., 0., 0.]])\n no_mask = torch.tensor([1., 1., 1., 1.])\n mask = torch.tensor([1., 0., 1., 0.])\n\n assert torch.isclose(\n loss_micro_f1(y_true, y_pred_true, no_mask), torch.tensor(0.))\n assert torch.isclose(\n loss_micro_f1(y_true, y_pred_false, no_mask), torch.tensor(1.))\n assert torch.isclose(\n loss_micro_f1(y_true, y_pred_medium, no_mask), torch.tensor(1/2))\n assert torch.isclose(\n loss_micro_f1(y_true, y_pred_true, mask), torch.tensor(0.))\n assert torch.isclose(\n loss_micro_f1(y_true, y_pred_false, mask), torch.tensor(1.))\n assert torch.isclose(\n loss_micro_f1(y_true, y_pred_medium, mask), torch.tensor(0.))", "def test_cr72_full_cluster_one_field_no_rex2(self):\n\n # Parameter reset.\n self.pA = 
1.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def test_orthonormality_fock():\n hs = LocalSpace('tls', basis=('g', 'e'))\n i = IdxSym('i')\n j = IdxSym('j')\n ket_0 = BasisKet(0, hs=hs)\n bra_0 = ket_0.dag()\n ket_1 = BasisKet(1, hs=hs)\n ket_g = BasisKet('g', hs=hs)\n bra_g = ket_g.dag()\n ket_e = BasisKet('e', hs=hs)\n ket_i = BasisKet(FockIndex(i), hs=hs)\n ket_j = BasisKet(FockIndex(j), hs=hs)\n bra_i = ket_i.dag()\n ket_i_lb = BasisKet(FockLabel(i, hs=hs), hs=hs)\n ket_j_lb = BasisKet(FockLabel(j, hs=hs), hs=hs)\n bra_i_lb = ket_i_lb.dag()\n\n assert bra_0 * ket_1 == Zero\n assert bra_0 * ket_0 == One\n\n assert bra_g * ket_g == One\n assert bra_g * ket_e == Zero\n assert bra_0 * ket_g == One\n assert bra_0 * ket_e == Zero\n assert bra_g * ket_0 == One\n assert bra_g * ket_1 == Zero\n\n delta_ij = KroneckerDelta(i, j)\n delta_i0 = KroneckerDelta(i, 0)\n delta_0j = KroneckerDelta(0, j)\n assert bra_i * ket_j == delta_ij\n assert bra_i * ket_0 == delta_i0\n assert bra_0 * ket_j == delta_0j\n assert bra_i * ket_g == delta_i0\n assert bra_g * ket_j == delta_0j\n assert delta_ij.substitute({i: 0, j: 0}) == One\n assert delta_ij.substitute({i: 0, j: 1}) == Zero\n assert delta_i0.substitute({i: 0}) == One\n assert delta_i0.substitute({i: 1}) == Zero\n\n delta_ij = KroneckerDelta(i, j)\n delta_ig = KroneckerDelta(i, 0)\n delta_gj = KroneckerDelta(0, j)\n assert bra_i_lb * ket_j_lb == delta_ij\n assert bra_i_lb * ket_0 == delta_ig\n assert bra_0 * ket_j_lb == delta_gj\n assert bra_i_lb * ket_g == delta_ig\n assert bra_g * ket_j_lb == delta_gj\n assert delta_ij.substitute({i: 0, j: 0}) == One\n assert delta_ij.substitute({i: 0, j: 1}) == Zero\n assert delta_ig.substitute({i: 0}) == One\n assert delta_ig.substitute({i: 1}) == Zero", "def evaluation_f1_general(self):\n return(self.__evaluation_f1_general)", "def test_cr72_full_cluster_one_field_no_rex7(self):\n\n # Parameter reset.\n self.dw = 0.0\n self.pA = 1.0\n self.kex = 0.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def test_cr72_full_cluster_one_field_no_rex4(self):\n\n # Parameter reset.\n self.pA = 1.0\n self.dw = 0.0\n\n # Calculate and check the R2eff values.\n self.calc_r2eff()", "def DMFluxneuDet(flavor,Enu,ch,DMm,DMsig,body,param,osc): \n ##B From Arxiv: 0506298 ec. 
21 & 24\n #DM_annihilation_rate_Earth = 1.0e14*(100*param.GeV/DMm)**2/param.sec #[annhilations/s]\n #DM_annihilation_rate_Sun = ((1.0*param.AU)/(param.EARTHRADIUS*param.km))**2*DM_annihilation_rate_Earth\n DM_annihilation_rate_Sun = float(np.sum(DMSunAnnihilationRate(DMm,DMsig,param)))# [eV]\n ##E\n \n flux = 0.0\n \n if param.neutype == \"neutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n elif param.neutype == \"antineutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2+1,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n else :\n print \"Wrong neutrino type.\"\n quit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
if age includes 85andover, just have an upper bound over 85
def build_ages(self, lowerAge, upperAge): #Checking that upper is > than lower #Standardizing on the 5 year categories age_list = [] count = self.get_age_categories(lowerAge, upperAge) #this is a hacky way to get around the all mortality. if lowerAge == 'under5' and upperAge == '85over': age_list = [{'lower': 'under5', 'upper': 'NA', 'minAge': 0, 'maxAge': 4, 'ageGrouping': 'under5' }] for age in range(5,85,5): age_list.append({'lower': age, 'upper':age + 4, 'minAge': age, 'maxAge': age+4, 'ageGrouping': '{}_{}'.format(age, age+4)}) age_list.append({'lower':'85andover', 'upper':'NA', 'minAge': 85, 'maxAge': 99, 'ageGrouping': '85andover'}) return age_list elif isinstance(lowerAge, int) and upperAge == '85over': for age in range(lowerAge,85,5): age_list.append({'lower': age, 'upper':age + 4, 'minAge': age, 'maxAge': age+4 , 'ageGrouping': '{}_{}'.format(age, age+4)}) age_list.append({'lower':'85andover', 'upper':'NA', 'minAge': 85, 'maxAge': 99, 'ageGrouping': '85andover'}) return age_list elif isinstance(upperAge, int) and lowerAge == 'under5': print("HERE!!!") age_list = [{'lower': 'under5', 'upper': 'NA', 'minAge': 0, 'maxAge': 4, 'ageGrouping': 'under5' }] for age in range(5,upperAge,5): age_list.append({'lower': age, 'upper':age + 4, 'minAge': age, 'maxAge': age+4, 'ageGrouping': '{}_{}'.format(age, age+4)}) return age_list elif isinstance(upperAge, int) and isinstance(lowerAge, int): for age in range(lowerAge,upperAge,5): age_list.append({'lower': age, 'upper':age + 4, 'minAge': age, 'maxAge': age+4, 'ageGrouping': '{}_{}'.format(age, age+4)}) return age_list
[ "def constrain(amt: float, low: float, high: float) -> float:\n return low if amt < low else high if amt > high else amt", "def bounded(self):\n return self.lower > -np.inf or self.upper < np.inf", "def calcul_age_2050(age):\n return age+(2050-2016)", "def past_half_life(self):\n return self.half_life > 0 and self.age > self.half_life", "def check150(birth, death):\n try:\n if birth is None:\n return False\n birth = parser.parse(birth)\n\n if death is None:\n death = parser.parse(time.strftime(\"%d %b %Y\"))\n else:\n death = parser.parse(death)\n\n age = relativedelta(death, birth).years\n return age <= 150\n except ValueError:\n return False", "def boundary(value, arg):\r\n value = int(value)\r\n boundary = int(arg)\r\n if value > boundary:\r\n return boundary\r\n else:\r\n return value", "def upper_limit(self, val):\n self.gf_condition(upperLimit=val)", "def range_test(val, lower_limit, upper_limit):\n flag = (val > lower_limit) & (val < upper_limit)\n return (flag)", "def is_vintage(self):\n return self.get_age() > 50", "def determine_upper_bound(first_good,last_good):\n\t# Set some rules for the upper spectrum limit\n\t# Indo-US Library of Stellar Templates has a upper limit of 9464\n\tif ((last_good>=7000.) & (last_good<=9464.)) and (last_good-first_good>=500.): # cap at 7000 A\n\t\tauto_upp = last_good #7000.\n\telif ((last_good>=6750.) & (last_good<=7000.)) and (last_good-first_good>=500.): # include Ha/[NII]/[SII] region\n\t\tauto_upp = last_good\n\telif ((last_good>=6400.) & (last_good<=6750.)) and (last_good-first_good>=500.): # omit H-alpha/[NII] region if we can't fit all lines in region\n\t\tauto_upp = 6400.\n\telif ((last_good>=5050.) & (last_good<=6400.)) and (last_good-first_good>=500.): # Full MgIb/FeII region\n\t\tauto_upp = last_good\n\telif ((last_good>=4750.) & (last_good<=5025.)) and (last_good-first_good>=500.): # omit H-beta/[OIII] region if we can't fit all lines in region\n\t\tauto_upp = 4750.\n\telif ((last_good>=4400.) & (last_good<=4750.)) and (last_good-first_good>=500.):\n\t\tauto_upp = last_good\n\telif ((last_good>=4300.) & (last_good<=4400.)) and (last_good-first_good>=500.): # omit H-gamma region if we can't fit all lines in region\n\t\tauto_upp = 4300.\n\telif ((last_good>=3500.) & (last_good<=4300.)) and (last_good-first_good>=500.): # omit H-gamma region if we can't fit all lines in region\n\t\tauto_upp = last_good\n\telif (last_good-first_good>=500.):\n\t\tprint('\\n Not enough spectrum to fit! 
')\n\t\tauto_upp = None \n\telse:\n\t\tauto_upp = last_good\n\treturn auto_upp", "def check_age(self, age):\n \n if type(age) != int:\n print('Please enter correct age, integer')\n exit()\n if age < 10 or age > 100:\n print('Please enter correct age, range acceted (10-100)')\n exit()\n \n return age", "def eyr(value):\n return 2020 <= int(value) <= 2030", "def conditional(self, lower, upper):\r\n if lower > self.max_range:\r\n raise Exception(\"Conditioning not allowed, lower bound exceeds distribution range\")\r\n if lower == 0 and upper == np.inf:\r\n self.probabilities = self.counts / self.total\r\n else:\r\n mask = np.zeros(self.max_range + 1)\r\n for i in range(lower, upper + 1):\r\n mask[i] = 1\r\n self.probabilities = self.counts * mask / np.sum(self.counts * mask)", "def get_age_range(self):\n logger.info(\"Getting age range.\")\n self.age_group_id_start = self.data_draws.ix[self.data_draws['age_group_id'].map(lambda x:\n x in self.AGES_DISAGGREGATED),\n 'age_group_id'].min()\n self.age_group_id_end = self.data_draws.ix[self.data_draws['age_group_id'].map(lambda x:\n x in self.AGES_DISAGGREGATED),\n 'age_group_id'].max()\n self.age_group_id_start = int(self.age_group_id_start)\n self.age_group_id_end = int(self.age_group_id_end)", "def get_upperbound(self) -> int:", "def average_age_under(people, maxage=200):\n\n young_ages = [one_person['age']\n for one_person in people\n if one_person['age'] < maxage]\n\n if young_ages:\n return sum(young_ages) / len(young_ages)\n else:\n return 0", "def ageRange(row):\r\n if row.Age == 100:\r\n label = '100-104'\r\n\r\n else:\r\n label = str(row.Age) + '-' + str(row.Age + 9)\r\n\r\n return label", "def _feature_minmax_item_age(self, target_age, user_id):\n if user_id in self.stats_by_user.index:\n min_age = self.stats_by_user.ix[user_id, 'item_age_min']\n max_age = self.stats_by_user.ix[user_id, 'item_age_max']\n\n rst = (min_age < target_age) & (target_age < max_age)\n return self._encode_binary(rst)\n else:\n return self._encode_binary(False)", "def check_valid_interval(lower_bound, upper_bound, lower_name, upper_name):\n if lower_bound is None or upper_bound is None:\n return\n if upper_bound < lower_bound:\n raise InvalidArgument(\n 'Cannot have %s=%r < %s=%r' % (\n upper_name, upper_bound, lower_name, lower_bound\n ))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to make pretty print sql
def Print_pretty_sql(self, sqlList,): return " \n".join(sqlList)
[ "def data_pretty_print(self, data):\n data_str = tabulate(data, headers=\"keys\", tablefmt=\"psql\")\n return data_str", "def print_query_sql(query=None, /, *, literal_binds: bool = True,\n pretty: bool = True,\n file=None, flush: bool = True):\n sql = get_query_sql(query, literal_binds=literal_binds, pretty=pretty)\n print(sql, file=file, flush=flush)", "def debugQuery(q):\n print(q.compile(dialect=postgresql.dialect()))", "def pretty_print(self): # O(N^2)\n string = '' # O(1)\n for array in self.table: # O(N)\n for element in array: # O(N)\n string += \"('{0}', {1})\\n\".format(\n element[0], element[1]\n ) # O(1)\n\n if string: # O(1)\n print(string.strip()) # O(1)", "def print_query_result(query):\n exec_query(query)\n\n table = prettytable.from_db_cursor(cur)\n print(table)", "def pretty_printer(prettify, sudoku_row):\n if prettify is True:\n print(*sudoku_row)\n else:\n print(sudoku_row)", "def _make_pretty_examples(jspark, infos):\n\n pretty_output = \"\"\n for info in infos:\n if info.examples.startswith(\"\\n Examples:\"):\n output = []\n output.append(\"-- %s\" % info.name)\n query_examples = filter(lambda x: x.startswith(\" > \"), info.examples.split(\"\\n\"))\n for query_example in query_examples:\n query = query_example.lstrip(\" > \")\n print(\" %s\" % query)\n query_output = jspark.sql(query).showString(20, 20, False)\n output.append(query)\n output.append(query_output)\n pretty_output += \"\\n\" + \"\\n\".join(output)\n if pretty_output != \"\":\n return markdown.markdown(\n \"```sql%s```\" % pretty_output, extensions=['codehilite', 'fenced_code'])", "def formatted_query(self):\n return None", "def sqlExpression(writer):", "def prettyprint(li):\n keys = sorted(li[0].keys())\n t = [keys]\n for d in li:\n m = [d[k] for k in keys]\n m = map(str, m)\n t.append(m)\n\n cols = zip(*t)\n cols_sizes = map(max_len, cols) # get the widest entry in each column\n id = 'id'\n for row in t:\n s = \" %2s | \" % id + \" | \".join((item.ljust(pad) for item, pad in zip(row, cols_sizes)))\n say(s)\n try:\n id += 1\n except TypeError:\n id = 0", "def print_query_result(raw_query_result):\n # TODO Implement function\n row = len(raw_query_result)\n column = max(len(item) for item in raw_query_result)\n row0 = \"+\" + 20 * \"-\" + \"+\" + 50 * \"-\" + \"+\"\n newform = row0\n\n for i in range(row):\n newform = newform + \"\\n\" + \"|\"\n for j in range(column):\n if j == 0:\n if len(str(raw_query_result[i][j])) <= 20:\n s = \"{:^20}\".format(raw_query_result[i][j]) + \"|\"\n else:\n s = \"{:^20}\".format(raw_query_result[i][j])[0:17] + \"...|\"\n newform += s\n else:\n if len(str(raw_query_result[i][j])) <= 50:\n s = \"{:^50}\".format(raw_query_result[i][j]) + \"|\"\n else:\n s = \"{:^50}\".format(raw_query_result[i][j])[0:47] + \"...|\"\n newform += s\n\n newform = newform + \"\\n\" + row0\n print(newform)", "def pretty_print(self): \n data = json.dumps(self.data, sort_keys=True, indent=4 * ' ')\n print(data)", "def print_console(stmt=\"\"):\n print(indent + stmt)", "def prettyprint(puzzle):\n assert(len(puzzle)==81)\n line = 19 * '-'\n result = line\n for i in range(0, 81, 9):\n result += '\\n|' + '|'.join(list(puzzle[i:i+9])) + '|\\n' + line\n assert(len(result)==379)\n return result", "def pretty(ob, lexer=None):\r\n if lexer is None:\r\n if isinstance(ob, basestring):\r\n lexer = 'text'\r\n else:\r\n lexer = 'json'\r\n\r\n if lexer == 'json':\r\n ob = json.dumps(ob, indent=4, sort_keys=True)\r\n\r\n if got_pygments:\r\n lexerob = get_lexer_by_name(lexer)\r\n formatter = 
get_formatter_by_name(PRETTY_FORMATTER, style=PRETTY_STYLE)\r\n #from pygments.filters import *\r\n #lexerob.add_filter(VisibleWhitespaceFilter())\r\n ret = highlight(ob, lexerob, formatter)\r\n else:\r\n ret = ob\r\n\r\n return ret.rstrip()", "def pretty_print(item):\n print(prettify(item))", "def print_table(table):\n rows = execute(\"SELECT * FROM {}\".format(table), fetch_all=True)\n \n if rows == []:\n return\n\n table = PrettyTable(rows[0].keys())\n\n for row in rows:\n table.add_row(list(row))\n\n print(table)", "def sql(self):\n return self._sql", "def pretty(settings):\n\tfilter = settings.format(settings.content)\n\tfilter.pretty()\n\tsettings.content = filter.content" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Doc string CREATE TABLE mytable AS
def Create_table(self, tableName): return "CREATE TABLE {} AS \n".format(tableName)
[ "def create_table(self, table):\n sql = 'CREATE TABLE %s (\\n ' % table.name\n sql += ',\\n '.join(map(self.field, table.fields.values()))\n sql += ',\\n PRIMARY KEY (%s)\\n);\\n' % ', '.join(map(lambda _: _.name, table.primary_keys))\n sql += ''.join(map(self.index, table.indexes.values()))\n return sql", "def create_table_string(cols, table_name):\n\n table_string = \"CREATE TABLE \" + table_name + \"( \"\n for k, i in enumerate(cols):\n if k == len(cols)-1:\n table_string += i + \" character varying(50)\"\n else:\n table_string += i + \" character varying(50), \"\n table_string += \");\"\n return table_string", "def compile_create(self, blueprint, command, _):\n columns = ', '.join(self._get_columns(blueprint))\n\n sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint), columns)\n\n sql += self._add_foreign_keys(blueprint)\n\n sql += self._add_primary_keys(blueprint)\n\n return sql + ')'", "def createTable(conn, table, num_cols=10, engine=\"INNODB\"):\n cursor = conn.cursor()\n subquery = \",\".join([\"col\" + str(i) + \" INT\" for i in range(1,num_cols+1)])\n query = \"create table if not exists {} ({}) ENGINE={};\".format(table, subquery, engine)\n cursor.execute(query)\n conn.commit()", "def create_table(db):\n\n cursor = db.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS likes\")\n cursor.execute(\"\"\"\n CREATE TABLE likes (\n thing text\n )\n \"\"\")", "def create_table_3(new_table_name):\n\n create_table = f\"CREATE TABLE IF NOT EXISTS {new_table_name} (\\\n ID BIGINT PRIMARY KEY,\\\n region_subregion_country_area TEXT,\\\n country_code BIGINT,\\\n '1950' FLOAT, '1955' FLOAT,\\\n '1960' FLOAT, '1965' FLOAT,\\\n '1970' FLOAT, '1975' FLOAT,\\\n '1980' FLOAT, '1985' FLOAT,\\\n '1990' FLOAT, '1995' FLOAT,\\\n '2000' FLOAT, '2005' FLOAT,\\\n '2010' FLOAT, '2015' FLOAT);\"\n return create_table", "def create_table(db, create_table_sql):\n try:\n cursor = db.cursor()\n cursor.execute(create_table_sql)\n except Error as e:\n print(e)\n\n return", "def _table_creation_command(cls) -> str:\n return sql.Metacard.create_table()", "def create_table_8(new_table_name):\n\n create_table = f\"CREATE TABLE IF NOT EXISTS {new_table_name} (\\\n ID BIGINT PRIMARY KEY,\\\n country TEXT,\\\n code TEXT,\\\n country_code BIGINT,\\\n continent TEXT,\\\n capital TEXT,\\\n latitude FLOAT,\\\n longitude FLOAT);\"\n return create_table", "def setup_table(cursor, table_name, data, **options):\n cursor.execute(\"DROP TABLE IF EXISTS \" + table_name)\n options = options.items()\n sql_statement = \"CREATE TABLE \" + table_name + \"(\"\n for index, columns in enumerate(options):\n if columns == options[-1]:\n sql_statement += columns[0] + \" \" + columns[1].upper()\n else:\n sql_statement += columns[0] + \" \" + columns[1] + \", \"\n sql_statement += \")\"\n print sql_statement\n cursor.execute(sql_statement)\n cursor.executemany(\n \"INSERT INTO \" + table_name + \" VALUES(?, ?, ?)\", data)\n import ipdb\n ipdb.set_trace()\n return cursor.lastrowid", "def create_table(conn, create_table_sql):\n\n # Attempting to create the table in the database\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n # Printing the error if failure occurs\n except Error as e:\n print(e)\n\n # Returning void\n return", "def _table_creation_command(cls):\n return sql.Card.create_table()", "def _make_create_table_q(self):\n self._get_column_types()\n\n # Convert column types back to strings for use in the create table\n # statement\n types= ['{name} {raw_type}'.format(**x) for x in self.columns]\n args = {'table': 
self.table, 'columns': (', ').join(types)}\n query = 'CREATE TABLE imports.{table} ({columns}) ENGINE=MyISAM;'.format(**args)\n\n return query", "def createTable(self, connection):\n self.cursor.execute('''\n CREATE TABLE IF NOT EXISTS '''\n + self.table + '''(\n id integer PRIMARY KEY,\n name text NOT NULL,\n locality text,\n date text \n );\n ''')\n connection.commit()\n return", "def create_table():\n print(\"Create first_name table\")\n query = \"CREATE TABLE first_name (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255))\"\n connector = get_database_connection()\n cursor = connector.cursor()\n cursor.execute(query)\n connector.commit()", "def create_table(conn, db_table):\n\n try:\n c = conn.cursor()\n c.execute(db_table)\n except Error as e:\n print(f\"SQL Error>: {e}\")", "def create_table (self, tablename = 'motif'):\n\n c = self.connection.cursor()\n\n # create\n\n c.execute('''create table ? (matrix text, source text, factorName text, species\n text, pmid integer, domain text, structureCategory text )''', (tablename))\n\n self.connection.commit()\n\n c.close()", "def create_basic_table_in_dev(self):\n dev_table_sql = \"create table {} ( col1 text, col2 int, col3 timestamp )\".format(self.table_name)\n\n self.dev_db_conn.exec_ddl(dev_table_sql, None)", "def create_table(self, create_table_sql: str) -> None:\n try:\n c = self.conn.cursor()\n resp = c.executescript(create_table_sql)\n return resp\n except Error as e:\n print(e)\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the age adjusted death groups ,death_age_pop as ( SELECT * FROM crosstab($$ with death_records as ( SELECT 1 as place_holder, decd_dth_yr as yr, CASE WHEN decd_age_yr >= 0 AND decd_age_yr <= 4 THEN 0 WHEN decd_age_yr >= 5 AND decd_age_yr <= 9 THEN 5 END as age_category FROM disparities.decd WHERE decd_age_yr >= 0 AND decd_age_yr <= 9 ) SELECT 1 as rec_id, concat(age_category,'_',yr) as category, count(1) FROM death_records GROUP BY yr, age_category ORDER BY yr, age_category $$) AS piv_results(rec_id int, deaths_1 bigint, deaths_2 bigint, deaths_3 bigint, deaths_4 bigint, deaths_5 bigint, deaths_6 bigint, deaths_7 bigint, deaths_8 bigint, deaths_9 bigint, deaths_10 bigint) )
def age_adjusted_death(self, prettyPrint=True): dplist = [] dplist.append(", death_age_pop as") dplist.append("(") dplist.append("SELECT * FROM crosstab($$ ") dplist.append("with death_records as") dplist.append("(") dplist.append("SELECT 1 as place_holder, decd_dth_yr as yr,") dplist.append("CASE") for a in self.ageCategories: dplist.append( "WHEN decd_age_yr >= {} AND decd_age_yr <= {} THEN {} ".format(a['minAge'], a['maxAge'], a['minAge'] ) ) dplist.append("END as age_category") dplist.append("FROM disparities.decd") dplist.append("WHERE decd_age_yr >= {} AND decd_age_yr <= {}".format(self.youngestAge, self.oldestAge)) dplist.append(")") dplist.append("SELECT 1 as rec_id, concat(age_category,'_',yr) as category, count(1)") dplist.append("FROM death_records") dplist.append("GROUP BY yr, age_category") dplist.append("ORDER BY yr, age_category") dplist.append("$$)") dplist.append("AS piv_results ( rec_id int,") for t, y in enumerate(self.years): for c, a in enumerate(self.ageCategories): if c+t == 0: dplist.append( "deaths_{}_{} bigint".format(a["ageGrouping"], y) ) else: dplist.append( ", deaths_{}_{} bigint".format(a["ageGrouping"], y) ) #Closing Pivot dplist.append(")") #Closing the CTE dplist.append(")") return self.print_statements(dplist, prettyPrint)
[ "def age_pivot_table(age_df):\n # Group by heiarchical sorting.\n age_pivot_ser = age_df.groupby(by=['year', 'county', 'age',\n 'weight_indicator'\n ]\n ).birth_count.sum()\n\n # Unstack Series to create DataFrame.\n age_pivot_df = age_pivot_ser.unstack()\n\n return age_pivot_df", "def split_age_sex(df, run_id, id_cols, gbd_round_id, decomp_step,\n value_column='deaths', fix_gbd2016_mistake=True, cause_set_id=4,\n keep_weight_cause_used=False, gbd_team_for_ages='cod',\n level_of_analysis='cause_id', weight_path=None):\n\n orig_val_sum = df[value_column].sum()\n\n\n assert quick_bear_bones_structure_check(df)\n\n\n\n\n\n\n good_age_group_ids = db_queries.get_demographics(\n gbd_team_for_ages,\n gbd_round_id=5\n )['age_group_id']\n if level_of_analysis == 'icg_id' or level_of_analysis == 'bundle_id':\n good_age_group_ids.remove(2)\n good_age_group_ids.remove(3)\n good_age_group_ids.remove(4)\n good_age_group_ids.append(28)\n\n\n\n locations_in_data = list(set(df.location_id))\n years_in_data = list(set(df.year_id))\n pop_df = db_queries.get_population(\n age_group_id=good_age_group_ids,\n location_id=locations_in_data,\n year_id=years_in_data,\n sex_id=[1, 2],\n gbd_round_id=gbd_round_id,\n decomp_step=decomp_step\n )\n\n\n age_groups = get_age_groups()\n age_groups = prep_age_groups(age_groups)\n\n\n age_detail_map = prep_age_aggregate_to_detail_map(age_groups,\n good_age_group_ids)\n\n\n unsplittable_ages = set(df['age_group_id']) - \\\n set(age_detail_map['agg_age_group_id'])\n warn_text = \"\"\"\n These age group ids cannot be split onto the given age group set.\n\n {}\n \"\"\".format(unsplittable_ages)\n if len(unsplittable_ages) > 0:\n warnings.warn(warn_text)\n\n\n sex_detail_map = prep_sex_aggregate_to_detail_map()\n unsplittable_sexes = set(df['sex_id']) - set(sex_detail_map['agg_sex_id'])\n warn_text = \"\"\"\n These sex ids cannot be split onto Males/Females.\n\n {}\n \"\"\".format(unsplittable_sexes)\n if len(unsplittable_sexes) > 0:\n warnings.warn(warn_text)\n\n\n weight_df = get_age_weights(run_id=run_id,\\\n level_of_analysis=level_of_analysis,\n weight_path=weight_path)\n weight_causes = weight_df[level_of_analysis].unique()\n\n\n\n\n\n\n\n\n\n\n cause_to_weight_cause_map = \\\n prep_cause_to_weight_cause_map(cause_set_id,\n gbd_round_id, weight_causes,\n level_of_analysis=level_of_analysis)\n\n\n\n\n sep_df_dict = separate_detailed_from_aggregate(\n df,\n good_age_group_ids,\n unsplittable_ages,\n unsplittable_sexes\n )\n nosplit_df = sep_df_dict['nosplit']\n split_df = sep_df_dict['split']\n\n\n split_df = prep_split_df(\n split_df,\n pop_df,\n weight_df,\n age_detail_map,\n sex_detail_map,\n cause_to_weight_cause_map,\n id_cols,\n level_of_analysis=level_of_analysis\n )\n\n\n\n split_df = calculate_detail_val(\n split_df,\n id_cols,\n fix_gbd2016_mistake=fix_gbd2016_mistake,\n value_column=value_column\n )\n\n\n split_df = split_df[nosplit_df.columns]\n final_df = nosplit_df.append(split_df, ignore_index=True)\n\n\n group_columns = list(nosplit_df.columns)\n group_columns.remove(value_column)\n final_df = final_df.groupby(group_columns,\n as_index=False)[value_column].sum()\n\n\n val_diff = abs(final_df[value_column].sum() - orig_val_sum)\n if not np.allclose(val_diff, 0):\n text = \"Difference of {} {} from age sex \" \\\n \"splitting\".format(val_diff, value_column)\n if fix_gbd2016_mistake:\n raise AssertionError(text)\n else:\n warnings.warn(text)\n\n if level_of_analysis=='icg_id':\n\n\n\n if final_df[id_cols].duplicated().values.any():\n group_cols = 
list(final_df.columns)\n group_cols.remove('val')\n\n\n final_df = final_df.groupby(by=group_cols).agg({\"val\": \"sum\"}).\\\n reset_index()\n\n\n\n assert not final_df[id_cols].duplicated().values.any(), (\"there are \"\n \"duplicated rows in the final_df\")\n\n\n bad = set(final_df.age_group_id) - set(good_age_group_ids)\n if len(bad) > 0:\n text = \"Some age group ids still aggregate: {}\".format(bad)\n raise AssertionError(text)\n\n\n assert set(final_df[level_of_analysis]) == set(df[level_of_analysis])\n\n return final_df", "def getPercentilePointChageDeathsData(cases_rolling_df:pd.DataFrame() = None,\n election_df:pd.DataFrame() = None):\n\n # Find the annual number of cases and deaths per county\n if cases_rolling_df is None:\n cases_rolling_df = getCasesRollingAveragePer100K()\n\n # Get county-level presidential election data\n if election_df is None:\n election_df = getElectionSegmentsData()\n\n cases_rolling_df = cases_rolling_df[(cases_rolling_df['date']>=pd.to_datetime('2020-01-01'))\n & (cases_rolling_df['date']<=pd.to_datetime('2020-12-31'))]\n cases_rolling_df = (\n cases_rolling_df.groupby(\"COUNTYFP\")[\"deaths_avg_per_100k\"]\n .mean()\n .fillna(0)\n .reset_index()\n )\n\n # Select the top 100 in COVID deaths\n cases_rolling_df = cases_rolling_df.sort_values(\n [\"deaths_avg_per_100k\"], ascending=False\n )\n deaths_top_100_rolling_df = cases_rolling_df[:400].copy()\n\n # Merge the dataframes\n merged_df = deaths_top_100_rolling_df.merge(\n election_df, how=\"left\", on=\"COUNTYFP\", indicator=True\n )\n merged_df = merged_df[merged_df[\"_merge\"] == \"both\"].copy()\n merged_df[\"pct_increase\"] = (\n merged_df[\"fractionalvotes_2020\"] - merged_df[\"fractionalvotes_2016\"]\n )\n merged_df[\"pct_increase\"] = merged_df[\"pct_increase\"] * 100\n\n merged_df[\"segmentname\"] = merged_df[\"changecolor\"].map(color_segment_dict)\n return merged_df", "def create_death_rate_data(group_by_df: pd.DataFrame) -> pd.DataFrame:\n group_by_df[\"death_rate\"] = (\n group_by_df[\"total_deaths\"] / group_by_df[\"total_cases\"]\n ) * 100\n\n return group_by_df", "def create_age_df(db_connection):\n # this age_group_set_id is currently specific to gbd 2016\n call = \"\"\"\n SELECT age_group_id as all_ages\n FROM shared.age_group_set_list\n WHERE age_group_set_id = 12 AND is_estimate = 1;\n \"\"\"\n age_df_22 = db_connect.query(call, db_connection)\n age_df_27 = age_df_22.copy(deep=True)\n age_df_22['age'] = 22\n age_df_27['age'] = 27\n return pd.concat([age_df_22, age_df_27], ignore_index=True)", "def race_pivot_table(race_df):\n # Group by heiarchical sorting.\n race_pivot_ser = race_df.groupby(by=['year', 'county', 'race',\n 'ethnicity', 'weight_indicator'\n ]\n ).birth_count.sum()\n\n # Unstack Series to create DataFrame.\n race_pivot_df = race_pivot_ser.unstack()\n\n return race_pivot_df\n\n # Unstack Series to create DataFrame.\n race_pivot_df = race_pivot_ser.unstack()\n\n return race_pivot_df", "def prep_age_groups(age_groups):\n age_groups = age_groups.copy()\n keep_cols = ['age_group_id',\n 'age_group_years_start', 'age_group_years_end']\n age_groups = age_groups[keep_cols]\n return age_groups", "def grouper(df):\n print(\"performing groupby and sum\")\n\n df.loc[df['outcome_id'] != 'death2', 'outcome_id'] = 'case'\n\n groups = ['location_id', 'year_start', 'year_end', 'age_group_unit',\n 'age_group_id', 'sex_id', 'source', 'nid',\n 'facility_id', 'representative_id', 'diagnosis_id',\n 'metric_id', 'outcome_id', 'nonfatal_cause_name']\n df = df.groupby(groups).agg({'val': 
'sum'}).reset_index()\n\n return df", "def ageGroups4():\n name = \"ageGroups4\"\n groups = {\n AGE: {\n \"Under 4 years\" : list(range(0, 4)),\n \"4 to 7 years\" : list(range(4, 8)),\n \"8 to 11 years\": list(range(8, 12)),\n \"12 to 15 years\": list(range(12, 16)),\n \"16 to 19 years\": list(range(16, 20)),\n \"20 to 23 years\": list(range(20, 24)),\n \"24 to 27 years\": list(range(24, 28)),\n \"28 to 31 years\": list(range(28, 32)),\n \"32 to 35 years\": list(range(32, 36)),\n \"36 to 39 years\": list(range(36, 40)),\n \"40 to 43 years\": list(range(40, 44)),\n \"44 to 47 years\": list(range(44, 48)),\n \"48 to 51 years\": list(range(48, 52)),\n \"52 to 55 years\": list(range(52, 56)),\n \"56 to 59 years\": list(range(56, 60)),\n \"60 to 63 years\": list(range(60, 64)),\n \"64 to 67 years\": list(range(64, 68)),\n \"68 to 71 years\": list(range(68, 72)),\n \"72 to 75 years\": list(range(72, 76)),\n \"76 to 79 years\": list(range(76, 80)),\n \"80 to 83 years\": list(range(80, 84)),\n \"84 to 87 years\": list(range(84, 88)),\n \"88 to 91 years\": list(range(88, 92)),\n \"92 to 95 years\": list(range(92, 96)),\n \"96 to 99 years\": list(range(96, 100)),\n \"100 to 103 years\": list(range(100, 104)),\n \"104 to 107 years\": list(range(104, 108)),\n \"108 to 111 years\": list(range(108, 112)),\n \"112 to 115 years\": list(range(112, 116)),\n }\n }\n return name, groups", "def check_age_death(df):\n died = df[df.DEATH == 1]\n assert (died.DEATH_AGE <= died.FU_END_AGE).all()", "def get_times_to_deaths(model,df,k=41):\n time_to_deaths=[]\n Qs=[]\n for i,pat in enumerate(df.Pat.unique()):\n pat_df=df[df.Pat==pat]\n dead=pat_df.Rewards.iloc[-1]==-15\n if dead:\n death_times=np.arange(pat_df.shape[0],0,-1)\n else :\n death_times=np.zeros(pat_df.shape[0])\n time_to_deaths.append(death_times)\n states=torch.FloatTensor(pat_df.iloc[:,:k].values).to(device)\n \n with torch.no_grad():\n Q,_=model.Q(states)\n \n \n Qs.append(Q[:,0,:].cpu().numpy())\n \n print(i/df.Pat.unique().shape[0])\n\n return time_to_deaths, Qs", "def subseting (df ,state):\n df_copy = df.copy()\n df = df[df.State == state]\n df_copy = df[[ 'Year' , 'Deaths']]\n df = pd.to_numeric(df['Deaths'])\n return df_copy", "def calculate_detail_val(df, id_cols, fix_gbd2016_mistake=True,\n value_column='deaths'):\n\n\n agg_name = 'agg_{}'.format(value_column)\n assert 'exp_val' not in df.columns, \\\n \"Unexpected: exp_val already in columns\"\n assert 'sum_exp_val' not in df.columns, \\\n \"Unexpected: sum_exp_val already in columns\"\n assert agg_name not in df.columns, \\\n \"Unexpected: {} already in columns\".format(agg_name)\n\n df = df.rename(columns={value_column: agg_name})\n df['exp_val'] = df['weight'] * df['population']\n group_cols = list(id_cols)\n group_cols.remove('age_group_id')\n group_cols.remove('sex_id')\n group_cols.append('agg_age_group_id')\n group_cols.append('agg_sex_id')\n df['sum_exp_val'] = df.groupby(group_cols)['exp_val'].transform(sum)\n\n\n if fix_gbd2016_mistake:\n\n\n\n df.loc[df['sum_exp_val'] == 0, 'exp_val'] = 1\n\n df.loc[\n df['sum_exp_val'] == 0,\n 'sum_exp_val'\n ] = df.groupby(group_cols)['exp_val'].transform(sum)\n\n\n df[value_column] = df['exp_val'] * (df[agg_name] / df['sum_exp_val'])\n\n\n if not fix_gbd2016_mistake:\n\n df[value_column] = df[value_column].fillna(0)\n\n return df", "def group_age(sex, data, dir_name, state):\n ugly_agegroups = [\"(-1, 4]\", \"(4, 9]\", \"(9, 14]\", \"(14, 19]\",\n \"(19, 24]\", \"(24, 29]\", \"(29, 34]\",\n \"(34, 39]\", \"(39, 44]\", \"(44, 49]\",\n \"(49, 
54]\", \"(54, 59]\", \"(59, 64]\", \"(64, 69]\",\n \"(69, 74]\", \"(74, 79]\", \"(79, 84]\", \"(84, 200]\"]\n nice_agegroups = [\"0-4\", \"5-9\", \"10-14\", \"15-19\", \"20-24\", \"25-29\",\n \"30-34\", \"35-39\", \"40-44\", \"45-49\", \"50-54\",\n \"55-59\", \"60-64\", \"65-69\", \"70-74\", \"75-79\",\n \"80-84\", \"85plus\"]\n # Data from 1990-2009 or 2010-2030?\n if dir_name == \"state-indicators\":\n length = 21\n years = range(1990, 2010) # 1990-2009\n else:\n length = 22\n years = range(2010, 2031) # 2010-2030\n # read only the appropiate rows\n # The worksheet contains data for men at the top and women at the top\n if sex == \"Males\":\n df_xlsx = data.iloc[5:115, 1:length]\n else:\n df_xlsx = data.iloc[119:229, 1:length]\n ages = range(0, 110)\n # We want the data by 5 year age groups\n bins = [x for x in range(-1, 85, 5)]\n # No one lives to be 200\n bins.append(200)\n df_xlsx['AgeGroup'] = pd.cut(ages, bins=bins)\n df_xlsx = df_xlsx.replace(ugly_agegroups,\n nice_agegroups)\n df_xlsx = df_xlsx.groupby(\"AgeGroup\").sum()\n df_xlsx = df_xlsx.transpose()\n df_xlsx = pd.DataFrame(df_xlsx.stack())\n df_xlsx.columns = [sex]\n df_xlsx['Year'] = np.repeat(years, 18)\n df_xlsx = df_xlsx.reset_index()\n del df_xlsx['level_0']\n # Add the ugly state file name defined in STATES\n df_xlsx['State'] = state\n return df_xlsx", "def population_stats(df):\n\n return ...", "def person_deaths(self, prettyPrint):\n \n person_deaths = []\n person_deaths.append(\", person_deaths as\")\n person_deaths.append(\"(\")\n person_deaths.append(\"SELECT p.zcta_id, p.tract_id, p.person_id, p.geom, p.sex, p.age, p.race \")\n #List comprehension to generate the sql\n yearsList = [\"num_deaths_%s\" % (y) for y in self.years ]\n person_deaths.append( \",( %s ) as total_deaths \" % (\" + \".join(yearsList)) )\n person_deaths.append(\"FROM expected_deaths p\")\n person_deaths.append(\")\")\n \n \n return self.print_statements(person_deaths, prettyPrint)", "def grouped_drugs(some_data):\n some_data1 = some_data.groupby([\"MannerofDeath\", \"Sex\", \"FiscalYear\"]).agg(\n {\"Morphine_NotHeroin\": \"count\", \"Ethanol\": \"count\" })\n some_data1.reset_index(inplace=True)\n #some_data1.drop([6, 7], inplace=True)\n some_data1[\"FiscalYear\"] = some_data1[\"FiscalYear\"].astype(str)\n return some_data1", "def __get_age_table(df):\n age = df.iloc[[0,1,2]]\n age = pd.concat([pd.DataFrame([['50053', '41651', '48043', '139735']], columns = age.columns), age])\n age.index = ['Total', 'Oy', 'Working Without A Diploma', 'Not Oy']\n return age", "def tvd(data, col, group_col):\n\n tvd = (\n data\n .pivot_table(\n index=col,\n columns=group_col,\n aggfunc='size',\n fill_value=0\n )\n .apply(lambda x: x / x.sum())\n .diff(axis=1).iloc[:, -1].abs().sum() / 2\n )\n\n return tvd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SELECT deaths_2011/est_pop_5_9_year_2011 as expected_death_rate_5_9_2011, deaths_2012/est_pop_5_9_year_2012 as expected_death_rate_5_9_2012, deaths_2013/est_pop_5_9_year_2013 as expected_death_rate_5_9_2013, deaths_2014/est_pop_5_9_year_2014 as expected_death_rate_5_9_2014, deaths_2015/est_pop_5_9_year_2015 as expected_death_rate_5_9_2015 FROM total_pop, total_deaths
def death_rates(self, prettyPrint): death_rate = [] death_rate.append(", death_rates as") death_rate.append("(") death_rate.append("SELECT ") for c, y in enumerate(self.years): if c == 0: death_rate.append("deaths_%s/est_pop_%s as expected_death_rate_%s" % (y,y,y)) else: death_rate.append(", deaths_%s/est_pop_%s as expected_death_rate_%s" % (y,y,y)) death_rate.append("FROM total_pop, total_deaths") death_rate.append(")") return self.print_statements(death_rate, prettyPrint)
[ "def create_death_rate_data(group_by_df: pd.DataFrame) -> pd.DataFrame:\n group_by_df[\"death_rate\"] = (\n group_by_df[\"total_deaths\"] / group_by_df[\"total_cases\"]\n ) * 100\n\n return group_by_df", "def percent_change_bachelors_2000s(df, sex='A'):\n df_2000 = df[(df['Year'] == 2000) &\n (df['Min degree'] == \"bachelor's\") &\n (df['Sex'] == sex)]\n df_2010 = df[(df['Year'] == 2010) &\n (df['Min degree'] == \"bachelor's\") &\n (df['Sex'] == sex)]\n df_2000 = df_2000.loc[:, ['Total']].squeeze()\n df_2010 = df_2010.loc[:, ['Total']].squeeze()\n return df_2010 - df_2000", "def convert_to_rates(df):\n pops = qry.get_pops(both_sexes=True)\n df = df.merge(pops, how = 'inner')#how='left')\n assert df.mean_pop.notnull().values.all(), 'pop merge failed'\n id_cols = dw.EPI_CHILD_OVRWGT_GROUP_COLS\n draws = [col for col in df.columns if 'draw_' in col]\n df = pd.concat([\n df[id_cols],\n df[draws].apply(lambda x: x / df['mean_pop'])\n ], axis=1\n )\n df['metric_id'] = 3\n return df", "def summary_stats(r, riskfree_rate=0.03, periods_per_year=12):\n wealth_index = (1+r).cumprod()\n total_rets = ((wealth_index.iloc[-1,:]/wealth_index.iloc[0,:]-1)*100).round(2).astype(str) + '%'\n ann_r = (r.aggregate(annualized_ret, periods_per_year=periods_per_year)*100).round(2).astype(str) + '%'\n ann_vol = (r.aggregate(annualized_vol, periods_per_year=periods_per_year)*100).round(2).astype(str) + '%'\n ann_sr = r.aggregate(sharpe_ratio, rf=riskfree_rate, periods_per_year=periods_per_year).round(2)\n sortino = r.aggregate(sortino_ratio, rf=riskfree_rate, periods_per_year=periods_per_year).round(2)\n dd = (r.aggregate(lambda r: drawdown(r).Drawdowns.min())*100).round(2).astype(str) + '%'\n skew = r.aggregate(skewness).round(2)\n kurt = r.aggregate(kurtosis).round(2)\n cf_var5 = (r.aggregate(gaussian_var, modified=True)*100).round(2).astype(str) + '%'\n hist_cvar5 = (r.aggregate(cvar_historic)*100).round(2).astype(str) + '%'\n return pd.DataFrame({\n \"Total Return\": total_rets,\n \"Annualized Return\": ann_r,\n \"Annualized Vol\": ann_vol,\n \"Skewness\": skew,\n \"Kurtosis\": kurt,\n \"Cornish-Fisher VaR (5%)\": cf_var5,\n \"Historic CVaR (5%)\": hist_cvar5,\n \"Sharpe Ratio\": ann_sr,\n \"Sortino Ratio\": sortino,\n \"Max Drawdown\": dd\n })", "def growth_rate(dataframe):\n dataframe[\"Growth Rate\"] = dataframe.Birthrate - dataframe.Deathrate", "def tripduration_genderbirth(df):\n print('\\nCalculating Trip duration based on gender and birth year\\n')\n start_time = time.time()\n\n #trip duration by gender\n dur_by_gender=df.groupby('Gender')['Trip Duration'].mean()\n dur_by_gender_min=(dur_by_gender/60).round(2) #average trip duration in minutes by gender, to 2 dp\n print(\"Average trip duration by gender:\\n{}\".format(dur_by_gender_min))\n\n #longest trip duration by birth year\n dur_by_birth_max=df.groupby('Birth Year')['Trip Duration'].mean().max()\n dur_idx_max=df.groupby('Birth Year')['Trip Duration'].mean().idxmax()\n dur_by_birth_max_minutes=(dur_by_birth_max/60).round(2)\n #shortest trip by birth year\n dur_by_birth_min=df.groupby('Birth Year')['Trip Duration'].mean().min()\n dur_idx_min=df.groupby('Birth Year')['Trip Duration'].mean().idxmin()\n dur_by_birth_min_minutes=(dur_by_birth_min/60).round(2)\n\n print(\"Average trip duration by birth year:\\nUsers born in {:.0f} spend the most time on a trip, averaging {:.2f} minutes\\nUsers born in {:.0f} spend the least time on a trip, averaging: {:.2f} minutes\".format(dur_idx_max,dur_by_birth_max_minutes,dur_idx_min,dur_by_birth_min_minutes))\n\n 
print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def Populations():\n age_df = load_data('agerangenotificationeu')[['country', 'population']]\n testing_df = load_data('testing')[['country', 'population']]\n pop = pd.merge(age_df, testing_df, how='outer').groupby(['country']).median()\n \n def getter(country: str) -> float:\n return pop.loc[country, 'population']\n\n return getter", "def diabetes_rate():\n\n # Use Pandas to perform the sql query\n diabetes_stmt = db.session.query(Combined).statement\n diabetes_df = pd.read_sql_query(diabetes_stmt, db.session.bind)\n diabetes_df = diabetes_df[\n [\"State\",\n \"State_2\",\n \"Diabetes_Rate_2018\",\n \"Population_2019\",\n \"1st\",\n \"2nd\",\n \"3rd\"]]\n\n diabetes_df[\"Population_with_Diabetes\"] = diabetes_df[\"Population_2019\"] * (diabetes_df[\"Diabetes_Rate_2018\"] / 100)\n\n return diabetes_df.to_json()", "def create_eps_pe_ratio_revenue_income_expenditure_net_profit(rev, stk):\n\n stk[\"Date\"] = pd.to_datetime(stk[\"Date\"])\n s = min(rev.year)\n e = max(rev.year)\n cols = ['Revenue', 'Income', 'Expenditure', 'Net Profit', 'EPS']\n stk[cols] = pd.DataFrame([[0]*len(cols)], index=stk.index)\n\n rep = ['revenue', 'income', 'expenditure', 'profit', 'eps']\n\n for index, row in stk.iterrows():\n q = (row.Date.month-1)//3 + 1\n samp = rev[(rev['year'] == row.Date.year) & (rev['quartile'] == q)]\n if samp.shape[0] != 0:\n stk.loc[index, cols] = samp.iloc[0][rep].values\n else:\n stk.loc[index, cols] = [np.nan]*5\n\n stk['year'] = pd.DatetimeIndex(stk['Date']).year\n # stk = stk[(stk.year >= s)&(stk.year <= e) & stk[\"Revenue\"] !=0 ]\n # stk = stk.drop([\"year\"],axis=1)\n\n bands = [2, 4, 8]\n\n for band in bands:\n bcols = ['Revenue last '+str(band)+' quarters', 'Income last '+str(band)+' quarters', 'Expenditure last '+str(\n band)+' quarters', 'Net Profit last '+str(band)+' quarters', 'EPS last '+str(band)+' quarters']\n stk[bcols] = pd.DataFrame([[0]*len(bcols)], index=stk.index)\n\n for index, row in stk.iterrows():\n q = (row.Date.month-1)//3 + 1\n samp = rev[(rev['year'] == row.Date.year) & (rev['quartile'] == q)]\n if samp.shape[0] == 0:\n r = 1\n else:\n r = samp.index.values[0]\n if r+band+1 < rev.shape[0]:\n v = range(r+1, r+band+1)\n stk.loc[index, bcols] = rev.loc[v, rep].sum().values\n stk[\"p/e\"] = stk[\"Close Price\"]/stk[\"EPS\"]\n return stk", "def percent_change_bachelors_2000s(data, sex='A'):\n d = data[(data['Min degree'] == 'bachelor\\'s') & (data['Sex'] == sex)]\n d2000 = d[d['Year'] == 2000]['Total'].sum()\n d2010 = d[d['Year'] == 2010]['Total'].sum()\n return d2010 - d2000", "def age_adjusted_death(self, prettyPrint=True):\n \n dplist = []\n dplist.append(\", death_age_pop as\")\n dplist.append(\"(\")\n dplist.append(\"SELECT * FROM crosstab($$ \")\n dplist.append(\"with death_records as\")\n dplist.append(\"(\")\n dplist.append(\"SELECT 1 as place_holder, decd_dth_yr as yr,\")\n dplist.append(\"CASE\")\n \n \n for a in self.ageCategories:\n dplist.append( \"WHEN decd_age_yr >= {} AND decd_age_yr <= {} THEN {} \".format(a['minAge'], a['maxAge'], a['minAge'] ) )\n \n dplist.append(\"END as age_category\")\n dplist.append(\"FROM disparities.decd\")\n dplist.append(\"WHERE decd_age_yr >= {} AND decd_age_yr <= {}\".format(self.youngestAge, self.oldestAge))\n dplist.append(\")\")\n dplist.append(\"SELECT 1 as rec_id, concat(age_category,'_',yr) as category, count(1)\")\n dplist.append(\"FROM death_records\")\n dplist.append(\"GROUP BY yr, age_category\")\n dplist.append(\"ORDER BY 
yr, age_category\")\n dplist.append(\"$$)\")\n dplist.append(\"AS piv_results ( rec_id int,\")\n \n for t, y in enumerate(self.years):\n for c, a in enumerate(self.ageCategories):\n if c+t == 0:\n dplist.append( \"deaths_{}_{} bigint\".format(a[\"ageGrouping\"], y) )\n else:\n dplist.append( \", deaths_{}_{} bigint\".format(a[\"ageGrouping\"], y) ) \n #Closing Pivot\n dplist.append(\")\")\n #Closing the CTE\n dplist.append(\")\")\n\n return self.print_statements(dplist, prettyPrint)", "def test_4():\n table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')\n\n result = aspects.group_by(table, ['race/ethnicity'], \n enums.SummaryOperators.PROPORTION_OF_COUNT)\n \n result_table = result['table']\n result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'gender'])\n\n result_suggestions = result['suggestions']\n \n # Sum of proportion column should be(close to) 1.0\n assert(result_table['gender'].sum() == 1.0)\n\n print(result_table)\n\n expected_result_table = \"\"\" race/ethnicity gender\n0 group A 0.089\n1 group B 0.190\n2 group C 0.319\n3 group D 0.262\n4 group E 0.140\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result_table == result_table.to_string())\n assert(str(result_suggestions) == expected_suggestions)", "def calc_ratio_values(start):\n\n end = datetime(start.year + 1, start.month, start.day).date()\n term_end = datetime.utcnow().date()\n engine = get_engine()\n energy_total = 0.0\n ratio_values = 0.0\n try:\n with engine.connect() as con:\n # Query total energy which should be ~ 1.000.000 kWh\n energy_total = con.execute(\"SELECT SUM(energy) FROM loadprofile WHERE date BETWEEN \\'\"\n + str(start) + \"\\' AND \\'\" + str(end) +\n '\\' ORDER BY date').first()[0]\n\n # Query sum of energy promilles\n energy_promille = con.execute(\"SELECT SUM(energy) FROM loadprofile\"\n + \" WHERE date BETWEEN \\'\" +\n str(start) + \"\\' AND \\'\"\n + str(term_end) + '\\' ORDER BY date').first()[0]\n\n if (energy_promille is not None) and (energy_total is not None):\n ratio_values = energy_promille/energy_total\n\n except Exception as e:\n message = exception_message(e)\n logger.error(message)\n\n return ratio_values", "def fraction_licks_rewarded(expt_grp):\n result = []\n for expt in expt_grp:\n\n totalLicks = sum([\n trial.behaviorData()['licking'].shape[0]\n for trial in expt.findall('trial')])\n\n totalWater = sum([\n trial.behaviorData()['water'].shape[0]\n for trial in expt.findall('trial')])\n\n rewardRate = expt.reward_parameters().get('operant_rate', 1)\n\n try:\n fraction = float(totalWater) / (totalLicks / float(rewardRate))\n except ZeroDivisionError:\n fraction = np.nan\n\n result.append({\n 'expt': expt, 'lick': totalLicks, 'water': totalWater,\n 'value': fraction})\n return pd.DataFrame(result, columns=['expt', 'lick', 'water', 'value'])", "def test_get_proportions_data():\n test_data = {'YEAR': [2012, 2012, 2012, 2012, 2013, 2013, 2013, 2013,\n 2014, 2014, 2014, 2014, 2015, 2015, 2015, 2015,\n 2016, 2016, 2016, 2016, 2017, 2017, 2017, 2017,\n 2018, 2018, 2018, 2018],\n 'TREAT_EARLY': [1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1,\n 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],\n 'TREAT_LATE': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0,\n 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],\n 'GOOD_GENHLTH': [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1],\n 'PHYS_DISTRESS': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1],\n 'MENT_DISTRESS': [1, 0, 0, 1, 0, 1, 1, 0, 
0, 0, 1, 0, 1, 1, 1,\n 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],\n 'POOR_OVR_HLTH': [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0,\n 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1],\n 'HLTHPLN': [0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n 'HAS_PERSDOC': [1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n 'MEDCOST': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1,\n 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],\n 'ANNUAL_CHECKUP': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1,\n 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0]}\n\n expected_data = {'YEAR': [2012, 2012, 2012, 2012, 2013, 2013, 2013, 2013,\n 2014, 2014, 2014, 2014, 2015, 2015, 2015, 2015,\n 2016, 2016, 2016, 2016, 2017, 2017, 2017, 2017,\n 2018, 2018, 2018, 2018],\n 'TREAT_EARLY': [1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],\n 'TREAT_LATE': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1,\n 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],\n 'GOOD_GENHLTH': [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,\n 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0,\n 0, 1],\n 'PHYS_DISTRESS': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n 0, 1],\n 'MENT_DISTRESS': [1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,\n 0, 0],\n 'POOR_OVR_HLTH': [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1,\n 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n 0, 1],\n 'HLTHPLN': [0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1,\n 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n 'HAS_PERSDOC': [1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n 'MEDCOST': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],\n 'ANNUAL_CHECKUP': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0,\n 1, 0],\n 'PROP_GOOD_GENHLTH': [1/3, 1/1, 1/3, 1/3, 0/2, 1/1, 1/1,\n 0/2, 1/2, 1/2, 0/1, 1/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 1/1, 1/2,\n 1/2, 1/2, 1/2, 1/2, 1/2, 0/1, 1/1],\n 'PROP_PHYS_DISTRESS': [1/3, 0/1, 1/3, 1/3, 1/2, 0/1, 0/1,\n 1/2, 1/2, 1/2, 0/1, 1/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 0/1, 2/2,\n 1/2, 1/2, 2/2, 0/2, 0/2, 0/1, 1/1],\n 'PROP_MENT_DISTRESS': [2/3, 0/1, 2/3, 2/3, 0/2, 1/1, 1/1,\n 0/2, 0/2, 0/2, 1/1, 0/1, 2/2, 2/2,\n 2/2, 2/2, 0/1, 0/2, 0/2, 1/1, 2/2,\n 0/2, 0/2, 2/2, 2/2, 2/2, 0/1, 0/1],\n 'PROP_POOR_OVR_HLTH': [2/3, 1/1, 2/3, 2/3, 1/2, 0/1, 0/1,\n 1/2, 1/2, 1/2, 0/1, 0/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 0/1, 1/2,\n 1/2, 1/2, 1/2, 1/2, 1/2, 0/1, 1/1],\n 'PROP_HLTHPLN': [2/3, 0/1, 2/3, 2/3, 2/2, 1/1, 0/1, 2/2,\n 1/2, 1/2, 1/1, 1/1, 1/2, 1/2, 1/2, 1/2,\n 1/1, 1/2, 1/2, 0/1, 0/2, 2/2, 2/2, 0/2,\n 2/2, 2/2, 0/1, 0/1],\n 'PROP_HAS_PERSDOC': [3/3, 0/1, 3/3, 3/3, 2/2, 1/1, 0/1,\n 2/2, 1/2, 1/2, 1/1, 1/1, 2/2, 1/2,\n 2/2, 1/2, 0/1, 1/2, 1/2, 1/1, 0/2,\n 2/2, 2/2, 0/2, 0/2, 0/2, 1/1, 1/1],\n 'PROP_MEDCOST': [2/3, 1/1, 2/3, 2/3, 1/2, 0/1, 1/1, 1/2,\n 1/2, 1/2, 0/1, 0/1, 1/2, 2/2, 1/2, 2/2,\n 1/1, 0/2, 0/2, 1/1, 1/2, 1/2, 1/2, 1/2,\n 2/2, 2/2, 0/1, 0/1],\n 'PROP_ANNUAL_CHECKUP': [1/3, 0/1, 1/3, 1/3, 1/2, 1/1,\n 0/1, 1/2, 1/2, 1/2, 1/1, 0/1,\n 0/2, 2/2, 0/2, 2/2, 1/1, 0/2,\n 0/2, 2/2, 1/2, 1/2, 1/2, 1/2,\n 1/2, 1/2, 1/1, 0/1]}\n\n test_df = pd.DataFrame(test_data)\n expected_df = pd.DataFrame(expected_data)\n actual_df = get_proportions_data(test_df)\n actual_df = actual_df.drop(columns=['TREAT', 'ALL_PROP_GOOD_GENHLTH',\n 'ALL_PROP_PHYS_DISTRESS',\n 'ALL_PROP_MENT_DISTRESS',\n 'ALL_PROP_POOR_OVR_HLTH',\n 'ALL_PROP_HLTHPLN',\n 'ALL_PROP_HAS_PERSDOC',\n 
'ALL_PROP_MEDCOST',\n 'ALL_PROP_ANNUAL_CHECKUP'])\n assert_frame_equal(expected_df, actual_df)", "def decimate_years(data, num_years=5):\n df = data.copy()\n min_year = int(df.time_lower.min())\n max_year = int(df.time_lower.max())\n\n year_ids = list(range(min_year, max_year+1, 1))\n collapse_ids = np.repeat(list(range(min_year, max_year+1, num_years)), repeats=num_years)[:len(year_ids)]\n\n collapse_dict_start = {\n year_ids[i]: collapse_ids[i] for i in range(len(year_ids))\n }\n\n group_columns = [x for x in df.columns if x not in ['meas_value', 'meas_std']]\n\n df['time_lower'] = df['time_lower'].astype(int).map(collapse_dict_start) + num_years / 2\n df['time_upper'] = df['time_lower']\n\n group = df.groupby(group_columns).mean()\n group.reset_index(inplace=True)\n\n return group[group_columns + ['meas_value', 'meas_std']]", "def calculate_detail_val(df, id_cols, fix_gbd2016_mistake=True,\n value_column='deaths'):\n\n\n agg_name = 'agg_{}'.format(value_column)\n assert 'exp_val' not in df.columns, \\\n \"Unexpected: exp_val already in columns\"\n assert 'sum_exp_val' not in df.columns, \\\n \"Unexpected: sum_exp_val already in columns\"\n assert agg_name not in df.columns, \\\n \"Unexpected: {} already in columns\".format(agg_name)\n\n df = df.rename(columns={value_column: agg_name})\n df['exp_val'] = df['weight'] * df['population']\n group_cols = list(id_cols)\n group_cols.remove('age_group_id')\n group_cols.remove('sex_id')\n group_cols.append('agg_age_group_id')\n group_cols.append('agg_sex_id')\n df['sum_exp_val'] = df.groupby(group_cols)['exp_val'].transform(sum)\n\n\n if fix_gbd2016_mistake:\n\n\n\n df.loc[df['sum_exp_val'] == 0, 'exp_val'] = 1\n\n df.loc[\n df['sum_exp_val'] == 0,\n 'sum_exp_val'\n ] = df.groupby(group_cols)['exp_val'].transform(sum)\n\n\n df[value_column] = df['exp_val'] * (df[agg_name] / df['sum_exp_val'])\n\n\n if not fix_gbd2016_mistake:\n\n df[value_column] = df[value_column].fillna(0)\n\n return df", "def test_5():\n table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')\n\n result = aspects.group_by(table, ['race/ethnicity'], \n enums.SummaryOperators.PROPORTION_OF_SUM)\n \n result_table = result['table']\n result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'reading score'])\n \n result_suggestions = result['suggestions']\n\n # Sum of proportion column should be(close to) 1.0\n assert(float(format(result_table['reading score'].sum(), '.5f')) == 1)\n\n print(result_table)\n\n expected_result_table = \"\"\" race/ethnicity reading score\n0 group A 0.083216\n1 group B 0.185011\n2 group C 0.318698\n3 group D 0.265263\n4 group E 0.147812\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result_table == result_table.to_string())\n assert(str(result_suggestions) == expected_suggestions)", "def test_country_gender_equal(c1, years = [2020]):\n # fetch data\n df = data()\n # filter year\n df = df[df.year.isin(years)]\n # join population\n df = df\\\n .merge(pops, on=['region','sex','age_start','age_end','age'], suffixes=('','_2'))\n df.deaths = df.deaths / df.population\n # filter country data\n df1 = df[(df.region == c1) & (df.sex == 'F')]\n df2 = df[(df.region == c1) & (df.sex == 'M')]\n # country statistics\n n1 = df1.deaths.sum()\n n2 = df2.deaths.sum()\n x1 = (df1.age_end + df1.age_start) / 2\n x2 = (df2.age_end + df2.age_start) / 2\n mu1 = (df1.deaths @ x1) / n1\n mu2 = (df2.deaths @ x2) / n2\n var1 = (df1.deaths @ (x1 - mu1)**2) / n1\n var2 = (df2.deaths @ (x2 - mu2)**2) / n2\n var_pooled = ((n1-1)*var1 
+ (n2-1)*var2) / (n1+n2-2)\n # f test\n if var1 > var2:\n f_df1,f_df2 = n1-1,n2-1\n fstat = var1 / var2\n else:\n f_df1,f_df2 = n2-1,n1-1\n fstat = var2 / var1\n fpi = 1 - f.cdf(fstat, f_df1, f_df2) # f test pi value\n # t test\n if fpi > .05:\n tstat = (mu1 - mu2) / math.sqrt(var_pooled * (n1+n2)/n1/n2)\n t_df = n1 + n2 - 2\n else:\n tstat = (mu1 - mu2) / math.sqrt(var1 / n1 + var2 / n2)\n t_df = (var1**2/n1 + var2**2/n2)**2 / ((var1/n1)**2/(n1-1) + (var2/n2)**2/(n2-1))\n tpi = 1 - t.cdf(tstat, t_df) # t test pi value\n return {\n 'country': c1,\n 'f_pi': fpi,\n 'f_accept': 'Y' if fpi > .05 else 'N',\n 't_pi': tpi,\n 't_accept': 'Y' if tpi > .05 else 'N'\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
person_deaths as ( SELECT p.zcta_id, p.tract_id, p.person_id, p.geom, p.sex, p.age, p.race, (p.num_deaths_2011 + p.num_deaths_2012 + p.num_deaths_2013 + p.num_deaths_2014 + p.num_deaths_2015) as total_deaths FROM expected_deaths p )
def person_deaths(self, prettyPrint): person_deaths = [] person_deaths.append(", person_deaths as") person_deaths.append("(") person_deaths.append("SELECT p.zcta_id, p.tract_id, p.person_id, p.geom, p.sex, p.age, p.race ") #List comprehension to generate the sql yearsList = ["num_deaths_%s" % (y) for y in self.years ] person_deaths.append( ",( %s ) as total_deaths " % (" + ".join(yearsList)) ) person_deaths.append("FROM expected_deaths p") person_deaths.append(")") return self.print_statements(person_deaths, prettyPrint)
[ "def create_death_rate_data(group_by_df: pd.DataFrame) -> pd.DataFrame:\n group_by_df[\"death_rate\"] = (\n group_by_df[\"total_deaths\"] / group_by_df[\"total_cases\"]\n ) * 100\n\n return group_by_df", "def age_adjusted_death(self, prettyPrint=True):\n \n dplist = []\n dplist.append(\", death_age_pop as\")\n dplist.append(\"(\")\n dplist.append(\"SELECT * FROM crosstab($$ \")\n dplist.append(\"with death_records as\")\n dplist.append(\"(\")\n dplist.append(\"SELECT 1 as place_holder, decd_dth_yr as yr,\")\n dplist.append(\"CASE\")\n \n \n for a in self.ageCategories:\n dplist.append( \"WHEN decd_age_yr >= {} AND decd_age_yr <= {} THEN {} \".format(a['minAge'], a['maxAge'], a['minAge'] ) )\n \n dplist.append(\"END as age_category\")\n dplist.append(\"FROM disparities.decd\")\n dplist.append(\"WHERE decd_age_yr >= {} AND decd_age_yr <= {}\".format(self.youngestAge, self.oldestAge))\n dplist.append(\")\")\n dplist.append(\"SELECT 1 as rec_id, concat(age_category,'_',yr) as category, count(1)\")\n dplist.append(\"FROM death_records\")\n dplist.append(\"GROUP BY yr, age_category\")\n dplist.append(\"ORDER BY yr, age_category\")\n dplist.append(\"$$)\")\n dplist.append(\"AS piv_results ( rec_id int,\")\n \n for t, y in enumerate(self.years):\n for c, a in enumerate(self.ageCategories):\n if c+t == 0:\n dplist.append( \"deaths_{}_{} bigint\".format(a[\"ageGrouping\"], y) )\n else:\n dplist.append( \", deaths_{}_{} bigint\".format(a[\"ageGrouping\"], y) ) \n #Closing Pivot\n dplist.append(\")\")\n #Closing the CTE\n dplist.append(\")\")\n\n return self.print_statements(dplist, prettyPrint)", "def getTotalDeaths(self, terminalOutput):\n death2021 = self.converter_list.responseConversion(\"SELECT CUMULATIVE_SUM(deaths) FROM death_cases \\\n WHERE time > '2021-01-01T00:00:00Z'\", self.client)\n death2021 = int(death2021[len(death2021) - 1]['cumulative_sum'])\n self.resultNumbers.append(death2021)\n if terminalOutput:\n print(f\"\\nThe total number of deaths in 2021 is: {death2021}\")", "def get_age(df):\n df['built'] = df['date'].dt.year - df['built']\n #renaming built to age\n df = df.rename(columns={'built':'age'})\n \n #converting renovation into age of renovation at transaction, \n #with 0 renovation converted to age of property \n for i, yr in df.renovation.items(): \n if yr == 0:\n df.loc[i,'renovation'] = df.loc[i,'age'] \n else: \n df.loc[i,'renovation'] = df.loc[i,'date'].year - df.loc[i, 'renovation']\n \n #renaming renovation to reno_age\n df = df.rename(columns={'renovation':'reno_age'})\n \n return df", "def growth_rate(dataframe):\n dataframe[\"Growth Rate\"] = dataframe.Birthrate - dataframe.Deathrate", "def tripduration_genderbirth(df):\n print('\\nCalculating Trip duration based on gender and birth year\\n')\n start_time = time.time()\n\n #trip duration by gender\n dur_by_gender=df.groupby('Gender')['Trip Duration'].mean()\n dur_by_gender_min=(dur_by_gender/60).round(2) #average trip duration in minutes by gender, to 2 dp\n print(\"Average trip duration by gender:\\n{}\".format(dur_by_gender_min))\n\n #longest trip duration by birth year\n dur_by_birth_max=df.groupby('Birth Year')['Trip Duration'].mean().max()\n dur_idx_max=df.groupby('Birth Year')['Trip Duration'].mean().idxmax()\n dur_by_birth_max_minutes=(dur_by_birth_max/60).round(2)\n #shortest trip by birth year\n dur_by_birth_min=df.groupby('Birth Year')['Trip Duration'].mean().min()\n dur_idx_min=df.groupby('Birth Year')['Trip Duration'].mean().idxmin()\n dur_by_birth_min_minutes=(dur_by_birth_min/60).round(2)\n\n 
print(\"Average trip duration by birth year:\\nUsers born in {:.0f} spend the most time on a trip, averaging {:.2f} minutes\\nUsers born in {:.0f} spend the least time on a trip, averaging: {:.2f} minutes\".format(dur_idx_max,dur_by_birth_max_minutes,dur_idx_min,dur_by_birth_min_minutes))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def geog_unit_deaths(self, geogUnit, prettyPrint):\n \n geog_agg = []\n geog_agg.append(\", geog_unit_deaths as\")\n geog_agg.append(\"(\")\n geog_agg.append(\"SELECT {}, sum(total_deaths) as total_deaths \".format(geogUnit))\n geog_agg.append(\"FROM person_deaths\")\n geog_agg.append(\"GROUP BY {}\".format(geogUnit))\n geog_agg.append(\")\")\n \n return self.print_statements(geog_agg, prettyPrint)", "def grouped_drugs(some_data):\n some_data1 = some_data.groupby([\"MannerofDeath\", \"Sex\", \"FiscalYear\"]).agg(\n {\"Morphine_NotHeroin\": \"count\", \"Ethanol\": \"count\" })\n some_data1.reset_index(inplace=True)\n #some_data1.drop([6, 7], inplace=True)\n some_data1[\"FiscalYear\"] = some_data1[\"FiscalYear\"].astype(str)\n return some_data1", "def population_stats(df):\n\n return ...", "def total_per_person():\n\n # ENTER YOUR CODE HERE", "def addDurationVariables(df):\n for alt in [0, 1]:\n df[f\"g{alt}r\"] = df.apply(\n lambda x: computeDurations(json.loads(x[\"sequence\"]), alternative=alt),\n axis=1,\n )\n for att in [\"p\", \"m\"]:\n df[f\"g{att}r\"] = df.apply(\n lambda x: computeDurations(json.loads(x[\"sequence\"]), attribute=att), axis=1\n )\n\n # Normalize durations to 1 in each trial\n df[\"g0\"] = df[\"g0r\"] / df[[\"g0r\", \"g1r\"]].sum(axis=1)\n df[\"g1\"] = df[\"g1r\"] / df[[\"g0r\", \"g1r\"]].sum(axis=1)\n df[\"gm\"] = df[\"gmr\"] / df[[\"gmr\", \"gpr\"]].sum(axis=1)\n df[\"gp\"] = df[\"gpr\"] / df[[\"gmr\", \"gpr\"]].sum(axis=1)\n\n return df.drop([\"g0r\", \"g1r\", \"gmr\", \"gpr\"], axis=1)", "def birth_death(self)->None:\n\n\n\t\tdeath_index = self.pick_death()\n\t\t[ xy,xx ] = self.pick_parents()\n\t\tmask = np.random.choice([0,1], size=(20,)).reshape((5,4))\n\n\t\talls_xx = np.copy(self.ALLS[xx])\n\t\tcontribs_xx = np.copy(self.GPMS[xx])\n\n\t\talls_xx [mask[ :1][0]==1] = self.ALLS[xy][mask[ :1][0]==1]\n\t\tcontribs_xx[mask[1: ] ==1] = self.GPMS[xy][mask[1: ] ==1]\n\t\twhile bool(len(self.mutation_plan_alleles['iterns'])) and self.it % GENERATION == self.mutation_plan_alleles['iterns'][0]:\n\t\t\tposn = self.mutation_plan_alleles['posns'][0]\n\n\t\t\talls_xx[posn] += np.random.normal(*PICKING_MEAN_STD)\n\n\t\t\tself.mutation_plan_alleles['iterns'] = self.mutation_plan_alleles['iterns'][1:]\n\t\t\tself.mutation_plan_alleles['posns' ] = self.mutation_plan_alleles['posns' ][1:]\n\n\t\twhile bool(len(self.mutation_plan_contrib['iterns'])) and self.it % GENERATION == self.mutation_plan_contrib['iterns'][0]:\n\t\t\tposn = tuple(self.mutation_plan_contrib['posns'][0])\n\t\t\tcontribs_xx[posn] += np.random.normal(*PICKING_MEAN_STD)\n\t\t\tself.mutation_plan_contrib['iterns'] = self.mutation_plan_contrib['iterns'][1:]\n\t\t\tself.mutation_plan_contrib['posns' ] = self.mutation_plan_contrib['posns' ][1:]\n\t\t\t\n\t\tif self.it % GENERATION == 0:\n\n\t\t\tself.mutation_plan_contrib = make_mutation_plan_contrib(MUTATION_RATE_CONTRIB_CHANGE)\n\t\t\tself.mutation_plan_alleles = make_mutation_plan_alleles(MUTATION_RATE_ALLELE)\n\n\t\tself.PHNS[death_index] = contribs_xx @ alls_xx.T\n\t\tself.ALLS[death_index] = alls_xx\n\t\tself.GPMS[death_index] = contribs_xx", "def sample_adultincome_query():\n return 
{'age': 22, 'workclass': 'Private', 'education': 'HS-grad', 'marital_status': 'Single', 'occupation': 'Service',\n 'race': 'White', 'gender': 'Female', 'hours_per_week': 45}", "def diabetes_rate():\n\n # Use Pandas to perform the sql query\n diabetes_stmt = db.session.query(Combined).statement\n diabetes_df = pd.read_sql_query(diabetes_stmt, db.session.bind)\n diabetes_df = diabetes_df[\n [\"State\",\n \"State_2\",\n \"Diabetes_Rate_2018\",\n \"Population_2019\",\n \"1st\",\n \"2nd\",\n \"3rd\"]]\n\n diabetes_df[\"Population_with_Diabetes\"] = diabetes_df[\"Population_2019\"] * (diabetes_df[\"Diabetes_Rate_2018\"] / 100)\n\n return diabetes_df.to_json()", "def cal_agg(c, d, t, out_path):\n c = c[c['State'] == 'CA']\n c = c.drop(columns = c.columns[0:4])\n c = c.sum(axis = 0)\n\n d = d[d['State'] == 'CA']\n d = d.drop(columns = d.columns[0:4])\n d = d.sum(axis = 0)\n\n data1 = {'date': pd.to_datetime(c.index),\n 'cases': c,\n 'deaths': d}\n\n df1 = pd.DataFrame(data1)\n\n data2 = {'date': pd.to_datetime(t.date, format='%Y%m%d'),\n 'positive': t.positive,\n 'negative': t.negative,\n 'tested': t.total}\n\n df2 = pd.DataFrame(data2)\n\n df = pd.merge(df1, df2, on='date', how='outer')\n df.to_csv(out_path, index = False)", "def addMoney(candGroup, cand_index, candName, amount):\n # REMEMBER TO FIX LOG SCALE IF APPLYING LOG FOR MONEY\n candGroup = candGroup.copy()\n candGroup.loc[candGroup['CANDIDATE_NAME'] == candName, 'CAND_TOTAL_RAISED'] = amount + candGroup.loc[candGroup['CANDIDATE_NAME'] == candName, 'CAND_TOTAL_RAISED']\n candGroup['FAVORITE'] = candName\n candGroup.loc[candGroup['CANDIDATE_NAME'] == candName, 'FAVORITE'] = candName\n return candGroup", "def death_summary(some_data):\n Drug_names = [\"Heroin\", \"Cocaine\", \"Fentanyl\", \"FentanylAnalogue\", \"Oxycodone\", \"Oxymorphone\", \"Ethanol\",\n \"Hydrocodone\", \"Benzodiazepine\", \"Methadone\", \"Amphet\", \"Tramad\", \"Morphine_NotHeroin\",\n \"Hydromorphone\", \"Other\"]\n for drug in Drug_names:\n print(\"Number of deaths due to: {} is {}\".format(drug, some_data[drug].notnull().sum()))", "def grouper(df):\n print(\"performing groupby and sum\")\n\n df.loc[df['outcome_id'] != 'death2', 'outcome_id'] = 'case'\n\n groups = ['location_id', 'year_start', 'year_end', 'age_group_unit',\n 'age_group_id', 'sex_id', 'source', 'nid',\n 'facility_id', 'representative_id', 'diagnosis_id',\n 'metric_id', 'outcome_id', 'nonfatal_cause_name']\n df = df.groupby(groups).agg({'val': 'sum'}).reset_index()\n\n return df", "def fraction_licks_in_reward_zone(expt_grp):\n rew_intervals = ints.behavior(expt_grp, 'reward')\n licking_intervals = ints.behavior(expt_grp, 'licking')\n\n n_licks = licking_intervals.groupby('trial', as_index=False).agg(len)\n n_licks.rename(columns={'start': 'total_licks'}, inplace=True)\n del n_licks['stop']\n\n licks_in_reward = rew_intervals.filter_events(\n licking_intervals, 'start').groupby('trial', as_index=False).agg(len)\n licks_in_reward.rename(columns={'start': 'licks_in_reward'}, inplace=True)\n del licks_in_reward['stop']\n\n result = pd.merge(licks_in_reward, n_licks, on='trial', how='outer')\n result['licks_in_reward'] = result['licks_in_reward'].fillna(0)\n result['value'] = result['licks_in_reward'] / \\\n result['total_licks'].astype('float')\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
geog_unit_deaths as (
    SELECT tract_id, sum(total_deaths) as total_deaths
    FROM person_deaths
    GROUP BY tract_id
),
def geog_unit_deaths(self, geogUnit, prettyPrint):
    geog_agg = []
    geog_agg.append(", geog_unit_deaths as")
    geog_agg.append("(")
    geog_agg.append("SELECT {}, sum(total_deaths) as total_deaths ".format(geogUnit))
    geog_agg.append("FROM person_deaths")
    geog_agg.append("GROUP BY {}".format(geogUnit))
    geog_agg.append(")")
    return self.print_statements(geog_agg, prettyPrint)
[ "def grouper(df):\n print(\"performing groupby and sum\")\n\n df.loc[df['outcome_id'] != 'death2', 'outcome_id'] = 'case'\n\n groups = ['location_id', 'year_start', 'year_end', 'age_group_unit',\n 'age_group_id', 'sex_id', 'source', 'nid',\n 'facility_id', 'representative_id', 'diagnosis_id',\n 'metric_id', 'outcome_id', 'nonfatal_cause_name']\n df = df.groupby(groups).agg({'val': 'sum'}).reset_index()\n\n return df", "def grouped_drugs(some_data):\n some_data1 = some_data.groupby([\"MannerofDeath\", \"Sex\", \"FiscalYear\"]).agg(\n {\"Morphine_NotHeroin\": \"count\", \"Ethanol\": \"count\" })\n some_data1.reset_index(inplace=True)\n #some_data1.drop([6, 7], inplace=True)\n some_data1[\"FiscalYear\"] = some_data1[\"FiscalYear\"].astype(str)\n return some_data1", "def cal_agg(c, d, t, out_path):\n c = c[c['State'] == 'CA']\n c = c.drop(columns = c.columns[0:4])\n c = c.sum(axis = 0)\n\n d = d[d['State'] == 'CA']\n d = d.drop(columns = d.columns[0:4])\n d = d.sum(axis = 0)\n\n data1 = {'date': pd.to_datetime(c.index),\n 'cases': c,\n 'deaths': d}\n\n df1 = pd.DataFrame(data1)\n\n data2 = {'date': pd.to_datetime(t.date, format='%Y%m%d'),\n 'positive': t.positive,\n 'negative': t.negative,\n 'tested': t.total}\n\n df2 = pd.DataFrame(data2)\n\n df = pd.merge(df1, df2, on='date', how='outer')\n df.to_csv(out_path, index = False)", "def decorate(stu):\n return (sum(stu['credits'].values()) ,stu)", "def population_stats(df):\n\n return ...", "def person_deaths(self, prettyPrint):\n \n person_deaths = []\n person_deaths.append(\", person_deaths as\")\n person_deaths.append(\"(\")\n person_deaths.append(\"SELECT p.zcta_id, p.tract_id, p.person_id, p.geom, p.sex, p.age, p.race \")\n #List comprehension to generate the sql\n yearsList = [\"num_deaths_%s\" % (y) for y in self.years ]\n person_deaths.append( \",( %s ) as total_deaths \" % (\" + \".join(yearsList)) )\n person_deaths.append(\"FROM expected_deaths p\")\n person_deaths.append(\")\")\n \n \n return self.print_statements(person_deaths, prettyPrint)", "def create_death_rate_data(group_by_df: pd.DataFrame) -> pd.DataFrame:\n group_by_df[\"death_rate\"] = (\n group_by_df[\"total_deaths\"] / group_by_df[\"total_cases\"]\n ) * 100\n\n return group_by_df", "def sum_country():\n dic = {}\n data_thai = numpy.array(df_thai.groupby('country').sum()['suicides_no']).tolist()\n dic['th'] = data_thai[0]\n data_country = numpy.array(df_top.groupby('country').sum()['suicides_no']).tolist()\n for i in range(len(data_country)):\n dic[country_no[i]] = data_country[i]\n chart = pygal.maps.world.World()\n chart.title = 'Top 10 countries and Thailand most suicides in 20 years'\n chart.legend_at_bottom = True\n chart.add('Number of Suicides in 20 years', dic)\n chart.render_to_file('Top 10 countries and THA.svg')", "def gqOtherInstTotal():\n name = \"gqOtherInstTotal\"\n groupings = {HHGQ: {\"Other Institutional Facilities Population Total\": [4]}}\n return name, groupings", "def centroid_population_deaths(self, geogUnit, geogTable, geogJoinField, prettyPrint):\n \n geog_centroid_deaths = []\n geog_centroid_deaths.append(\", the_population as\")\n geog_centroid_deaths.append(\"(\")\n geog_centroid_deaths.append(\"SELECT g.{} as geog_id, ST_Centroid(t.geom) as geom, g.total_deaths\".format(geogUnit))\n geog_centroid_deaths.append(\"FROM geog_unit_deaths g\")\n geog_centroid_deaths.append(\"INNER JOIN {} t ON (g.{} = t.{})\".format(geogTable, geogUnit, geogJoinField))\n geog_centroid_deaths.append(\")\")\n \n return self.print_statements(geog_centroid_deaths, 
prettyPrint)", "def getDeathGermany(self, terminalOutput):\n germanyDeaths = self.converter_list.responseConversion(\"SELECT CUMULATIVE_SUM(deaths) FROM death_cases \\\n WHERE location = 'Germany'\", self.client)\n germanyDeaths = int(germanyDeaths[len(germanyDeaths) - 1]['cumulative_sum'])\n self.resultNumbers.append(germanyDeaths)\n if terminalOutput:\n print(f\"The total number of deaths in Germany is: {germanyDeaths}\")", "def diabetes_rate():\n\n # Use Pandas to perform the sql query\n diabetes_stmt = db.session.query(Combined).statement\n diabetes_df = pd.read_sql_query(diabetes_stmt, db.session.bind)\n diabetes_df = diabetes_df[\n [\"State\",\n \"State_2\",\n \"Diabetes_Rate_2018\",\n \"Population_2019\",\n \"1st\",\n \"2nd\",\n \"3rd\"]]\n\n diabetes_df[\"Population_with_Diabetes\"] = diabetes_df[\"Population_2019\"] * (diabetes_df[\"Diabetes_Rate_2018\"] / 100)\n\n return diabetes_df.to_json()", "def donut_districts(amount=1, unit='d'):\n query = 'sum(increase(location_punctuality[%d%s])) by (district)' % (\n amount, unit)\n result = make_prom_query(query)\n\n dct = {}\n if check_json_result(result):\n data = get_data_json_result(result)\n for l in data:\n dct[l['metric']['district']] = float(l['value'][1])\n else:\n print('Query to Prometheus database went wrong, status error code: %s'\n % (result['status']))\n\n return [{key: val} for (key, val) in\n sorted(dct.items(), key=lambda kv: kv[1], reverse=True)]", "def gqJuvenileTotal():\n name = \"gqJuvenileTotal\"\n groupings = {HHGQ: {\"Juvenile Facilities Population Total\": [2]}}\n return name, groupings", "def total_per_person():\n\n # ENTER YOUR CODE HERE", "def instTotal():\n name = \"instTotal\"\n groupings = {HHGQ: {\"Institutional Population Total\": list(range(1,5))}}\n return name, groupings", "def hhTotal():\n name = \"hhTotal\"\n groupings = {\n HHGQ: {\n \"Population in households\" : [0]\n }\n }\n return name, groupings", "def transform_aggregate_countries(df):\n index_cols = df.columns.tolist()\n index_cols.remove(\"Province/State\")\n index_cols.remove(\"Country/Region\")\n index_cols.remove(\"Lat\")\n index_cols.remove(\"Long\")\n df = df.groupby([\"Country/Region\"])[index_cols].apply(sum)\n df = df.reset_index()\n return df", "def group_df_total(df, feature):\n grouped = df.groupby(feature)['total'].sum()\n return pd.DataFrame({feature: grouped.index, 'sum_total':grouped.values})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
the_population as (
    SELECT g.tract_id, ST_Centroid(t.geom) as geom, g.total_deaths
    FROM geog_unit_deaths g
    INNER JOIN mn_census_tracts t ON (g.tract_id = t.gid)
),
def centroid_population_deaths(self, geogUnit, geogTable, geogJoinField, prettyPrint):
    geog_centroid_deaths = []
    geog_centroid_deaths.append(", the_population as")
    geog_centroid_deaths.append("(")
    geog_centroid_deaths.append("SELECT g.{} as geog_id, ST_Centroid(t.geom) as geom, g.total_deaths".format(geogUnit))
    geog_centroid_deaths.append("FROM geog_unit_deaths g")
    geog_centroid_deaths.append("INNER JOIN {} t ON (g.{} = t.{})".format(geogTable, geogUnit, geogJoinField))
    geog_centroid_deaths.append(")")
    return self.print_statements(geog_centroid_deaths, prettyPrint)
[ "def population_stats(df):\n\n return ...", "def get_census_tract(df):\n globals()['censusgeocode'] = __import__('censusgeocode')\n df = df.copy()\n df['tract_geocode'] = df.apply(_get_geocode_single, axis=1)\n return df", "def get_census_tract_attributes():\n if os.path.isfile(\"data/tracts_covs.feather\") and os.path.isfile(\n \"data/tracts_covs_var_dict.json\"\n ):\n with open(\"data/tracts_covs_var_dict.json\", \"r\") as f:\n var_dict = json.load(f)\n return (pd.read_feather(\"data/tracts_covs.feather\"), var_dict)\n var_dict = {\n \"B01003_001E\": \"population\",\n \"B02001_002E\": \"white_population\",\n \"C24020_001E\": \"employed_population\",\n \"B08131_001E\": \"minutes_commute\",\n \"B09010_002E\": \"supplemental_income\",\n \"B15003_021E\": \"associate\",\n \"B15003_022E\": \"bachelor\",\n \"B15003_023E\": \"master\",\n \"B15003_024E\": \"professional_school\",\n \"B15003_025E\": \"doctoral\",\n \"B16009_002E\": \"poverty\",\n \"B18140_001E\": \"median_earnings\",\n \"B19019_001E\": \"median_household_income\",\n \"B25011_001E\": \"total_housing\",\n \"B25011_026E\": \"renter_occupied\",\n \"B25031_001E\": \"median_gross_rent\",\n \"B27020_002E\": \"native_born\",\n \"B27020_003E\": \"native_born_hc_covered\",\n }\n\n state_codes = \"01 02 04 05 06 08 09 10 11 12 13 15 16 17 18 19 20 21 22 23 24 25 26 \\\n 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 45 46 47 48 49 50 51 53 54 \\\n 55 56\".split()\n\n conn = cenpy.remote.APIConnection(\"ACSDT5Y2017\")\n\n tracts_df = (\n pd.concat(\n [\n conn.query(\n cols=list(var_dict.keys()),\n geo_unit=\"tract:*\",\n geo_filter={\"state\": st},\n )\n for st in tqdm(state_codes)\n ],\n axis=0,\n )\n .astype(float)\n .rename_columns(var_dict)\n .assign(\n **{\n \"pct_white\": lambda x: x.white_population / x.population,\n \"minutes_commute\": lambda x: x.minutes_commute / x.employed_population,\n \"pct_higher_ed\": lambda x: (\n x.associate + x.bachelor + x.professional_school + x.doctoral\n )\n / x.population,\n \"pct_rent\": lambda x: x.renter_occupied / x.total_housing,\n \"pct_native_hc_covered\": lambda x: x.native_born_hc_covered\n / x.native_born,\n \"pct_poverty\": lambda x: x.poverty / x.population,\n \"log_median_earnings\": lambda x: np.log(x.median_earnings),\n \"log_median_household_income\": lambda x: np.log(\n x.median_household_income\n ),\n \"log_median_gross_rent\": lambda x: np.log(x.median_gross_rent),\n \"pct_supplemental_income\": lambda x: x.supplemental_income\n / x.population,\n \"pct_employed\": lambda x: x.employed_population / x.population,\n \"geoid\": lambda x: x.state.astype(int).astype(str).str.zfill(2)\n + x.county.astype(int).astype(str).str.zfill(3)\n + x.tract.astype(int).astype(str).str.zfill(6),\n }\n )\n .drop(\n \"white_population associate bachelor professional_school doctoral \\\n renter_occupied native_born_hc_covered native_born \\\n poverty state county tract\".split(),\n axis=1,\n )\n .reset_index(drop=True)\n )\n\n tracts_df = tracts_df.assign(\n **{\n col: (\n tracts_df[col]\n .where(tracts_df[col].ge(0))\n .replace(to_replace=[np.inf, -np.inf], value=np.nan)\n )\n for col in tracts_df.columns\n if tracts_df[col].dtype == \"float\"\n }\n )\n\n gid = tracts_df.loc[tracts_df[\"population\"] == 0, \"geoid\"].copy()\n nan_idx = (tracts_df[\"population\"] < 100).copy()\n tracts_df.loc[nan_idx, :] = np.nan\n tracts_df.loc[nan_idx, \"geoid\"] = gid\n\n tracts_df.to_feather(\"data/tracts_covs.feather\")\n with open(\"data/tracts_covs_var_dict.json\", \"w\") as f:\n json.dump(var_dict, f)\n\n 
return tracts_df, var_dict", "def _spatial_prox_profile(data, group_pop_var, total_pop_var, m=1000):\n\n if (str(type(data)) != '<class \\'geopandas.geodataframe.GeoDataFrame\\'>'):\n raise TypeError(\n 'data is not a GeoDataFrame and, therefore, this index cannot be calculated.'\n )\n\n if ('geometry' not in data.columns):\n data['geometry'] = data[data._geometry_column_name]\n data = data.drop([data._geometry_column_name], axis=1)\n data = data.set_geometry('geometry')\n\n if (type(m) is not int):\n raise TypeError('m must be a string.')\n\n if (m < 2):\n raise ValueError('m must be greater than 1.')\n\n if ((type(group_pop_var) is not str) or (type(total_pop_var) is not str)):\n raise TypeError('group_pop_var and total_pop_var must be strings')\n\n if ((group_pop_var not in data.columns)\n or (total_pop_var not in data.columns)):\n raise ValueError(\n 'group_pop_var and total_pop_var must be variables of data')\n\n data = data.rename(columns={\n group_pop_var: 'group_pop_var',\n total_pop_var: 'total_pop_var'\n })\n\n if any(data.total_pop_var < data.group_pop_var):\n raise ValueError(\n 'Group of interest population must equal or lower than the total population of the units.'\n )\n\n # Create the shortest distance path between two pair of units using Shimbel matrix. This step was well discussed in https://github.com/pysal/segregation/issues/5.\n w_libpysal = Queen.from_dataframe(data)\n graph = csr_matrix(w_libpysal.full()[0])\n delta = floyd_warshall(csgraph=graph, directed=False)\n\n def calculate_etat(t):\n g_t_i = np.where(data.group_pop_var / data.total_pop_var >= t, True,\n False)\n k = g_t_i.sum()\n\n # i and j only varies in the units subset within the threshold in eta_t of Hong (2014).\n sub_delta_ij = delta[g_t_i, :][:, g_t_i]\n\n den = sub_delta_ij.sum()\n eta_t = (k**2 - k) / den\n return eta_t\n\n grid = np.linspace(0, 1, m)\n aux = np.array(list(map(calculate_etat, grid)))\n aux[aux == inf] = 0\n aux[aux == -inf] = 0\n curve = np.nan_to_num(aux, 0)\n\n threshold = data.group_pop_var.sum() / data.total_pop_var.sum()\n SPP = ((threshold - ((curve[grid < threshold]).sum() / m -\n (curve[grid >= threshold]).sum() / m)) /\n (1 - threshold))\n\n core_data = data[['group_pop_var', 'total_pop_var', 'geometry']]\n\n return SPP, grid, curve, core_data", "def estimate_pop_per_node():\n path = os.path.join(DATA_INTERMEDIATE, 'elec_distribution.shp')\n elec_sites = gpd.read_file(path)\n\n path = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_areas = gpd.read_file(path, crs='epsg:27700')\n\n output_centroids = []\n\n for idx, output_area in output_areas.iterrows():\n\n nearest = nearest_points(output_area['geometry'], elec_sites.unary_union)[1]\n\n output_centroids.append({\n 'geometry': output_area['geometry'],\n 'properties': {\n 'origin_elec': str(nearest.coords.xy[0][0]) +\n '_' +\n str(nearest.coords.xy[1][0]),\n 'population': output_area['population'],\n },\n })\n\n output_centroids = gpd.GeoDataFrame.from_features(output_centroids, crs='epsg:27700')\n path_out = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_centroids.to_file(path_out, crs='epsg:27700')\n\n unique_nodes = set()\n\n for idx, output_area in output_centroids.iterrows():\n unique_nodes.add(output_area['origin_elec'])\n\n pop_per_node = []\n\n for node_id in list(unique_nodes):\n\n population = 0\n\n for idx, output_area in output_centroids.iterrows():\n if output_area['origin_elec'] == node_id:\n population += output_area['population']\n\n pop_per_node.append({\n 'id': node_id,\n 
'population': population,\n })\n\n pop_per_node = pd.DataFrame(pop_per_node)\n\n path_out = os.path.join(DATA_INTERMEDIATE, 'pop_by_elec_node.csv')\n pop_per_node.to_csv(path_out, index=False)", "def set_population(feature, distance):\n geo = ee.Geometry.Point([feature.get('longitude'), feature.get('latitude')])\n disk = geo.buffer(ee.Number(distance).multiply(1000))\n count = pop.reduceRegion(reducer='sum', geometry=disk)\n count = ee.Number(count.get('population')).toInt()\n return feature.set({f'population_within_{distance}km': count})", "def Populations():\n age_df = load_data('agerangenotificationeu')[['country', 'population']]\n testing_df = load_data('testing')[['country', 'population']]\n pop = pd.merge(age_df, testing_df, how='outer').groupby(['country']).median()\n \n def getter(country: str) -> float:\n return pop.loc[country, 'population']\n\n return getter", "def grid_people(self,prettyPrint):\n \n grid_people = []\n grid_people.append(\", grid_people as\")\n grid_people.append(\"(\")\n grid_people.append(\"SELECT gid, geom, distance, sum(total_deaths) OVER w as total_deaths\")\n grid_people.append(\"FROM grid_person_join\")\n grid_people.append(\"WINDOW w AS (PARTITION BY gid, geom ORDER BY distance ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )\")\n grid_people.append(\")\")\n\n return self.print_statements(grid_people, prettyPrint)", "def average_gpa(city, state):\n with sqlite3.connect(DBPATH) as connection:\n SELECTSQL = \"\"\"SELECT gpa \n FROM students \n JOIN campus\n ON students.campus_id=campus.id\n WHERE campus.city=:city AND campus.state=:state;\"\"\"\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n values = {\n \"city\": city,\n \"state\": state\n }\n cursor.execute(SELECTSQL, values)\n list_gpas = cursor.fetchall()\n # search in functools itemgetter\n all_gpas = []\n for gpa in list_gpas:\n all_gpas.append((gpa[0]))\n avg_gpa = sum(all_gpas)/len(all_gpas)\n return avg_gpa", "def projected_vaccine_immune_population(t, current_doses_per_100):\n AUG = np.datetime64('2021-08-01').astype(int) - dates[-1].astype(int)\n SEP = np.datetime64('2021-09-01').astype(int) - dates[-1].astype(int)\n OCT = np.datetime64('2021-10-01').astype(int) - dates[-1].astype(int)\n NOV = np.datetime64('2021-11-01').astype(int) - dates[-1].astype(int)\n\n # My national projections of doses per 100 people per day are:\n # Jul 140k per day = 0.55 %\n # Aug 165k per day = 0.66 %\n # Sep 185k per day = 0.74 %\n # Oct 230k per day = 0.92 %\n # Nov 280k per day = 1.12 %\n\n # NSW currently exceeding national rates by 15%, so let's go with that:\n PRIORITY_FACTOR = 1.15\n\n if ACCELERATED_VAX:\n # What if we give NSW double the supply, or if their rollout is prioritised such\n # that each dose reduces spread twice as much as for an average member of the\n # population?\n PRIORITY_FACTOR *= 2\n\n\n doses_per_100 = np.zeros_like(t)\n doses_per_100[0] = current_doses_per_100\n for i in range(1, len(doses_per_100)):\n if i < AUG:\n doses_per_100[i] = doses_per_100[i - 1] + 0.55 * PRIORITY_FACTOR\n elif i < SEP:\n doses_per_100[i] = doses_per_100[i - 1] + 0.66 * PRIORITY_FACTOR\n elif i < OCT:\n doses_per_100[i] = doses_per_100[i - 1] + 0.74 * PRIORITY_FACTOR\n elif i < NOV:\n doses_per_100[i] = doses_per_100[i - 1] + 0.92 * PRIORITY_FACTOR\n else:\n doses_per_100[i] = doses_per_100[i - 1] + 1.12 * PRIORITY_FACTOR\n\n doses_per_100 = np.clip(doses_per_100, 0, 85 * 2)\n immune = 0.4 * doses_per_100 / 100\n return immune", "def gj_query(table):\n try:\n conn_string=POSTGRESQL\n 
connection=pg.connect(conn_string)\n cur = connection.cursor()\n except Exception as e :\n print(\"[!] \",e)\n else:\n with connection:\n with cur:\n query = \"\"\"SELECT jsonb_build_object('type','FeatureCollection','features', jsonb_agg(feature)) FROM (SELECT jsonb_build_object('type','Feature','id', buildings_id,'geometry',ST_AsGeoJSON(st_transform(geom, 4326))::jsonb,'properties',to_jsonb(inputs) - 'geom') AS feature FROM (SELECT buildings_id, geom FROM {} where demolished <> 'yes') inputs) features;\"\"\".format(table).replace('\\n',' ')\n\n cur.execute(query)\n geoj = cur.fetchall()\n return geoj\n\n finally:\n connection.close()", "def add_population_to_lsoa_centroid(path):\n pop_data = pd.read_csv(path)\n\n path = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_areas = gpd.read_file(path, crs='epsg:27700')\n\n output_areas = output_areas.merge(pop_data, left_on='LSOA11CD', right_on='code')\n\n path_out = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_areas.to_file(path_out, crs='epsg:27700')", "def test_tract_merged(self):\n # Verify that the first dissolved tract no longer exists\n tract1 = self.geographies.find({ 'geoid': '10001040600' })\n self.assertEqual(tract1.count(), 0)\n\n tract2 = self.geographies.find({ 'geoid': '10001040800' })\n self.assertEqual(tract2.count(), 0)\n\n # Compute crosswalked values\n tract1_pop_2000 = 2380 \n tract2_pop_2010 = 2770\n merged_pop_2000 = tract1_pop_2000 + tract2_pop_2010\n merged_pop_2010 = 6131\n merged_pop_delta = merged_pop_2010 - merged_pop_2000\n merged_pop_pct_change = float(merged_pop_delta) / merged_pop_2000\n\n # Verify that the merged tract is correct\n merged_tract = self.geographies.find({ 'geoid': '10001043300' })\n self.assertEqual(merged_tract.count(), 1) \n merged_tract = merged_tract[0]\n\n self.assertEqual(len(merged_tract['xwalk']), 2)\n self.assertEqual(merged_tract['xwalk']['10001040600']['POPPCT00'], 1.0)\n self.assertEqual(merged_tract['xwalk']['10001040800']['POPPCT00'], 1.0)\n\n self.assertEqual(merged_tract['data']['2000']['P1']['P0010001'], merged_pop_2000)\n self.assertEqual(merged_tract['data']['2010']['P1']['P0010001'], merged_pop_2010)\n self.assertEqual(float(merged_tract['data']['delta']['P1']['P0010001']), merged_pop_delta)\n self.assertAlmostEqual(float(merged_tract['data']['pct_change']['P1']['P0010001']), merged_pop_pct_change)\n \n self.assertEqual(merged_tract['xwalk']['10001040600']['HUPCT00'], 1.0)\n self.assertEqual(merged_tract['xwalk']['10001040800']['HUPCT00'], 1.0)", "def get_the_centroid(multiday_storm_object_table, address_of_unique_storms):\n\n centroids = []\n for i in address_of_unique_storms:\n polygons = multiday_storm_object_table.loc[i]\n centroid_pt = polygons.centroid\n centroids.append([centroid_pt.x, centroid_pt.y])\n return numpy.array(centroids)", "def add_pop_to_nodes():\n path = os.path.join(DATA_INTERMEDIATE, 'pop_by_elec_node.csv')\n pop_data = pd.read_csv(path)\n\n path = os.path.join(DATA_INTERMEDIATE, 'elec_distribution.shp')\n elec_nodes = gpd.read_file(path, crs='epsg:27700')\n\n output = []\n\n for idx, item in pop_data.iterrows():\n for idx, elec_node in elec_nodes.iterrows():\n if item['id'] == elec_node['id']:\n output.append({\n 'geometry': elec_node['geometry'],\n 'properties': {\n 'id': elec_node['id'],\n 'population': item['population'],\n },\n })\n\n output = gpd.GeoDataFrame.from_features(output)\n\n path = os.path.join(DATA_INTERMEDIATE, 'elec_distribution.shp')\n output.to_file(path, crs='epsg:27700')", "def geoselect(self, sql, 
geom_col=\"geom\"):\n if self.debug is True:\n self.info(\"Geoselect - %s\" %sql)\n \n gdf = gpd.read_postgis(sql, self.con, geom_col=geom_col)\n \n if len(gdf) == 0:\n self.warning(\"... Table vide\")\n \n if self.debug is True:\n self.info(\"... Requête executée\")\n \n return gdf", "def agg_monthly_climate(city_df: pd.DataFrame) -> pd.DataFrame:\n agg = city_df.groupby(['year','Latitude','Longitude']).agg({\n 'AverageTemperature': [min, max, collect]\n }).reset_index(drop=False)\n return agg", "def preprocess_features(california_housing_dataframe):\n selected_features = california_housing_dataframe[[\"latitude\",\n \"longitude\",\n \"housing_median_age\",\n \"total_rooms\",\n \"total_bedrooms\",\n \"population\",\n \"households\",\n \"median_income\"]]\n \n processed_features = selected_features.copy()\n processed_features[\"rooms_per_person\"] = (california_housing_dataframe[\"total_rooms\"] / california_housing_dataframe[\"population\"])\n\n return processed_features", "def get_population():\n dir_path = path.dirname(path.realpath(__file__))\n population = pd.read_csv(\n dir_path + '/../data/population-figures-by-country-csv_csv.csv')\n population = population.set_index('Country')\n return population" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
grid_person_join as (
    SELECT gid, g.geom, tp.tract_id, ST_Distance(g.geom, ST_Transform(tp.geom,26915)) as distance, tp.total_deaths
    FROM grid g CROSS JOIN the_population tp
),
def grid_person_cross_join(self, prettyPrint):
    grid_person = []
    grid_person.append(", grid_person_join as")
    grid_person.append("(")
    grid_person.append("SELECT gid, g.geom, tp.geog_id, ST_Distance(g.geom, ST_Transform(tp.geom,26915)) as distance, tp.total_deaths")
    grid_person.append("FROM grid g CROSS JOIN the_population tp")
    grid_person.append(")")
    return self.print_statements(grid_person, prettyPrint)
[ "def add_distance_from_galvanize(df):\n \n galvanize_coords = (47.5990148, -122.3338371)\n df['dist_from_galvanize'] = [geopy.distance.distance((df['lats'][i],df['longs'][i]), galvanize_coords).miles\n for i in range(len(df))]", "def grid_people(self,prettyPrint):\n \n grid_people = []\n grid_people.append(\", grid_people as\")\n grid_people.append(\"(\")\n grid_people.append(\"SELECT gid, geom, distance, sum(total_deaths) OVER w as total_deaths\")\n grid_people.append(\"FROM grid_person_join\")\n grid_people.append(\"WINDOW w AS (PARTITION BY gid, geom ORDER BY distance ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )\")\n grid_people.append(\")\")\n\n return self.print_statements(grid_people, prettyPrint)", "def buildingDist(edge):\n p0 = edge[0]\n p1 = edge[1]\n point0 = Point(p0)\n point1 = Point(p1)\n wkt0 = point0.wkt\n wkt1 = point1.wkt\n cur.execute(\"select bid from buildings \\\n where st_contains(geom, st_geomfromtext('%s',27700))\" % wkt0)\n bid0 = cur.fetchall()[0][0]\n\n cur.execute(\"select bid from buildings \\\n where st_contains(geom, st_geomfromtext('%s',27700))\" % wkt1)\n bid1 = cur.fetchall()[0][0]\n\n cur.execute(\"select st_distance(b1.geom, b2.geom) \\\n from buildings as b1, buildings as b2 \\\n where b1.bid=%d and b2.bid=%d\" % (bid0, bid1))\n return cur.fetchall()[0][0]", "def test_join_is_successful_between_point_and_polygon(extr):\n join_df = extr.gdf_join\n expected_df = gpd.sjoin(gpd.read_file(extr.point), gpd.read_file(extr.tile), op='within')\n num_ = expected_df['HabitatTyp']\n cls_ = expected_df['HabitatT_1']\n assert isinstance(join_df, gpd.GeoDataFrame)\n assert list(join_df.columns) == ['ID', 'Int_cls', 'Int_num', 'Int_subCls', 'Int_subNum', 'QC_cls', 'QC_num', 'QC_subCls', 'QC_subNum', 'QC_By', 'geometry']\n assert list(join_df['Int_cls']) == list(cls_)\n assert list(join_df['Int_num']) == list(num_)", "def join_cells(args):\n\tglobal conn, cur\n\tnewid, cellids = args\n\n\t#add new cell at centroid of the cluster\n\tcur.execute(\"WITH clustered_antennas AS (SELECT ST_Union(eant_pos_original.geom) AS geom FROM eant_pos_original WHERE eant_pos_original.id IN %(cluster)s) \\\n\t\t\t\t INSERT INTO eant_pos (id, lon, lat, geom) \\\n\t\t\t\t SELECT %(id)s AS id, \\\n\t\t\t\t \t\tST_X(ST_Centroid(clustered_antennas.geom)) AS lon, \\\n\t\t\t\t \t\tST_Y(ST_Centroid(clustered_antennas.geom)) AS lat, \\\n\t\t\t\t \t\tST_Centroid(clustered_antennas.geom) AS geom \\\n\t\t\t\t FROM clustered_antennas\", {\"cluster\": tuple(cellids), \"id\": newid})\n\tconn.commit()", "def matching_df_from_geoms(df, geoms): \n geom_col = geometry_column_name(df)\n return pd.DataFrame(geoms, columns=[geom_col])", "def pick_up_distance(df):\n df['pick_up_distance'] = compute_distances(df['driver_latitude'], df[\n 'driver_longitude'], df['pickup_latitude'], df['pickup_longitude'])\n return df", "def nearest(geom, df,sindex): \n matches_idx = sindex.query(geom)\n nearest_geom = min(\n [df.iloc[match_idx] for match_idx in matches_idx],\n key=lambda match: shapely.measurement.distance(match.geometry,geom)\n )\n return nearest_geom", "def join_cdr_grid(cdr, grid):\n \n\n cdr.columns = [\"cellId\", \"time\", \"countryCode\", \"smsIn\", \"smsOut\",\n \"callIn\", \"callOut\", \"internet\"]\n norm_grid = json_normalize(grid['features'])\n\n agg_df = cdr[cdr['countryCode'] != 0].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n\n\n joined_df = pd.merge(left=norm_grid, 
right=agg_df, how='left',\n left_on='properties.cellId', right_on='cellId')\n \n return joined_df", "def test_link_to_gis():\n df_property = pd.read_csv(filter.RATE_FILE, index_col=0)\n df_gis = filter.link_to_gis(df_property)\n\n assert 'geometry' in df_gis.columns.values\n assert isinstance(df_gis, gpd.GeoDataFrame)", "def add_distances(network): \n\n #Find crs of current df and arbitrary point(lat,lon) for new crs\n current_crs=\"epsg:4326\"\n #The commented out crs does not work in all cases\n #current_crs = [*network.edges.crs.values()]\n #current_crs = str(current_crs[0])\n lat = shapely.get_y(network.nodes['geometry'].iloc[0])\n lon = shapely.get_x(network.nodes['geometry'].iloc[0])\n # formula below based on :https://gis.stackexchange.com/a/190209/80697 \n approximate_crs = \"epsg:\" + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))\n #from shapely/issues/95\n geometries = network.edges['geometry']\n coords = shapely.get_coordinates(geometries)\n transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)\n new_coords = transformer.transform(coords[:, 0], coords[:, 1])\n result = shapely.set_coordinates(geometries.copy(), np.array(new_coords).T)\n dist = shapely.length(result)\n edges = network.edges.copy()\n edges['distance'] = dist\n return Network(\n nodes=network.nodes,\n edges=edges)", "def _spatial_prox_profile(data, group_pop_var, total_pop_var, m=1000):\n\n if (str(type(data)) != '<class \\'geopandas.geodataframe.GeoDataFrame\\'>'):\n raise TypeError(\n 'data is not a GeoDataFrame and, therefore, this index cannot be calculated.'\n )\n\n if ('geometry' not in data.columns):\n data['geometry'] = data[data._geometry_column_name]\n data = data.drop([data._geometry_column_name], axis=1)\n data = data.set_geometry('geometry')\n\n if (type(m) is not int):\n raise TypeError('m must be a string.')\n\n if (m < 2):\n raise ValueError('m must be greater than 1.')\n\n if ((type(group_pop_var) is not str) or (type(total_pop_var) is not str)):\n raise TypeError('group_pop_var and total_pop_var must be strings')\n\n if ((group_pop_var not in data.columns)\n or (total_pop_var not in data.columns)):\n raise ValueError(\n 'group_pop_var and total_pop_var must be variables of data')\n\n data = data.rename(columns={\n group_pop_var: 'group_pop_var',\n total_pop_var: 'total_pop_var'\n })\n\n if any(data.total_pop_var < data.group_pop_var):\n raise ValueError(\n 'Group of interest population must equal or lower than the total population of the units.'\n )\n\n # Create the shortest distance path between two pair of units using Shimbel matrix. 
This step was well discussed in https://github.com/pysal/segregation/issues/5.\n w_libpysal = Queen.from_dataframe(data)\n graph = csr_matrix(w_libpysal.full()[0])\n delta = floyd_warshall(csgraph=graph, directed=False)\n\n def calculate_etat(t):\n g_t_i = np.where(data.group_pop_var / data.total_pop_var >= t, True,\n False)\n k = g_t_i.sum()\n\n # i and j only varies in the units subset within the threshold in eta_t of Hong (2014).\n sub_delta_ij = delta[g_t_i, :][:, g_t_i]\n\n den = sub_delta_ij.sum()\n eta_t = (k**2 - k) / den\n return eta_t\n\n grid = np.linspace(0, 1, m)\n aux = np.array(list(map(calculate_etat, grid)))\n aux[aux == inf] = 0\n aux[aux == -inf] = 0\n curve = np.nan_to_num(aux, 0)\n\n threshold = data.group_pop_var.sum() / data.total_pop_var.sum()\n SPP = ((threshold - ((curve[grid < threshold]).sum() / m -\n (curve[grid >= threshold]).sum() / m)) /\n (1 - threshold))\n\n core_data = data[['group_pop_var', 'total_pop_var', 'geometry']]\n\n return SPP, grid, curve, core_data", "def d_within(geom, df, distance): \n return _intersects(geom, df, distance)", "def test_forming_propositions_by_distance_in_meters_to_all_buildings_of_Infrastructure():", "def proj_dist(A,B):\n return (A*B).sum()/(B**2).sum()**0.5", "def get_distance(site_coord, rg_coord):\n\n site_id = site_coord['site_id'].values[0]\n site_lat = site_coord['latitude'].values[0]\n site_lon = site_coord['longitude'].values[0]\n\n rg_coord['latitude'] = rg_coord['latitude'].apply(lambda x: float(x))\n rg_coord['longitude'] = rg_coord['longitude'].apply(lambda x: float(x))\n\n rg_coord['dlat'] = rg_coord['latitude'].apply(lambda x: x - site_lat)\n rg_coord['dlon'] = rg_coord['longitude'].apply(lambda x: x - site_lon)\n rg_coord['dlat'] = np.radians(rg_coord.dlat)\n rg_coord['dlon'] = np.radians(rg_coord.dlon)\n\n rg_coord['a1'] = rg_coord['dlat'].apply(lambda x: np.sin(x/2)**2)\n rg_coord['a3'] = rg_coord['latitude'].apply(lambda x: np.cos(np.radians(x)))\n rg_coord['a4'] = rg_coord['dlon'].apply(lambda x: np.sin(x/2)**2)\n \n rg_coord['a'] = rg_coord['a1'] + (np.cos(np.radians(site_lat)) * \\\n rg_coord['a3'] * rg_coord['a4'])\n rg_coord['c']= 2 * np.arctan2(np.sqrt(rg_coord.a),np.sqrt(1-rg_coord.a))\n rg_coord['distance']= 6371 * rg_coord.c\n rg_coord = rg_coord.sort_values('distance', ascending = True)\n \n nearest_rg = rg_coord[0:4]\n nearest_rg['site_id'] = site_id\n nearest_rg = nearest_rg[['site_id', 'rain_id', 'distance']]\n \n return nearest_rg", "def get_pass_df(match_df):\n pass_df = match_df[(match_df['type'] == 'Pass')]\n cols1 = ['x_coord', 'y_coord']\n cols2 = ['end_x_coord', 'end_y_coord']\n pass_df['distance'] = np.linalg.norm(pass_df[cols1].values - pass_df[cols2].values, axis=1)\n\n return pass_df", "def land_use_from_polygons(buildings_gdf, other_source_gdf, column, land_use_field):\n \n buildings_gdf = buildings_gdf.copy()\n buildings_gdf[column] = None\n # spatial index\n sindex = other_source_gdf.sindex\n buildings_gdf[column] = buildings_gdf.apply(lambda row: _assign_land_use_from_polygons(row[\"geometry\"], other_source_gdf, sindex, land_use_field))\n \n return buildings_gdf", "def find_destination(persons, land_uses, sampleN=15):\n # TODO: meta_grid to geogrid\n # ensure the input list of land uses is the definitve list\n # 'type': 'portal', 'geoid': geoid ??\n for p_id, person in enumerate(persons):\n activities_hourly = person['activities']\n activity_objs = [{'t': t*3600+ np.random.randint(3600), 'activity': a} for t, a in enumerate(activities_hourly) if t == 0 or a != 
activities_hourly[t-1]]\n for a_id, a_object in enumerate(activity_objs):\n t, a = a_object['t'], a_object['activity']\n if a == 'H':\n place = person['home_sim']\n if place['type']=='portal': place['geo_id']=person['home_geoid']\n elif a == 'W':\n place = person['work_sim']\n if place['type']=='portal': place['geo_id']=person['work_geoid']\n else:\n lu_config = activities_to_lu.get(activity_full_name[a], None)\n if len(lu_config) == 1:\n lu_type = list(lu_config)[0]\n else:\n lu_type = np.random.choice(list(lu_config), p=list(lu_config.values()))\n possible_lus = land_uses.get(lu_type, [])\n if ((sampleN) and len(possible_lus)>0):\n possible_lus = np.random.choice(possible_lus, size=min(sampleN, len(possible_lus)), replace=False)\n possible_lus_ll = [geogrid['features'][idx]['properties']['centroid'] for idx in possible_lus]\n if len(possible_lus) > 1:\n if a_id > 0:\n last_place_sim = activity_objs[a_id-1]['place_sim']\n else:\n last_place_sim = person['home_sim'] # in case that \"Home\" is not the first activity, should not happen\n last_place_sim['geo_id'] = person['home_geoid']\n if last_place_sim['type'] == 'geogrid':\n last_node_list = geogrid['features'][last_place_sim['ind']]['properties']['closest_nodes']\n dist = [internal_route_costs(last_node_list, geogrid['features'][this_grid]['properties']['closest_nodes'], \n sim_net_floyd_result, nodes_to_link_attributes)['total_distance'] for this_grid in possible_lus]\n else:\n # too much time to calculate network distance between a outside geoid and in-site metagrid, use straighline for approx.\n # dist = [external_routes(last_place_sim['geo_id'], geogrid['features'][this_grid]['properties']['closest_nodes'],\n # direction='in')[0]['route']['driving']*30/60 for this_grid in possible_lus]\n dist = [get_haversine_distance(all_geoid_centroids[last_place_sim['geo_id']], this_grid_ll) \n for this_grid_ll in possible_lus_ll]\n prob, chosen_idx = huff_model(dist, beta=2, predict_y=True, topN=5, alt_names=possible_lus)\n place = {'type': 'geogrid', 'ind': chosen_idx[0], 'll': geogrid['features'][chosen_idx[0]]['properties']['centroid']}\n elif len(possible_lus) == 1:\n place = {'type': 'geogrid', 'ind': possible_lus[0], 'll': possible_lus_ll[0]}\n elif len(possible_lus) == 0:\n # no available land use in site, randomly find a destination outside\n geo_id = np.random.choice(external_lu[lu_type])\n place = {'type': 'portal', 'geo_id': geo_id}\n if 'ind' in place:\n place['ind'] = int(place['ind']) # np.int32 will cause error for json.dumps()\n activity_objs[a_id]['place_sim'] = place\n person['activity_objs'] = activity_objs\n person['start_times'] = [activity_objs[i]['t'] for i in range(1, len(activity_objs))]\n if len(person['start_times']) > 0:\n person['start_times'].append(person['start_times'][0]) # assuming the person will repeat the schedule the next day" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
grid_people as (
    SELECT gid, geom, distance, sum(total_deaths) OVER w as total_deaths
    FROM grid_person_join
    WINDOW w AS (PARTITION BY gid, geom ORDER BY distance ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )
),
def grid_people(self,prettyPrint):
    grid_people = []
    grid_people.append(", grid_people as")
    grid_people.append("(")
    grid_people.append("SELECT gid, geom, distance, sum(total_deaths) OVER w as total_deaths")
    grid_people.append("FROM grid_person_join")
    grid_people.append("WINDOW w AS (PARTITION BY gid, geom ORDER BY distance ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )")
    grid_people.append(")")
    return self.print_statements(grid_people, prettyPrint)
[ "def window_sum(x, lag, win_size, win_geom):\n k = create_kernel(n=win_size, geom=win_geom)\n\n #create convolve function with reduced parameters for map_overlap\n pcon = functools.partial(convolve, weights=k)\n \n if isinstance(x, da.core.Array):\n conv_padding = int(win_size//2)\n res = x.map_overlap(pcon, depth={0: conv_padding, 1: conv_padding})\n else:\n res = pcon(x)\n \n #calculate 1/2N part of variogram\n neighbours = num_neighbours(lag)\n \n num_pix = np.sum(k)\n \n factor = 2 * num_pix * neighbours\n\n return res / factor", "def population_stats(df):\n\n return ...", "def createGravityAdjustmentDf(groups, thread_no):\n # df_g = pd.DataFrame()\n df_g = pd.DataFrame()\n i = 0\n for idx, booking in groups.items():\n i += 1\n if i % 100 == 0:\n print(thread_no, i)\n window_max = slidingWindow(booking)\n g = getG(window_max)\n df_g = df_g.append({\n 'bookingID':booking['bookingID'].values[0],\n 'g_x':g[0],\n 'g_y':g[1],\n 'g_z':g[2]\n }, ignore_index=True)\n \n print(\"finished \", thread_no)\n print(df_g.head())\n df_g.to_csv('g_' + str(thread_no) + '.csv')", "def get_age_grid(df, grdx, grdy, cfg=CFG):\n df = df.sort_values(\"age\", ascending=False) # sort oldest to youngest\n ny, nx = grdy.shape[0], grdx.shape[1]\n grdlat, grdlon = xy2latlon(grdx, grdy, cfg.rad_moon)\n age_grid = np.ones((nx, ny), dtype=cfg.dtype) * cfg.timestart\n for i, row in df.iterrows():\n lon, lat, rad, age, basin = row[[\"lon\", \"lat\", \"rad\", \"age\", \"isbasin\"]]\n grd_dist = gc_dist(lon, lat, grdlon, grdlat)\n ej_thresh = cfg.basin_ej_threshold if basin else cfg.ej_threshold\n ejmask = grd_dist < rad * ej_thresh # Mask ejecta blanket\n age_grid = np.where(ejmask, age, age_grid) # Update age in ejecta\n return age_grid", "def add_window_distance(vcf_df, window_size=10):\n list_pos = vcf_df.POS.to_list() #all positions\n set_pos = set(list_pos) #to set for later comparing\n max_pos = max(vcf_df.POS.to_list()) #max to iter over positions (independent from reference)\n\n all_list = list(range(1, max_pos + 1)) #create a list to slide one by one\n \n df_header = \"window_\" + str(window_size)\n\n vcf_df[df_header] = 1 #Create all 1 by default\n\n #Slide over windows\n for i in range(0,max_pos,1):\n window_pos = all_list[i:i+window_size] #This splits the list in windows of determined length\n set_window_pos = set(window_pos)\n #How many known positions are in every window for later clasification\n num_conglomerate = set_pos & set_window_pos\n \n if len(num_conglomerate) > 1:\n for i in num_conglomerate:\n index = vcf_df.index[vcf_df[\"POS\"] == i][0] #Retrieve index with the known position\n if vcf_df.loc[index,df_header] < len(num_conglomerate):\n vcf_df.loc[index,df_header] = len(num_conglomerate)", "def grouper(df):\n print(\"performing groupby and sum\")\n\n df.loc[df['outcome_id'] != 'death2', 'outcome_id'] = 'case'\n\n groups = ['location_id', 'year_start', 'year_end', 'age_group_unit',\n 'age_group_id', 'sex_id', 'source', 'nid',\n 'facility_id', 'representative_id', 'diagnosis_id',\n 'metric_id', 'outcome_id', 'nonfatal_cause_name']\n df = df.groupby(groups).agg({'val': 'sum'}).reset_index()\n\n return df", "def _spatial_prox_profile(data, group_pop_var, total_pop_var, m=1000):\n\n if (str(type(data)) != '<class \\'geopandas.geodataframe.GeoDataFrame\\'>'):\n raise TypeError(\n 'data is not a GeoDataFrame and, therefore, this index cannot be calculated.'\n )\n\n if ('geometry' not in data.columns):\n data['geometry'] = data[data._geometry_column_name]\n data = 
data.drop([data._geometry_column_name], axis=1)\n data = data.set_geometry('geometry')\n\n if (type(m) is not int):\n raise TypeError('m must be a string.')\n\n if (m < 2):\n raise ValueError('m must be greater than 1.')\n\n if ((type(group_pop_var) is not str) or (type(total_pop_var) is not str)):\n raise TypeError('group_pop_var and total_pop_var must be strings')\n\n if ((group_pop_var not in data.columns)\n or (total_pop_var not in data.columns)):\n raise ValueError(\n 'group_pop_var and total_pop_var must be variables of data')\n\n data = data.rename(columns={\n group_pop_var: 'group_pop_var',\n total_pop_var: 'total_pop_var'\n })\n\n if any(data.total_pop_var < data.group_pop_var):\n raise ValueError(\n 'Group of interest population must equal or lower than the total population of the units.'\n )\n\n # Create the shortest distance path between two pair of units using Shimbel matrix. This step was well discussed in https://github.com/pysal/segregation/issues/5.\n w_libpysal = Queen.from_dataframe(data)\n graph = csr_matrix(w_libpysal.full()[0])\n delta = floyd_warshall(csgraph=graph, directed=False)\n\n def calculate_etat(t):\n g_t_i = np.where(data.group_pop_var / data.total_pop_var >= t, True,\n False)\n k = g_t_i.sum()\n\n # i and j only varies in the units subset within the threshold in eta_t of Hong (2014).\n sub_delta_ij = delta[g_t_i, :][:, g_t_i]\n\n den = sub_delta_ij.sum()\n eta_t = (k**2 - k) / den\n return eta_t\n\n grid = np.linspace(0, 1, m)\n aux = np.array(list(map(calculate_etat, grid)))\n aux[aux == inf] = 0\n aux[aux == -inf] = 0\n curve = np.nan_to_num(aux, 0)\n\n threshold = data.group_pop_var.sum() / data.total_pop_var.sum()\n SPP = ((threshold - ((curve[grid < threshold]).sum() / m -\n (curve[grid >= threshold]).sum() / m)) /\n (1 - threshold))\n\n core_data = data[['group_pop_var', 'total_pop_var', 'geometry']]\n\n return SPP, grid, curve, core_data", "def grid_glider_data(df, varname, delta_z=.3):\n df.dropna(inplace=True)\n #df.dropna() # Changed to work with ru29 2020 datatset by JG\n df.drop(df[df['depth'] < .1].index, inplace=True) # drop rows where depth is <1\n df.drop(df[df[varname] == 0].index, inplace=True) # drop rows where the variable equals zero\n df.sort_values(by=['time', 'depth'], inplace=True)\n\n # find unique times and coordinates\n timeg, ind = np.unique(df.time.values, return_index=True)\n latg = df['latitude'].values[ind]\n long = df['longitude'].values[ind]\n dg = df['depth'].values\n vg = df[varname].values\n zn = np.int(np.max(np.diff(np.hstack([ind, len(dg)]))))\n\n depthg = np.empty((zn, len(timeg)))\n depthg[:] = np.nan\n varg = np.empty((zn, len(timeg)))\n varg[:] = np.nan\n\n for i, ii in enumerate(ind):\n if i < len(timeg) - 1:\n i_f = ind[i + 1]\n else:\n i_f = len(dg)\n depthi = dg[ind[i]:i_f]\n vari = vg[ind[i]:i_f]\n depthg[0:len(dg[ind[i]:i_f]), i] = depthi\n varg[0:len(vg[ind[i]:i_f]), i] = vari\n\n # sort time variable\n okt = np.argsort(timeg)\n timegg = timeg[okt]\n depthgg = depthg[:, okt]\n vargg = varg[:, okt]\n\n # Grid variables\n depthg_gridded = np.arange(0, np.nanmax(depthgg), delta_z)\n varg_gridded = np.empty((len(depthg_gridded), len(timegg)))\n varg_gridded[:] = np.nan\n\n for t, tt in enumerate(timegg):\n depthu, oku = np.unique(depthgg[:, t], return_index=True)\n varu = vargg[oku, t]\n okdd = np.isfinite(depthu)\n depthf = depthu[okdd]\n varf = varu[okdd]\n ok = np.asarray(np.isfinite(varf))\n if np.sum(ok) < 3:\n varg_gridded[:, t] = np.nan\n else:\n okd = np.logical_and(depthg_gridded >= 
np.min(depthf[ok]), depthg_gridded < np.max(depthf[ok]))\n varg_gridded[okd, t] = np.interp(depthg_gridded[okd], depthf[ok], varf[ok])\n\n return timegg, long, latg, depthg_gridded, varg_gridded", "def createSpendingsPerGroup(df_full, demographics, time_windows, return_raw=False):\n\n # Merge the demographic data\n demog_cols = [\"age_group\", \"income_group\", \"cohort_group\", \"gender\"]\n demographics_groups = demographics[[\"id\"] + demog_cols]\n demographics_groups = demographics_groups.rename(columns={\"id\": \"person\"})\n demog_spendings = df_full.merge(demographics_groups, on=\"person\", how=\"left\")\n\n # Coalesce spending windows into a spending for the specific duration of the offer received\n # and normalize by the offer duration\n target_col = \"daily_offer_spending\"\n for t in time_windows:\n spend_col = f\"spending_next_{t}h\"\n window_mask = demog_spendings[\"offer_duration\"]==t\n demog_spendings.loc[window_mask, target_col] = demog_spendings.loc[window_mask, spend_col] / (t/24)\n\n # Subset columns\n feat_cols = demog_cols + [\"offer_code\"]\n demog_spendings = demog_spendings[feat_cols + [target_col]]\n\n # Drop rows where the end of the offer exceeded the last observed time\n demog_spendings = demog_spendings.dropna(subset=[target_col])\n\n # Get aggregate spendings by demographic groups and offers (median to filter outliers)\n agg_metrics = {target_col: [\"median\",\"size\"]}\n metric_names = [\"spending_median\", \"size\"]\n spendings_per_groups = demog_spendings.groupby(feat_cols).agg(agg_metrics).reset_index()\n spendings_per_groups.columns = feat_cols + metric_names\n\n # Drop groups with small sample size\n spendings_per_groups = spendings_per_groups[spendings_per_groups[\"size\"] >= 30]\n\n if return_raw:\n return demog_spendings, spendings_per_groups\n else:\n return spendings_per_groups", "def expand_households():\n\n if setting('NO_INTEGERIZATION_EVER', False):\n logger.warning(\"skipping expand_households: NO_INTEGERIZATION_EVER\")\n inject.add_table('expanded_household_ids', pd.DataFrame())\n return\n\n geographies = setting('geographies')\n household_id_col = setting('household_id_col')\n\n low_geography = geographies[-1]\n\n # only one we really need is low_geography\n seed_geography = setting('seed_geography')\n geography_cols = geographies[geographies.index(seed_geography):]\n\n weights = get_weight_table(low_geography, sparse=True)\n weights = weights[geography_cols + [household_id_col, 'integer_weight']]\n\n # - expand weights table by integer_weight, so there is one row per desired hh\n weight_cols = weights.columns.values\n weights_np = np.repeat(weights.values, weights.integer_weight.values, axis=0)\n expanded_weights = pd.DataFrame(data=weights_np, columns=weight_cols)\n\n if setting('GROUP_BY_INCIDENCE_SIGNATURE'):\n\n # get these in a repeatable order so np.random.choice behaves the same regardless of weight table order\n # i.e. 
which could vary depending on whether we ran single or multi process due to apportioned/coalesce\n expanded_weights = expanded_weights.sort_values(geography_cols + [household_id_col])\n\n # the household_id_col is really the group_id\n expanded_weights.rename(columns={household_id_col: 'group_id'}, inplace=True)\n\n # the original incidence table with one row per hh, with index hh_id\n household_groups = pipeline.get_table('household_groups')\n household_groups = household_groups[[household_id_col, 'group_id', 'sample_weight']]\n\n # for each group, lists of hh_ids and their sample_weights (as relative probabiliities)\n # [ [ [<group_0_hh_id_list>], [<group_0_hh_prob_list>] ],\n # [ [<group_1_hh_id_list>], [<group_1_hh_prob_list>] ], ... ]\n HH_IDS = 0\n HH_PROBS = 1\n grouper = household_groups.groupby('group_id')\n group_hh_probs = [0] * len(grouper)\n for group_id, df in grouper:\n hh_ids = list(df[household_id_col])\n probs = list(df.sample_weight / df.sample_weight.sum())\n group_hh_probs[group_id] = [hh_ids, probs]\n\n # get a repeatable random number sequence generator for consistent choice results\n prng = pipeline.get_rn_generator().get_external_rng('expand_households')\n\n # now make a hh_id choice for each group_id in expanded_weights\n def chooser(group_id):\n hh_ids = group_hh_probs[group_id][HH_IDS]\n hh_probs = group_hh_probs[group_id][HH_PROBS]\n return prng.choice(hh_ids, p=hh_probs)\n expanded_weights[household_id_col] = \\\n expanded_weights.group_id.apply(chooser, convert_dtype=True,)\n\n # FIXME - omit in production?\n del expanded_weights['group_id']\n del expanded_weights['integer_weight']\n\n append = inject.get_step_arg('append', False)\n replace = inject.get_step_arg('replace', False)\n assert not (append and replace), \"can't specify both append and replace for expand_households\"\n\n if append or replace:\n t = inject.get_table('expanded_household_ids').to_frame()\n prev_hhs = len(t.index)\n added_hhs = len(expanded_weights.index)\n\n if replace:\n # FIXME - should really get from crosswalk table?\n low_ids_to_replace = expanded_weights[low_geography].unique()\n t = t[~t[low_geography].isin(low_ids_to_replace)]\n\n expanded_weights = pd.concat([t, expanded_weights], ignore_index=True)\n\n dropped_hhs = prev_hhs - len(t.index)\n final_hhs = len(expanded_weights.index)\n op = 'append' if append else 'replace'\n logger.info(\"expand_households op: %s prev hh count %s dropped %s added %s final %s\" %\n (op, prev_hhs, dropped_hhs, added_hhs, final_hhs))\n\n # sort this so results will be consistent whether single or multiprocessing, GROUP_BY_INCIDENCE_SIGNATURE, etc...\n expanded_weights = expanded_weights.sort_values(geography_cols + [household_id_col])\n\n repop = inject.get_step_arg('repop', default=False)\n inject.add_table('expanded_household_ids', expanded_weights, replace=repop)", "def window_and_sort(df, n):\n print(\"windowing and sorting\")\n window = Window.partitionBy(df[domain_code]).orderBy(df[count_views].desc())\n ranked = df.withColumn(\"rank\", rank().over(window)).filter(col('rank') <= n)\n return ranked", "def add_distance_from_galvanize(df):\n \n galvanize_coords = (47.5990148, -122.3338371)\n df['dist_from_galvanize'] = [geopy.distance.distance((df['lats'][i],df['longs'][i]), galvanize_coords).miles\n for i in range(len(df))]", "def estimate_pop_per_node():\n path = os.path.join(DATA_INTERMEDIATE, 'elec_distribution.shp')\n elec_sites = gpd.read_file(path)\n\n path = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_areas = 
gpd.read_file(path, crs='epsg:27700')\n\n output_centroids = []\n\n for idx, output_area in output_areas.iterrows():\n\n nearest = nearest_points(output_area['geometry'], elec_sites.unary_union)[1]\n\n output_centroids.append({\n 'geometry': output_area['geometry'],\n 'properties': {\n 'origin_elec': str(nearest.coords.xy[0][0]) +\n '_' +\n str(nearest.coords.xy[1][0]),\n 'population': output_area['population'],\n },\n })\n\n output_centroids = gpd.GeoDataFrame.from_features(output_centroids, crs='epsg:27700')\n path_out = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_centroids.to_file(path_out, crs='epsg:27700')\n\n unique_nodes = set()\n\n for idx, output_area in output_centroids.iterrows():\n unique_nodes.add(output_area['origin_elec'])\n\n pop_per_node = []\n\n for node_id in list(unique_nodes):\n\n population = 0\n\n for idx, output_area in output_centroids.iterrows():\n if output_area['origin_elec'] == node_id:\n population += output_area['population']\n\n pop_per_node.append({\n 'id': node_id,\n 'population': population,\n })\n\n pop_per_node = pd.DataFrame(pop_per_node)\n\n path_out = os.path.join(DATA_INTERMEDIATE, 'pop_by_elec_node.csv')\n pop_per_node.to_csv(path_out, index=False)", "def open_window(annotation_dataframe, window_size, use_tss=False):\n window_annotate = annotation_dataframe.copy()\n\n try:\n if len(window_size) == 1:\n w_up, w_down = window_size[0], window_size[0]\n elif len(window_size) == 2:\n w_up, w_down = window_size[0], window_size[1]\n else:\n raise ValueError(\"window_size must have 1 or 2 values only\")\n except TypeError:\n w_up, w_down = window_size, window_size\n\n if use_tss:\n window_annotate.loc[window_annotate[GTF_STRAND] == \"+\", SEQ_START] = window_annotate[SEQ_TSS] - w_up\n window_annotate.loc[window_annotate[GTF_STRAND] == \"+\", SEQ_STOP] = window_annotate[SEQ_TSS] + w_down\n window_annotate.loc[window_annotate[GTF_STRAND] == \"-\", SEQ_START] = window_annotate[SEQ_TSS] - w_down\n window_annotate.loc[window_annotate[GTF_STRAND] == \"-\", SEQ_STOP] = window_annotate[SEQ_TSS] + w_up\n else:\n window_annotate.loc[window_annotate[GTF_STRAND] == \"+\", SEQ_START] = window_annotate[SEQ_START] - w_up\n window_annotate.loc[window_annotate[GTF_STRAND] == \"+\", SEQ_STOP] = window_annotate[SEQ_STOP] + w_down\n window_annotate.loc[window_annotate[GTF_STRAND] == \"-\", SEQ_START] = window_annotate[SEQ_START] - w_down\n window_annotate.loc[window_annotate[GTF_STRAND] == \"-\", SEQ_STOP] = window_annotate[SEQ_STOP] + w_up\n\n window_annotate.loc[window_annotate[SEQ_START] < 0, SEQ_START] = 0\n\n return window_annotate", "def get_features(df):\n \n schema = StructType([\n StructField(\"proj_ver\", VectorUDT(), False),\n StructField(\"proj_hor\", VectorUDT(), False)\n ])\n\n proj_func = udf(proj_for_spark.project_gravity_xyz, schema)\n\n df = df['X', 'Y', 'Z', 'key'].withColumn('proj', proj_func(\"X\", \"Y\", \"Z\"))\n df = df.select('key',\n 'proj.proj_ver', \n 'proj.proj_hor')\n \n df = df['proj_ver','proj_hor', 'key'].withColumn('denoised_ver',\n utils_function_spark.denoise_func(\"proj_ver\")).withColumn('denoised_hor', \n utils_function_spark.denoise_func(\"proj_hor\"))\n df = df.select('key', \"denoised_ver\", \"denoised_hor\") \n \n df = df[\"denoised_ver\", \"denoised_hor\", 'key'].withColumn('rel_features_ver', \n utils_function_spark.toDWT_relative_udf(\"denoised_ver\")).\\\n withColumn('cont_features_ver',\n utils_function_spark.toDWT_cont_udf(\"denoised_ver\"))\n\n df = df[\"rel_features_ver\", \"cont_features_ver\", 
\"denoised_hor\", 'key'].\\\n withColumn('rel_features_hor', \n utils_function_spark.toDWT_relative_udf(\"denoised_hor\")).\\\n withColumn('cont_features_hor',\n utils_function_spark.toDWT_cont_udf(\"denoised_hor\"))\n\n\n df = df.select('key', 'rel_features_ver', 'cont_features_ver',\n 'rel_features_hor', 'cont_features_hor')\n\n \n return df", "def group_df_by(df_in, timewindow, dfg=None):\n\n if timewindow is None:\n return df_in\n\n col = {\"D\": c.cols.DATE, \"M\": c.cols.MONTH_DATE, \"Y\": c.cols.YEAR}[timewindow]\n\n # Group by date\n df = df_in.copy()[[col, c.cols.AMOUNT]].groupby(col).sum()\n\n if dfg is None:\n return df\n\n # Fill missing rows based on unique values of input data\n return df.reindex(dfg[col].unique(), fill_value=0)", "def get_deltas(df: pd.DataFrame, side: str) -> pd.DataFrame:\n\n result = df.groupby(['co_occurring_joint', 'reference_type'])[[\n 'co_occurring_side', 'reference_side', 'conditional_probability'\n ]].apply(get_delta_co_occurring_joints)\n\n result.name = 'delta'\n\n return result.reset_index()", "def fraction_licks_in_reward_zone(expt_grp):\n rew_intervals = ints.behavior(expt_grp, 'reward')\n licking_intervals = ints.behavior(expt_grp, 'licking')\n\n n_licks = licking_intervals.groupby('trial', as_index=False).agg(len)\n n_licks.rename(columns={'start': 'total_licks'}, inplace=True)\n del n_licks['stop']\n\n licks_in_reward = rew_intervals.filter_events(\n licking_intervals, 'start').groupby('trial', as_index=False).agg(len)\n licks_in_reward.rename(columns={'start': 'licks_in_reward'}, inplace=True)\n del licks_in_reward['stop']\n\n result = pd.merge(licks_in_reward, n_licks, on='trial', how='outer')\n result['licks_in_reward'] = result['licks_in_reward'].fillna(0)\n result['value'] = result['licks_in_reward'] / \\\n result['total_licks'].astype('float')\n\n return result", "def inner(window):\n acc = sum((i*w for i,w in zip(window, weights)))\n return acc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
buffer_definition as (
    SELECT gid, geom, min(distance) as min_buffer_distance
    FROM grid_people
    WHERE total_deaths >= 50 -- For having 50 deaths in each buffer
    GROUP BY gid, geom
),
def buffer_definition(self, populationThreshold, prettyPrint):  # should there be an input for total_deaths?
    buffer_definition = []
    buffer_definition.append(", buffer_definition as")
    buffer_definition.append("(")
    buffer_definition.append("SELECT gid, geom, min(distance) as min_buffer_distance")
    buffer_definition.append("FROM grid_people")
    buffer_definition.append("WHERE total_deaths >= {}".format(populationThreshold))
    buffer_definition.append("GROUP BY gid, geom")
    buffer_definition.append(")")

    return self.print_statements(buffer_definition, prettyPrint)
[ "def buffer_grid(gdf_admin, radius):\n data = gdf_admin.total_bounds\n box_data = box(*data)\n buffer = box_data.buffer(radius)\n bounds_extent = buffer.bounds\n return bounds_extent", "def get_age_grid(df, grdx, grdy, cfg=CFG):\n df = df.sort_values(\"age\", ascending=False) # sort oldest to youngest\n ny, nx = grdy.shape[0], grdx.shape[1]\n grdlat, grdlon = xy2latlon(grdx, grdy, cfg.rad_moon)\n age_grid = np.ones((nx, ny), dtype=cfg.dtype) * cfg.timestart\n for i, row in df.iterrows():\n lon, lat, rad, age, basin = row[[\"lon\", \"lat\", \"rad\", \"age\", \"isbasin\"]]\n grd_dist = gc_dist(lon, lat, grdlon, grdlat)\n ej_thresh = cfg.basin_ej_threshold if basin else cfg.ej_threshold\n ejmask = grd_dist < rad * ej_thresh # Mask ejecta blanket\n age_grid = np.where(ejmask, age, age_grid) # Update age in ejecta\n return age_grid", "def greedy(self):\n # Set initial values\n house = 1\n sorted_distances = None\n sorted_index = None\n sorted_list = []\n\n # Iterate over every house and the distances to the batteries\n for distances_house in self.distances:\n # Sort distances and remember index\n sorted_distances = sorted(distances_house)\n sorted_index = sorted(range(len(distances_house)), key=lambda k: distances_house[k])\n\n # Add connected battery + house to distance\n index = 0\n for distance in sorted_distances:\n battery = sorted_index[index] + 1\n sorted_list.append([distance, battery, house])\n index += 1\n\n house += 1\n\n # Get sorted list of all distances\n sorted_list = sorted(sorted_list)\n\n # Set total distance Grid to 0 and create empty list with connections of houses to batteries\n connections = []\n total_distance = 0\n attached_houses = []\n\n # Iterate over sorted list\n for i in sorted_list:\n # Check if house already attached to a battery\n house = i[2]\n if house not in attached_houses:\n # Get output house\n max_output = self.houses[house].max_output\n # Check if capacity battery not yet reached\n battery = i[1]\n current_capacity = self.batteries[battery].currentCapacity\n max_capacity = self.batteries[battery].capacity\n if (current_capacity + max_output) <= max_capacity:\n # Add to capacity, total distance and attached houses\n self.batteries[battery].currentCapacity += max_output\n distance = i[0]\n total_distance += distance\n attached_houses.append(house)\n house_to_battery = {'house': house, 'battery': battery,\n 'distance': distance, 'max_output_house': max_output}\n connections.append(house_to_battery)\n\n # Check for missing houses\n missing_houses = [value for value in range(1, 150) if value not in attached_houses]\n\n return [total_distance, connections, missing_houses]", "def from_occupancy_grid(grid: OccupancyGrid) -> LikelihoodField:\n\n def to_pos(ix: int) -> Tuple[int, int]:\n \"\"\"\n Map a 1D occupancy grid index to a 2D index (row, col).\n \"\"\"\n return (ix % grid.info.width, ix // grid.info.width)\n\n cells_indexed = [*enumerate(grid.data)]\n\n occupied_positions = [to_pos(ix) for (ix, c) in cells_indexed if c == cell.OCCUPIED]\n\n # Use a two-dimensional k-d tree to quickly find the occupied cell closest\n # to a given free cell.\n occupied_tree = kdtree.create(point_list=occupied_positions)\n\n def compute_distance(ix: int, c: int) -> float:\n \"\"\"\n Compute the distance from the given cell to the closest occupied cell.\n\n @param `ix`: The 1D index of the given cell.\n\n @param `c`: The value representation of the given cell.\n \"\"\"\n if c == cell.FREE:\n nearest_occupied: Optional[\n Tuple[kdtree.Node, float]\n ] = 
occupied_tree.search_nn(to_pos(ix), dist=points_dist)\n\n # Contingency for a map with no occupied cells.\n if nearest_occupied is None:\n return DIST_UNKNOWN\n\n (_, distance) = nearest_occupied\n\n return distance\n\n return DIST_OCCUPIED if c == cell.OCCUPIED else DIST_UNKNOWN\n\n field_1d = [compute_distance(*indexed) for indexed in cells_indexed]\n field = np.reshape(field_1d, (grid.info.width, grid.info.height))\n\n return LikelihoodField(\n width=grid.info.width,\n height=grid.info.height,\n resolution=grid.info.resolution,\n origin=v2.from_point(grid.info.origin.position),\n field=field,\n )", "def nearest(geom, df,sindex): \n matches_idx = sindex.query(geom)\n nearest_geom = min(\n [df.iloc[match_idx] for match_idx in matches_idx],\n key=lambda match: shapely.measurement.distance(match.geometry,geom)\n )\n return nearest_geom", "def soundings_in_geom(geom):\n return DepthSounding.objects.filter(geometry__within=geom)", "def get_shortestPath(coords):\n # getting the healthcare vertices. \n source_vertices = get_healthcare()\n #holders \n sources=[]\n shortest_paths=[]\n aggregations = []\n shortest_geom_holder=[]\n # find the closest vertices to coords\n with connection.cursor() as cur:\n cur.execute(f\"select ST_AsText(ST_MakePoint({coords['lng']},{coords['lat']}),4326)\")\n j=''\n closest_node=''\n for i in cur:\n j=i[0]\n cur.execute(f\"select id,source from lines.cleaned2 order by ST_Transform(lines.cleaned2.geom,4326) <-> ST_GeomFromText('{j}',4326) limit 1\")\n for i in cur:\n closest_node=i\n \n #getting the source of vertices only. \n for i in source_vertices:\n sources.append(i[1])\n with connection.cursor() as cur:\n # each time the function runs, empty the previous result, which is a linestring geometry \n cur.execute('delete from lines.dijkstra_results;')\n # this is the main part, dijkstra alogrithm. \n for i in sources:\n looper=[] # this is an array to divide each result comming from dijkstra for each facility \n cur.execute(f\"select pgr_dijkstra('select id,geom,source,target,ST_Length(geom) as cost from lines.cleaned2',{closest_node[1]},{i})\") \n for j in cur:\n looper.append(j)\n if looper!=[]:\n shortest_paths.append(looper)\n for i in shortest_paths:\n k = len(i)-1\n elem = i[k][0] #this is the string which holds the aggregation value, taking the form of a tuple \n aggregations.append(float(elem.split(\",\")[5].strip(')'))) \n shortest = min(aggregations) # this is the aggregation value . \n # now the last step is to get the line geometry from the aggregation value \n # get pgr_dijkstra sequence of nodes that represents the shortest line sequence. 
\n for i in shortest_paths: # for each array that holds the values \n k = len(i)-1\n elem = i[k][0]\n aggrey_cost=float((elem.split(\",\")[5].split(')'))[0]) \n \n if aggrey_cost==shortest:\n shortest_geom_holder.append(i)\n nodes_holder=[]\n line_geom_parts_holder=[]\n for lines_list in shortest_geom_holder:\n for i in lines_list:\n tupler = eval(i[0])\n id1 = tupler[2]\n id2 = tupler[3]\n nodes_holder.append([id1,id2])\n lent = len(nodes_holder)-1 # getting the length of nodes holder to get the last pair, as the first node of the last pair is the hospital location \n first_id = nodes_holder[0][0]\n last_id = nodes_holder[lent][0]\n cur.execute(f\"select lines.cleaned2.geom from ( select * from pgr_dijkstra('select id,geom,source,target,ST_length(geom) as cost from lines.cleaned2', {first_id},{last_id})) as route left outer join lines.cleaned2 on id= route.edge\")\n for i in cur:\n line_geom_parts_holder.append(i)\n # inserting newly created geometry to the empty table. \n index=0\n returned_values='' # this will be returned to the view. \n collector1=[]\n collector2=[]\n\n for i in line_geom_parts_holder:\n if(i[0]!=None):\n index+=1\n collector1.append(i)\n for i in collector1:\n cur.execute(f\"select ST_AsText('{i[0]}')\")\n for i in cur:\n collector2.append(i[0])\n else:\n pass\n cur.execute(f'select ST_Collect(ARRAY{collector2})')\n for i in cur:\n returned_values=i[0]\n cur.execute(f\"select ST_CollectionExtract(ST_GeomFromText(ST_AsText('{returned_values}')),2)\")\n line=''\n for i in cur:\n last_line=i[0]\n cur.execute(f\"insert into lines.dijkstra_results values({0},ST_SetSRID(ST_GeomFromText(ST_AsText('{last_line}')),32636))\")\n cur.execute(f\"select ST_Transform(ST_SetSRID(ST_GeomFromText(ST_AsText('{last_line}')),32636),4326)\")\n for i in cur:\n adjust_line = i[0]\n connection.commit()\n return adjust_line", "def gj_query(table):\n try:\n conn_string=POSTGRESQL\n connection=pg.connect(conn_string)\n cur = connection.cursor()\n except Exception as e :\n print(\"[!] 
\",e)\n else:\n with connection:\n with cur:\n query = \"\"\"SELECT jsonb_build_object('type','FeatureCollection','features', jsonb_agg(feature)) FROM (SELECT jsonb_build_object('type','Feature','id', buildings_id,'geometry',ST_AsGeoJSON(st_transform(geom, 4326))::jsonb,'properties',to_jsonb(inputs) - 'geom') AS feature FROM (SELECT buildings_id, geom FROM {} where demolished <> 'yes') inputs) features;\"\"\".format(table).replace('\\n',' ')\n\n cur.execute(query)\n geoj = cur.fetchall()\n return geoj\n\n finally:\n connection.close()", "def spatial_filter_query(self, prettyPrint):\n \n spatial_filter = []\n spatial_filter.append(\"SELECT e.gid, e.geom, e.min_buffer_distance, e.expected_deaths, o.number_of_zctas_used, o.observed_deaths, coalesce(o.observed_deaths,0)/e.expected_deaths as ratio\")\n spatial_filter.append(\"FROM filter_expected e \")\n spatial_filter.append(\"INNER JOIN filter_observed o ON (e.gid = o.gid)\")\n \n return self.print_statements(spatial_filter, prettyPrint)", "def grid_glider_data(df, varname, delta_z=.3):\n df.dropna(inplace=True)\n #df.dropna() # Changed to work with ru29 2020 datatset by JG\n df.drop(df[df['depth'] < .1].index, inplace=True) # drop rows where depth is <1\n df.drop(df[df[varname] == 0].index, inplace=True) # drop rows where the variable equals zero\n df.sort_values(by=['time', 'depth'], inplace=True)\n\n # find unique times and coordinates\n timeg, ind = np.unique(df.time.values, return_index=True)\n latg = df['latitude'].values[ind]\n long = df['longitude'].values[ind]\n dg = df['depth'].values\n vg = df[varname].values\n zn = np.int(np.max(np.diff(np.hstack([ind, len(dg)]))))\n\n depthg = np.empty((zn, len(timeg)))\n depthg[:] = np.nan\n varg = np.empty((zn, len(timeg)))\n varg[:] = np.nan\n\n for i, ii in enumerate(ind):\n if i < len(timeg) - 1:\n i_f = ind[i + 1]\n else:\n i_f = len(dg)\n depthi = dg[ind[i]:i_f]\n vari = vg[ind[i]:i_f]\n depthg[0:len(dg[ind[i]:i_f]), i] = depthi\n varg[0:len(vg[ind[i]:i_f]), i] = vari\n\n # sort time variable\n okt = np.argsort(timeg)\n timegg = timeg[okt]\n depthgg = depthg[:, okt]\n vargg = varg[:, okt]\n\n # Grid variables\n depthg_gridded = np.arange(0, np.nanmax(depthgg), delta_z)\n varg_gridded = np.empty((len(depthg_gridded), len(timegg)))\n varg_gridded[:] = np.nan\n\n for t, tt in enumerate(timegg):\n depthu, oku = np.unique(depthgg[:, t], return_index=True)\n varu = vargg[oku, t]\n okdd = np.isfinite(depthu)\n depthf = depthu[okdd]\n varf = varu[okdd]\n ok = np.asarray(np.isfinite(varf))\n if np.sum(ok) < 3:\n varg_gridded[:, t] = np.nan\n else:\n okd = np.logical_and(depthg_gridded >= np.min(depthf[ok]), depthg_gridded < np.max(depthf[ok]))\n varg_gridded[okd, t] = np.interp(depthg_gridded[okd], depthf[ok], varf[ok])\n\n return timegg, long, latg, depthg_gridded, varg_gridded", "def make_buffered_fishnet(xmin, ymin, xmax, ymax, crs, spacing=1000,\n overlap=50):\n xx, yy = np.meshgrid(\n np.arange(xmin, xmax + spacing, spacing),\n np.arange(ymin, ymax + spacing, spacing))\n\n xx_leftbuff = xx[:, :-1] - overlap\n xx_rightbuff = xx[:, 1:] + overlap\n yy_downbuff = yy[:-1, :] - overlap\n yy_upbuff = yy[1:, :] + overlap\n\n ll = np.stack((\n xx_leftbuff[1:, :].ravel(), # skip top row\n yy_downbuff[:, :-1].ravel())).T # skip right-most column\n\n ul = np.stack((\n xx_leftbuff[:-1, :].ravel(), # skip bottom row\n yy_upbuff[:, :-1].ravel())).T # skip right-most column\n\n ur = np.stack((\n xx_rightbuff[:-1, :].ravel(), # skip bottom row\n yy_upbuff[:, 1:].ravel())).T # skip left-most column\n\n lr = 
np.stack((\n xx_rightbuff[1:, :].ravel(), # skip top row\n yy_downbuff[:, 1:].ravel())).T # skip left-most column\n\n buff_fishnet = np.stack([ll, ul, ur, lr])\n\n polys = [\n Polygon(buff_fishnet[:, i, :]) for i in range(buff_fishnet.shape[1])\n ]\n ll_names = [x for x in (ll).astype(int).astype(str)]\n tile_ids = [\n '_'.join(tile) + '_{}'.format(str(spacing)) for tile in ll_names\n ]\n\n buff_fishnet_gdf = gpd.GeoDataFrame(geometry=polys, crs=crs)\n buff_fishnet_gdf['tile_id'] = tile_ids\n\n return buff_fishnet_gdf.set_index('tile_id')", "def create_distance_field(entity_list):\n # create grid using grid width and height\n grid = [[1000 for x in range(GRID_WIDTH)] for y in range(GRID_HEIGHT)]\n # iterate through grid positions:\n for x in range(GRID_HEIGHT):\n print('x=', x)\n for y in range(GRID_WIDTH):\n print('y=', y)\n # compare distances to each entity\n for ent in entity_list:\n dist = manhattan_distance(x, y, ent[0], ent[1]) #abs(x - ent[0]) + abs(y - ent[1])\n print(dist)\n if dist < grid[x][y]:\n grid[x][y] = dist\n # mark position as smallest number from entity\n return grid", "def get_near_groups(lat, lon, num):\n with db_conn.cursor() as cur:\n sql = '''SELECT CITY, LAT, LON FROM (\n SELECT DISTINCT CITY, LAT, LON, (X+Y) AS Z FROM (\n SELECT CITY, LAT, LON, ABS(LAT - %s) AS X, ABS(LON - %s) AS Y FROM cities ORDER BY X ASC) AS PIZZA \n GROUP BY CITY \n ORDER BY Z \n LIMIT %s \n ) AS CICCIO'''\n cur.execute(sql, (lat, lon, num))\n query_result = cur.fetchall() # a list of dicts\n\n # update the returned dict with distance information\n for i in query_result:\n i.update({'DIST': round(distance.distance((lat, lon), (i['LAT'], i['LON'])).km, 2)}) # geopy geodesic distance measure\n\n return sorted(query_result, key=lambda x: x['DIST'])", "def get_bob_query(m,rcut=3.6):\n na = len(m)\n zs = m.numbers\n esa = zs #-1. 
* zs**2.4\n zs1 = np.unique(zs)\n nz1 = len(zs1)\n izs = np.arange(nz1)\n boa = np.zeros((na,nz1))\n for i in range(na):\n boa[i,izs[zs[i]==zs1][0]] = esa[i]\n\n zpairs = [ (zi,zi) for zi in zs1 ] + list( itl.combinations(zs1,2) )\n dic0 = {}\n for zpair in zpairs: dic0[zpair] = []\n ds = ssd.squareform(ssd.pdist(m.positions))\n\n dics = []; ns = []\n for i in range(na):\n dic = dic0.copy()\n for j in range(na):\n if i==j or ds[i,j]>rcut: continue\n zi,zj = zs[i],zs[j]\n pair = (min(zi,zj), max(zj,zi))\n if pair in dic.keys():\n dic[pair] +=[ ds[i,j] ]# [zi*zj/ds[i,j]]\n _ns = []\n for zpair in zpairs:\n _bob = dic[zpair]\n _l = len(_bob)\n if _l > 0: _bob.sort(reverse=True)\n _ns.append( _l )\n ns.append(_ns); dics.append(dic)\n nbs = np.max(ns, axis=0) #; print 'nbs=',nbs\n l = sum(nbs)\n idx2 = np.cumsum(nbs)\n idx1 = np.array([0]+list(idx2[:-1]),dtype=int)\n bob = np.zeros((na,l))\n for i in range(na):\n dic = dics[i]\n for j,zpair in enumerate(zpairs):\n _bob = dic[zpair]\n lenj = len(_bob)\n ib = idx1[j]; ie = ib+lenj\n bob[i, ib:ie] = _bob\n bob = np.concatenate((boa,bob), axis=1)\n return zs1, zpairs,idx1,idx2,bob", "def calc_geom(work_array, percent_complete, result_queue):\n\n results_map = {}\n\n def get_line_index(line, position, index):\n if index[0] == index[1]:\n return index[0]\n\n if index[1] - index[0] == 1:\n p = index[0] if Point(line.coords[index[0]]).distance(position) < Point(line.coords[index[1]]).distance(position) else index[1]\n return p\n\n pivot_point = math.floor((index[1] - index[0])/2) + index[0]\n i1 = (index[0], pivot_point)\n i2 = (pivot_point+1, index[1])\n\n if i1[0] != i1[1] and LineString(line.coords[i1[0]:i1[1]+1]).intersects(position):\n return get_line_index(line, position, i1)\n else:\n return get_line_index(line, position, i2)\n\n processed = 0\n for w in work_array:\n try:\n wid, position, edge_id, destination, geom = [w[\"id\"], w[\"p1\"], w[\"road\"], w[\"p2\"], w[\"geom\"]]\n\n for i in range(0, len(geom)):\n line = geom[i]\n if line.intersects(position) and line.intersects(destination):\n op = get_line_index(line, position, (0, len(line.coords)-1))\n dp = get_line_index(line, destination, (0, len(line.coords)-1))\n\n begin, end = (op, dp) if op < dp else (dp, op)\n\n if begin == end:\n results_map[wid] = None\n break\n\n results_map[wid] = LineString(line.coords[begin:end+1])\n processed += 1\n percent_complete.value = processed / len(work_array) * 100.0\n\n except Exception as e:\n print(\"There was a problem \", e)\n continue\n\n result_queue.put(results_map)\n percent_complete.value = 100\n return None", "def grid_on_point(self, x, y):\n qry = \"\"\"\n SELECT g.fid\n FROM grid AS g\n WHERE g.ROWID IN (\n SELECT id FROM rtree_grid_geom\n WHERE\n {0} <= maxx AND\n {0} >= minx AND\n {1} <= maxy AND\n {1} >= miny)\n AND\n ST_Intersects(GeomFromGPB(g.geom), ST_GeomFromText('POINT({0} {1})'));\n \"\"\"\n qry = qry.format(x, y)\n data = self.execute(qry).fetchone()\n if data is not None:\n gid = data[0]\n else:\n gid = None\n return gid", "def query5():\n docs = db.airbnb.aggregate([\n {\n '$geoNear': {\n 'near': {'type': 'Point', 'coordinates': [-73.9654, 40.7829]},\n 'distanceField': 'dist.calculated',\n 'maxDistance': 1000,\n 'includeLocs': 'dist.location',\n 'spherical': False\n }\n },\n {\n '$project': {\n '_id': 0,\n 'dist': 1,\n 'location': 1,\n 'name': 1,\n 'neighbourhood': 1,\n 'neighbourhood_group': 1,\n 'price': 1,\n 'room_type': 1\n }\n }\n ])\n result = [doc for doc in docs]\n return result", "def collect_gaze(df, 
collect_init_look=True, threshold=134):\r\n\r\n min_sample_nr = int(threshold / ST) # 8 samples at 134 threshold\r\n\r\n gaze_list = [] # list of hit lists\r\n\r\n hits = [] # first element is timestamp of gaze start, rest are look tags.\r\n for i in df.index:\r\n\r\n look = df.at[i, \"aoi\"]\r\n\r\n if look != \"OUT\":\r\n\r\n if not hits: # empty hits list\r\n latency = df.at[i, \"TimeStamp\"] #- start\r\n hits.append(latency)\r\n hits.append(look)\r\n\r\n elif look in hits:\r\n hits.append(look)\r\n # if last row:\r\n if (i == df.index[-1]) and (len(hits) > min_sample_nr): # time is also in the hits\r\n gaze_list.append(hits[:])\r\n\r\n else: # new aoi tag\r\n if len(hits) >= min_sample_nr:\r\n gaze_list.append(hits[:])\r\n\r\n hits[:] = []\r\n latency = df.at[i, \"TimeStamp\"] #- start\r\n hits.append(latency)\r\n hits.append(look)\r\n\r\n else:\r\n if len(hits) > min_sample_nr:\r\n gaze_list.append(hits[:])\r\n hits[:] = []\r\n\r\n # -> tag_time_dur_dict: {rank:[tag, time, duration (lenght)]}\r\n tag_time_dur_dict = {i+1:[gaze[-1], gaze[0], len(gaze[1:])] for i,gaze in enumerate(gaze_list)}\r\n\r\n gaze_coll = GazeCollection(tag_time_dur_dict)\r\n\r\n return gaze_coll", "def get_gbank_hits(genbank_recs,molecule_id):\n\n molecule_id_parts = molecule_id.split('|')\n\n # probably not needed but I'm going to match on the molecule id as well\n # just in case.\n molecule_id_parts.append(molecule_id)\n\n for mip in molecule_id_parts:\n\n if '.' in mip:\n try:\n molecule_id_parts.append( mip.split('.')[0] )\n except Exception:\n #print \"Error getting '.' prefix from locus tag %s in molecule id %s\" % (mip,molcule_id) \n\n gbhits = [g for g in genbank_recs if g.name in molecule_id_parts or g.id in molecule_id_parts]\n\n return gbhits" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
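The buffer_definition record above pairs the CTE with the Python method that emits it line by line before handing the list to print_statements. Below is a minimal sketch of how the emitted text could be exercised, assuming print_statements simply joins the accumulated lines (newlines when prettyPrint is true, spaces otherwise); that helper is not shown in this record, so its behaviour here is an assumption.

# Hypothetical harness around the generator shown above; the behaviour of
# print_statements is assumed, not taken from this record.
class BufferQueryBuilder:
    def print_statements(self, statements, prettyPrint):
        # assumption: pretty printing joins with newlines, otherwise spaces
        return "\n".join(statements) if prettyPrint else " ".join(statements)

    def buffer_definition(self, populationThreshold, prettyPrint):
        lines = [
            ", buffer_definition as",
            "(",
            "SELECT gid, geom, min(distance) as min_buffer_distance",
            "FROM grid_people",
            "WHERE total_deaths >= {}".format(populationThreshold),
            "GROUP BY gid, geom",
            ")",
        ]
        return self.print_statements(lines, prettyPrint)

sql = BufferQueryBuilder().buffer_definition(50, prettyPrint=True)
assert "total_deaths >= 50" in sql  # the threshold is interpolated into the CTE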
filter_expected as (
    SELECT b.gid, b.geom, b.min_buffer_distance, sum(gpj.total_deaths) as expected_deaths
    FROM grid_person_join gpj
    INNER JOIN buffer_definition b ON gpj.gid = b.gid
    WHERE gpj.distance <= b.min_buffer_distance
    GROUP BY b.gid, b.geom, b.min_buffer_distance
),
def filter_expected(self, prettyPrint):
    filter_calc = []
    filter_calc.append(", filter_expected as")
    filter_calc.append("(")
    filter_calc.append("SELECT b.gid, b.geom, b.min_buffer_distance, sum(gpj.total_deaths) as expected_deaths")
    filter_calc.append("FROM grid_person_join gpj")
    filter_calc.append("INNER JOIN buffer_definition b ON gpj.gid = b.gid")
    filter_calc.append("WHERE gpj.distance <= b.min_buffer_distance")
    filter_calc.append("GROUP BY b.gid, b.geom, b.min_buffer_distance")
    filter_calc.append(")")

    return self.print_statements(filter_calc, prettyPrint)
[ "def spatial_filter_query(self, prettyPrint):\n \n spatial_filter = []\n spatial_filter.append(\"SELECT e.gid, e.geom, e.min_buffer_distance, e.expected_deaths, o.number_of_zctas_used, o.observed_deaths, coalesce(o.observed_deaths,0)/e.expected_deaths as ratio\")\n spatial_filter.append(\"FROM filter_expected e \")\n spatial_filter.append(\"INNER JOIN filter_observed o ON (e.gid = o.gid)\")\n \n return self.print_statements(spatial_filter, prettyPrint)", "def buffer_definition(self, populationThreshold, prettyPrint): # should there be an input for total_deaths?\n \n buffer_definition = []\n buffer_definition.append(\", buffer_definition as\")\n buffer_definition.append(\"(\")\n buffer_definition.append(\"SELECT gid, geom, min(distance) as min_buffer_distance\")\n buffer_definition.append(\"FROM grid_people\")\n buffer_definition.append(\"WHERE total_deaths >= {}\".format(populationThreshold))\n buffer_definition.append(\"GROUP BY gid, geom\")\n buffer_definition.append(\")\")\n \n return self.print_statements(buffer_definition, prettyPrint)", "def filter_observed(self, prettyPrint):\n\n filter_obs = []\n filter_obs.append(\", filter_observed as\")\n filter_obs.append(\"(\")\n filter_obs.append(\"SELECT b.gid, count(o.observed_deaths) as number_of_zctas_used, sum(o.observed_deaths) as observed_deaths\")\n filter_obs.append(\"FROM buffer_definition b\")\n #### This seems wierd we need an index on this with geom 26915 on the centroids?\n filter_obs.append(\"LEFT JOIN observed o on ST_DWithin( b.geom, ST_Transform(ST_Centroid(o.geom), 26915), b.min_buffer_distance)\")\n filter_obs.append(\"GROUP BY b.gid, b.geom\")\n filter_obs.append(\")\")\n \n return self.print_statements(filter_obs, prettyPrint)", "def get_tupleBreakpoints_for_filters_GRIDSS(df_gridss, filters_dict, reference_genome, return_timing=False):\n\n # initialize time\n start_time = time.time()\n\n # debug the fact that there is no min_af_EitherSmallOrLargeEvent\n if \"min_af_EitherSmallOrLargeEvent\" not in filters_dict: filters_dict[\"min_af_EitherSmallOrLargeEvent\"] = 0.0\n\n # get the filtered df\n df_filt = get_gridssDF_filtered(df_gridss, reference_genome, min_Nfragments=filters_dict[\"min_Nfragments\"], min_af=filters_dict[\"min_af\"], wrong_INFOtags=filters_dict[\"wrong_INFOtags\"], wrong_FILTERtags=filters_dict[\"wrong_FILTERtags\"], filter_polyGC=filters_dict[\"filter_polyGC\"], filter_noSplitReads=filters_dict[\"filter_noSplitReads\"], filter_noReadPairs=filters_dict[\"filter_noReadPairs\"], maximum_strand_bias=filters_dict[\"maximum_strand_bias\"], maximum_microhomology=filters_dict[\"maximum_microhomology\"], maximum_lenght_inexactHomology=filters_dict[\"maximum_lenght_inexactHomology\"], range_filt_DEL_breakpoints=filters_dict[\"range_filt_DEL_breakpoints\"], min_length_inversions=filters_dict[\"min_length_inversions\"], dif_between_insert_and_del=filters_dict[\"dif_between_insert_and_del\"], max_to_be_considered_small_event=filters_dict[\"max_to_be_considered_small_event\"], min_size=filters_dict[\"min_size\"], add_columns=False, min_af_EitherSmallOrLargeEvent=filters_dict[\"min_af_EitherSmallOrLargeEvent\"], min_QUAL=filters_dict[\"min_QUAL\"], filter_overlappingRepeats=filters_dict[\"filter_overlappingRepeats\"] )\n\n # get the breakpoints that have both breakends\n correct_breakpoints = tuple(sorted([bp for bp, N in Counter(df_filt.breakpointID).items() if N==2]))\n\n if return_timing: return (time.time() - start_time)\n else: return correct_breakpoints", "def get_gridssDF_filtered_from_filtersDict(df_gridss, 
filters_dict, reference_genome):\n\n # debug the fact that there is no min_af_EitherSmallOrLargeEvent\n if \"min_af_EitherSmallOrLargeEvent\" not in filters_dict: filters_dict[\"min_af_EitherSmallOrLargeEvent\"] = 0.0\n\n # get the filtered df\n df_filt = get_gridssDF_filtered(df_gridss, reference_genome, min_Nfragments=filters_dict[\"min_Nfragments\"], min_af=filters_dict[\"min_af\"], wrong_INFOtags=filters_dict[\"wrong_INFOtags\"], wrong_FILTERtags=filters_dict[\"wrong_FILTERtags\"], filter_polyGC=filters_dict[\"filter_polyGC\"], filter_noSplitReads=filters_dict[\"filter_noSplitReads\"], filter_noReadPairs=filters_dict[\"filter_noReadPairs\"], maximum_strand_bias=filters_dict[\"maximum_strand_bias\"], maximum_microhomology=filters_dict[\"maximum_microhomology\"], maximum_lenght_inexactHomology=filters_dict[\"maximum_lenght_inexactHomology\"], range_filt_DEL_breakpoints=filters_dict[\"range_filt_DEL_breakpoints\"], min_length_inversions=filters_dict[\"min_length_inversions\"], dif_between_insert_and_del=filters_dict[\"dif_between_insert_and_del\"], max_to_be_considered_small_event=filters_dict[\"max_to_be_considered_small_event\"], min_size=filters_dict[\"min_size\"], add_columns=False, min_af_EitherSmallOrLargeEvent=filters_dict[\"min_af_EitherSmallOrLargeEvent\"], min_QUAL=filters_dict[\"min_QUAL\"], filter_overlappingRepeats=filters_dict[\"filter_overlappingRepeats\"] )\n\n return df_filt", "def get_age_grid(df, grdx, grdy, cfg=CFG):\n df = df.sort_values(\"age\", ascending=False) # sort oldest to youngest\n ny, nx = grdy.shape[0], grdx.shape[1]\n grdlat, grdlon = xy2latlon(grdx, grdy, cfg.rad_moon)\n age_grid = np.ones((nx, ny), dtype=cfg.dtype) * cfg.timestart\n for i, row in df.iterrows():\n lon, lat, rad, age, basin = row[[\"lon\", \"lat\", \"rad\", \"age\", \"isbasin\"]]\n grd_dist = gc_dist(lon, lat, grdlon, grdlat)\n ej_thresh = cfg.basin_ej_threshold if basin else cfg.ej_threshold\n ejmask = grd_dist < rad * ej_thresh # Mask ejecta blanket\n age_grid = np.where(ejmask, age, age_grid) # Update age in ejecta\n return age_grid", "def test_0_4_greater():\n # get data\n x = data()\n # filter poland\n age_groups = ['0_4','5_9','10_14','15_19']\n x = x[x.region.isin(['CZ','PL','SE','IT']) &\n (x.age.isin(age_groups)) &\n (x.year >= 2014) & (x.year < 2021) &\n (x.week < 54)]\\\n .reset_index(drop = True)\n # aggregate\n x = x\\\n .groupby(['year','week','region','age'])\\\n .aggregate({'deaths': 'sum'})\\\n .reset_index(drop = False)\n x['year'] = x.year.apply(int)\n # get population\n POP = population._get_populations()\n POP = POP[(POP.age.isin(age_groups)) & (POP.sex == 'T') &\n POP['geo\\\\time'].isin(['CZ','PL','SE','IT'])]\\\n .rename({'geo\\\\time': 'region'}, axis=1)\n POP = pd.melt(POP.drop(['sex'], axis=1),\n id_vars=['region','age'], var_name='year', value_name='population')\n POP['year'] = POP.year.apply(int)\n x = x.merge(POP, how='left', on=['year','region','age'])\n x['deaths100K'] = x.deaths / x.population * 1e5\n x = x[x.year == 2020]\n # filter\n def filter_region(x, reg):\n return x[x.region.apply(lambda r: r[:2] == reg)]\\\n .reset_index(drop=True)\n x_pl = filter_region(x,'PL')\n x_it = filter_region(x,'IT')\n x_se = filter_region(x,'SE')\n x_cz = filter_region(x,'CZ')\n # test\n t_pl_cz = ttest_ind(x_pl.deaths100K[x_pl.age == '0_4'],\n x_cz.deaths100K[x_cz.age == '0_4'], alternative='less')\n t_pl_it = ttest_ind(x_pl.deaths100K[x_pl.age == '0_4'],\n x_it.deaths100K[x_it.age == '0_4'], alternative='less')\n t_pl_se = ttest_ind(x_pl.deaths100K[x_pl.age == '0_4'],\n 
x_se.deaths100K[x_se.age == '0_4'], alternative='less')\n t_cz_it = ttest_ind(x_cz.deaths100K[x_cz.age == '0_4'],\n x_it.deaths100K[x_it.age == '0_4'], alternative='two-sided')\n t_cz_se = ttest_ind(x_cz.deaths100K[x_cz.age == '0_4'],\n x_se.deaths100K[x_se.age == '0_4'], alternative='two-sided')\n t_it_se = ttest_ind(x_it.deaths100K[x_it.age == '0_4'],\n x_se.deaths100K[x_se.age == '0_4'], alternative='two-sided')\n def decide(pvalue, thres = .05):\n return 'Y' if pvalue > thres else 'N'\n return [\n {\n 'country1': 'PL', 'country2': 'CZ',\n 't_pi': t_pl_cz.pvalue,\n 't_accept': decide(t_pl_cz.pvalue)\n },\n {\n 'country1': 'PL', 'country2': 'IT',\n 't_pi': t_pl_it.pvalue,\n 't_accept': decide(t_pl_it.pvalue)\n },\n {\n 'country1': 'PL', 'country2': 'SE',\n 't_pi': t_pl_se.pvalue,\n 't_accept': decide(t_pl_se.pvalue) \n },\n {\n 'country1': 'CZ', 'country2': 'IT',\n 't_pi': t_cz_it.pvalue,\n 't_accept': decide(t_cz_it.pvalue, .025)\n },\n {\n 'country1': 'CZ', 'country2': 'SE',\n 't_pi': t_cz_se.pvalue,\n 't_accept': decide(t_cz_se.pvalue, .025)\n },\n {\n 'country1': 'IT', 'country2': 'SE',\n 't_pi': t_it_se.pvalue,\n 't_accept': decide(t_it_se.pvalue, .025)\n }\n ]", "def transform_aggregated(data):\n test_gr_df = data.copy()\n test_gr_df['pred'] = (test_gr_df['sum(similar)'] > 0)\n\n test_gr_df['act'] = (test_gr_df['avg(stars)'] < 3)\n test_gr_df['base'] = (test_gr_df['avg(rating)'] < 3)\n test_gr_df['base_3.5'] = (test_gr_df['avg(rating)'] < 3.5)\n\n return test_gr_df", "def filter_queries(args):\n for split_name in ['train', 'val']:\n split_dir = '%s/datasets/netvlad/%d/%s'%(cst.SCRIPT_DIR, args.data_id, split_name)\n metadata = Metadata(split_dir)\n\n knn = NearestNeighbors(n_jobs=-1)\n knn.fit(metadata.utmDb)\n\n # list of array of db idx matching a query\n # nontrivial_positives[i] = list of db img idx matching the i-th query\n nontrivial_positives = list(knn.radius_neighbors(metadata.utmQ,\n radius=metadata.dist_pos, return_distance=False))\n #print(nontrivial_positives) # [i]=array([ 0, 1, 2, 3, 4, 5])\n \n # its possible some queries don't have any non trivial potential positives\n # lets filter those out\n queries_idx = np.where(np.array([len(x) for x in nontrivial_positives])>0)[0]\n #metadata.utmQ = metadata.utmQ[queries_idx,:]\n #metadata.qImage = metadata.qImage[queries_idx]\n #num_queries = queries_idx.shape[0]\n \n metadata.filter(queries_idx)\n metadata.save()\n \n # debug\n if (1==1):\n toto = np.array([i for i,l in enumerate(nontrivial_positives) if len(l)>0])\n toto_sum = np.sum( (toto - queries_idx))\n if toto_sum!=0:\n print(toto_sum)\n print(\"Error somewhere in dataset\")\n exit(1)\n nontrivial_positives = [l for l in nontrivial_positives if len(l)>0]", "def test_4():\n table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')\n\n result = aspects.group_by(table, ['race/ethnicity'], \n enums.SummaryOperators.PROPORTION_OF_COUNT)\n \n result_table = result['table']\n result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'gender'])\n\n result_suggestions = result['suggestions']\n \n # Sum of proportion column should be(close to) 1.0\n assert(result_table['gender'].sum() == 1.0)\n\n print(result_table)\n\n expected_result_table = \"\"\" race/ethnicity gender\n0 group A 0.089\n1 group B 0.190\n2 group C 0.319\n3 group D 0.262\n4 group E 0.140\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result_table == result_table.to_string())\n assert(str(result_suggestions) == expected_suggestions)", "def 
populate_hit_filter(filtered_data):\n\n print(\"** populating hit options\")\n\n df_filtered = pd.read_json(filtered_data, orient=\"split\")\n df_filtered = df_filtered.where(pd.notnull(df_filtered), None)\n\n # print('df_filtered', df_filtered.iloc[0])\n\n populated_hit_filter = []\n return [\n html.Div(\n [\n html.Label(\"Select {} range\".format(ATTRIBUTE_COL)),\n html.Div(id=\"attribute-slider-histogram-hit-container\"),\n dcc.RangeSlider(\n id=\"attribute-range-slider-hit\",\n min=df_filtered[ATTRIBUTE_COL].min(),\n max=df_filtered[ATTRIBUTE_COL].max(),\n step=0.1,\n value=[df_filtered[ATTRIBUTE_COL].min(), df_filtered[ATTRIBUTE_COL].max()],\n marks={\n df_filtered[ATTRIBUTE_COL].min(): str(df_filtered[ATTRIBUTE_COL].min()),\n df_filtered[ATTRIBUTE_COL].max(): str(df_filtered[ATTRIBUTE_COL].max()),\n },\n ),\n ]\n ),\n html.Div(\n [\n html.Label(\"Select minimum number of mutations\"),\n dcc.Input(\n id=\"numMutation-input-hit\",\n type=\"number\",\n # debounce=True, # limits firing after every change, does not change until e.g. hit enter\n min=df_filtered[\"numMutation\"].min(),\n max=df_filtered[\"numMutation\"].max(),\n step=1,\n value=df_filtered[\"numMutation\"].min(),\n ),\n ]\n ),\n html.Div(\n [\n html.Label(\"select mutation site(s)\"),\n dcc.Dropdown(\n id=\"mutation-site-dropdown-hit\",\n # populatte options with all available sites\n options=populate_mutation_site_dropdown(df_filtered, 'options'),\n # value=populate_mutation_site_dropdown(df_filtered, 'value'),\n multi=True,\n ),\n ]\n ),\n html.Div(\n [\n html.Label(\"select mutation site(s)\"),\n dcc.Dropdown(\n id=\"new-mutation-site-dropdown-hit\",\n # populatte options with all available sites\n options=populate_new_mutation_site_dropdown(df_filtered, 'options'),\n # value=populate_new_mutation_site_dropdown(df_filtered, 'value'),\n multi=True,\n ),\n ]\n ),\n html.Div(\n [\n html.Label(\"select variant whose children to select\"),\n dcc.Dropdown(\n id=\"children-dropdown-hit\",\n # populatte options with all available sites\n options=populate_children_options(df_filtered),\n # value='None',\n ),\n ]\n ),\n html.Div(\n [\n # 2 dropdowns for exact position\n html.Label(\n \"Select position and amino acid\"\n ),\n dcc.Dropdown(\n id=\"exact-position-site-hit\",\n # populate options with all available sites\n options=[{\"label\": str(site), \"value\": site} for site in list(range(1, seq_length + 1))],\n placeholder='Select position', \n ),\n dcc.Dropdown(\n id=\"exact-position-aa-hit\",\n # populate options with available aa at that site with callback, none if position is none\n placeholder='Select amino acid', \n ),\n ],\n ),\n\n ]", "def create_query(lowerb, upperb, highprecisionsubset='1'):\n query = \"\"\"SELECT DISTINCT rxcui_in, drug_desc FROM (\n SELECT * FROM medi_indication\n WHERE icd9 NOT LIKE '%-%'\n AND icd9 NOT LIKE '%|%'\n AND icd9 !~ '^[A-z]+') AS single_icd9_codes\n WHERE cast(icd9 AS float) >= {0}\n AND cast(icd9 AS float) < {1}\n AND highprecisionsubset ='{2}';\"\"\".format(lowerb, upperb, highprecisionsubset)\n return query", "def test_forming_propositions_by_distance_in_meters_to_all_buildings_of_Infrastructure():", "def get_agg_goals(self, experiment: Experiment) -> pd.DataFrame:\n pass", "def grid_people(self,prettyPrint):\n \n grid_people = []\n grid_people.append(\", grid_people as\")\n grid_people.append(\"(\")\n grid_people.append(\"SELECT gid, geom, distance, sum(total_deaths) OVER w as total_deaths\")\n grid_people.append(\"FROM grid_person_join\")\n grid_people.append(\"WINDOW w AS 
(PARTITION BY gid, geom ORDER BY distance ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )\")\n grid_people.append(\")\")\n\n return self.print_statements(grid_people, prettyPrint)", "def filter_gps_distance(correct_gps,distorted_gps,match_features):\n\n dist = []\n for i in range(len(correct_gps)):\n (x1,y1) = correct_gps[i]\n (x2,y2) = distorted_gps[i]\n dist.append(metres_between_gps(y1,x1,y2,x2))\n\n mean = np.mean(dist)\n\n print 'mean dist = ', mean\n print 'min dist = ', np.min(dist)\n print 'max dist = ', np.max(dist)\n\n sel_gps_correct = []\n sel_gps_distorted = []\n sel_match = []\n for j in range(len(correct_gps)):\n #if dist[j] <mean +std and dist[j] > mean -std:\n if dist[j] <= 5:\n sel_gps_correct.append(correct_gps[j])\n sel_gps_distorted.append(distorted_gps[j])\n sel_match.append(match_features[j])\n print '#selected matches: %d out of %d'% (len(sel_gps_distorted), len(distorted_gps))\n return sel_gps_correct,sel_gps_distorted,sel_match", "def grouper(df):\n print(\"performing groupby and sum\")\n\n df.loc[df['outcome_id'] != 'death2', 'outcome_id'] = 'case'\n\n groups = ['location_id', 'year_start', 'year_end', 'age_group_unit',\n 'age_group_id', 'sex_id', 'source', 'nid',\n 'facility_id', 'representative_id', 'diagnosis_id',\n 'metric_id', 'outcome_id', 'nonfatal_cause_name']\n df = df.groupby(groups).agg({'val': 'sum'}).reset_index()\n\n return df", "def test_5():\n table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')\n\n result = aspects.group_by(table, ['race/ethnicity'], \n enums.SummaryOperators.PROPORTION_OF_SUM)\n \n result_table = result['table']\n result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'reading score'])\n \n result_suggestions = result['suggestions']\n\n # Sum of proportion column should be(close to) 1.0\n assert(float(format(result_table['reading score'].sum(), '.5f')) == 1)\n\n print(result_table)\n\n expected_result_table = \"\"\" race/ethnicity reading score\n0 group A 0.083216\n1 group B 0.185011\n2 group C 0.318698\n3 group D 0.265263\n4 group E 0.147812\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result_table == result_table.to_string())\n assert(str(result_suggestions) == expected_suggestions)", "def test_grdfilter_dataarray_in_dataarray_out(grid, expected_grid):\n result = grdfilter(\n grid=grid, filter=\"g600\", distance=\"4\", region=[-53, -49, -20, -17]\n )\n # check information of the output grid\n assert isinstance(result, xr.DataArray)\n assert result.gmt.gtype == 1 # Geographic grid\n assert result.gmt.registration == 1 # Pixel registration\n # check information of the output grid\n xr.testing.assert_allclose(a=result, b=expected_grid)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
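Because the query text and the Python generator above are maintained separately, they can drift apart. A low-effort guard is a whitespace-insensitive check that the key clauses of the filter_expected CTE are present. In the sketch below the CTE string is inlined for self-containment; in practice it would be the return value of filter_expected.

# Whitespace-insensitive clause check for the filter_expected CTE.
def normalize(sql):
    return " ".join(sql.split())

filter_expected_cte = """
, filter_expected as
(
SELECT b.gid, b.geom, b.min_buffer_distance, sum(gpj.total_deaths) as expected_deaths
FROM grid_person_join gpj
INNER JOIN buffer_definition b ON gpj.gid = b.gid
WHERE gpj.distance <= b.min_buffer_distance
GROUP BY b.gid, b.geom, b.min_buffer_distance
)
"""

flat = normalize(filter_expected_cte)
assert "INNER JOIN buffer_definition b ON gpj.gid = b.gid" in flat
assert "WHERE gpj.distance <= b.min_buffer_distance" in flat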
observed as (
    SELECT d.decd_res_zip5 as zip, z.geom, COUNT(1) as observed_deaths
    FROM disparities.decd d
def observed(self, prettyPrint):
    observed_calc = []
    observed_calc.append(", observed as")
    observed_calc.append("(")
    observed_calc.append("SELECT d.decd_res_zip5 as zip, z.geom, COUNT(1) as observed_deaths")
    observed_calc.append("FROM disparities.decd d ")
    observed_calc.append("INNER JOIN mn_zcta_wgs84 z ON z.zcta5ce10::integer = d.decd_res_zip5::integer")
    observed_calc.append("WHERE decd_age_yr >= {}".format(self.youngestAge))
    observed_calc.append("AND decd_age_yr <= {}".format(self.oldestAge))
    observed_calc.append("AND d.decd_res_zip5 <> 'NA'")
    observed_calc.append("GROUP BY zip, z.geom")
    observed_calc.append(")")

    return self.print_statements(observed_calc, prettyPrint)
[ "def filter_observed(self, prettyPrint):\n\n filter_obs = []\n filter_obs.append(\", filter_observed as\")\n filter_obs.append(\"(\")\n filter_obs.append(\"SELECT b.gid, count(o.observed_deaths) as number_of_zctas_used, sum(o.observed_deaths) as observed_deaths\")\n filter_obs.append(\"FROM buffer_definition b\")\n #### This seems wierd we need an index on this with geom 26915 on the centroids?\n filter_obs.append(\"LEFT JOIN observed o on ST_DWithin( b.geom, ST_Transform(ST_Centroid(o.geom), 26915), b.min_buffer_distance)\")\n filter_obs.append(\"GROUP BY b.gid, b.geom\")\n filter_obs.append(\")\")\n \n return self.print_statements(filter_obs, prettyPrint)", "def generate_metric(dedup_df: pd.DataFrame) -> pd.DataFrame:\n try:\n dedup_us_df = dedup_df[dedup_df['Country_Region'] == 'US']\n cleaned_df = dedup_us_df.copy()\n cleaned_df['month'] = pd.DatetimeIndex(cleaned_df['Date']).month\n cleaned_df['year'] = pd.DatetimeIndex(cleaned_df['Date']).year\n metric_df = cleaned_df.groupby(['Province_State', 'year', 'month'])[\"ConfirmedCases\"].sum()\n LOG.info(f\"data: generate_metric [{metric_df.shape[0]}] records\")\n except Exception as error:\n LOG.exception(f\"data: generate_metric could not be completed. {error}\")\n return metric_df", "def demogs_dim_table(config, bucket):\n out_path = 's3://' + bucket.name + '/' + config['S3']['staging'] + '/'\n \n print(\"Loading Demogs data and state lookup table\")\n file = 's3://' + bucket.name + '/' + config['S3']['RAWDATA'] + '/' + config['RAWDATA']['demogs']\n df = pd.read_csv(file, sep = \";\")\n \n file = 's3://' + bucket.name + '/' + config['S3']['RAWDATA'] + '/' + config['RAWDATA']['lookup'] + '/' + config['RAWDATA']['state_lookup']\n state = pd.read_csv(file)\n \n print(\"Data loaded\")\n print(\"Cleaning Demographics data\")\n \n df.columns = df.columns.str.lower().str.replace(' ', '_')\n \n ## Extract race level columns\n tmp = pd.pivot_table(df, values = 'count', columns = ['race'] , index = 'state_code')\n tmp.columns = tmp.columns.str.lower().str.replace(' ', '_')\n tmp.columns = ['american_indian_and_alaska_native', 'asian','black_or_african', 'hispanic_or_latino', 'white']\n tmp = tmp.reset_index()\n \n ## Summarizing Demographics table at State level\n df = df.groupby(['state_code', 'state', 'city']).agg({'median_age': np.mean,\n 'male_population': np.mean,\n 'female_population': np.mean,\n 'total_population': np.mean,\n 'number_of_veterans': np.mean,\n 'foreign-born': np.mean,\n 'average_household_size': np.mean}).reset_index(). \\\n groupby(['state_code', 'state']). 
\\\n agg({'median_age': np.mean,\n 'male_population': np.sum,\n 'female_population': np.sum,\n 'total_population': np.sum,\n 'number_of_veterans': np.sum,\n 'foreign-born': np.sum,\n 'average_household_size': np.mean}).reset_index()\n \n \n tmp.index = tmp['state_code']\n df.index = df['state_code']\n \n df = df.drop(['state_code','state'], axis = 1)\n tmp = tmp.drop('state_code', axis = 1)\n \n df = df.join(tmp)\n df = df.reset_index()\n df = df.fillna(0)\n \n state = state.drop('Unnamed: 0', axis = 1)\n df = df.merge(state, left_on = 'state_code', right_on = 'id', how = \"right\")\n df = df.fillna(-10)\n df = df.drop('id', axis = 1)\n # Change data type\n to_int_cols = ['american_indian_and_alaska_native', 'asian', 'black_or_african', 'hispanic_or_latino', 'white',\n 'male_population', 'female_population', 'total_population', 'number_of_veterans', 'foreign-born']\n df.loc[:,to_int_cols] = df.loc[:,to_int_cols].astype(int)\n \n print(\"Data Cleaning completed\")\n \n print(f\"Data quality check: Demogs data has {df.shape[0]} rows and {df.shape[1]} columns\")\n \n df.to_csv(out_path + config['STAGING']['demogs_dim_table'] + '/demogs.csv', index = False)\n \n print(\"Demogs dimension table written to S3\")", "def case_death_timeseries(states = \"*\", download = False, aggregation_cols = [\"detected_state\", \"detected_district\"], last_API_file: int = 26):\n ts = get_state_timeseries(states, download)\n ts_index = pd.date_range(start = ts.index.get_level_values(-1).min(), end = ts.index.get_level_values(-1).max(), freq = \"D\")\n\n return ts.unstack(-1)\\\n .fillna(0)\\\n .stack()\\\n .swaplevel(-1, 0)\\\n .reindex(ts_index, level = 0, fill_value = 0)\\\n .swaplevel(-1, 0)", "def climate():\n sel = [\n zipcode_data.zipcode,\n zipcode_data.jan_avg_temp, # Average invidiual income\n ]\n\n results = db.session.query(*sel).filter(zipcode_data.state == \"06\").all()\n\n # Create dataframe for results, to be sorted\n df = pd.DataFrame({\n \"zipcode\":[],\n \"jan_avg_temp\":[]\n })\n\n # Counter for populating dataframe\n zipcodes=[]\n jan_avg_temps=[]\n\n # Loop through results and create dataframe\n for result in results:\n zipcodes.append(result[0])\n jan_avg_temps.append(result[1])\n\n df=pd.DataFrame({\n \"zipcode\":zipcodes,\n \"jan_avg_temp\":jan_avg_temps\n })\n \n # Sort dataframe\n sorted_df=df.sort_values(by=[\"jan_avg_temp\"],ascending=False)\n\n # Take top 10 zips from sorted dataframe\n shorted_df=sorted_df[\"zipcode\"][0:10]\n\n # Return sorted zipcodes\n return(jsonify(list(shorted_df)))", "def get_simple_covid_data():\n #download latest data from Oxford\n DATA_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'\n df = pd.read_csv(DATA_URL,\n parse_dates=['Date'],\n encoding=\"ISO-8859-1\",\n dtype={\"RegionName\": str},\n error_bad_lines=False,\n usecols = ['Date','Jurisdiction','RegionName',\n 'CountryName','ConfirmedCases','ConfirmedDeaths'])\n \n #forward fill NaNs in confirmed cases and confirmed deaths columns\n #if January 1st is NaN, set to 0\n df.loc[(df.Date == '2020-01-01') & (df['ConfirmedCases'].isna()), 'ConfirmedCases'] = 0\n df.loc[(df.Date == '2020-01-01') & (df['ConfirmedDeaths'].isna()), 'ConfirmedDeaths'] = 0\n df[['ConfirmedCases','ConfirmedDeaths']] = df[['ConfirmedCases','ConfirmedDeaths']].fillna(method = 'ffill')\n\n #add new cases and new deaths columns\n for state in df[(df['Jurisdiction'] == 'STATE_TOTAL')]['RegionName'].unique():\n state_inds = (df['Jurisdiction'] == 'STATE_TOTAL') & (df['RegionName'] == 
state)\n df.loc[state_inds, 'NewCases'] = df.loc[state_inds, 'ConfirmedCases'].diff().fillna(0)\n df.loc[state_inds, 'NewDeaths'] = df.loc[state_inds, 'ConfirmedDeaths'].diff().fillna(0)\n\n for country in df[(df['Jurisdiction'] == 'NAT_TOTAL')]['CountryName'].unique():\n nat_inds = (df['Jurisdiction'] == 'NAT_TOTAL') & (df['CountryName'] == country)\n df.loc[nat_inds, 'NewCases'] = df.loc[nat_inds, 'ConfirmedCases'].diff().fillna(0)\n df.loc[nat_inds, 'NewDeaths'] = df.loc[nat_inds, 'ConfirmedDeaths'].diff().fillna(0)\n \n return df", "def population_stats(df):\n\n return ...", "def nominal_ventilation_aggregation(x):\n how_dict = {\n \"Name\": top(x[\"Name\"], x, \"Zone Floor Area {m2}\"),\n \"Schedule Name\": top(x[\"Schedule Name\"], x, \"Zone Floor Area {m2}\"),\n \"Zone Floor Area {m2}\": top(\n x[\"Zone Floor Area {m2}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"# Zone Occupants\": top(x[\"# Zone Occupants\"], x, \"Zone Floor Area {m2}\"),\n \"Design Volume Flow Rate {m3/s}\": weighted_mean(\n x[\"Design Volume Flow Rate {m3/s}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Volume Flow Rate/Floor Area {m3/s/m2}\": weighted_mean(\n x.filter(like=\"Volume Flow Rate/Floor Area\").squeeze(axis=1),\n x,\n \"Zone Floor Area {m2}\",\n ),\n \"Volume Flow Rate/person Area {m3/s/person}\": weighted_mean(\n x.filter(like=\"Volume Flow Rate/person Area\").squeeze(axis=1),\n x,\n \"Zone Floor \" \"Area {m2}\",\n ),\n \"ACH - Air Changes per Hour\": weighted_mean(\n x[\"ACH - Air Changes per Hour\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Fan Pressure Rise {Pa}\": weighted_mean(\n x[\"Fan Pressure Rise {Pa}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Fan Efficiency {}\": weighted_mean(\n x[\"Fan Efficiency {}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Equation A - Constant Term Coefficient {}\": top(\n x[\"Equation A - Constant Term Coefficient {}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Equation B - Temperature Term Coefficient {1/C}\": top(\n x[\"Equation B - Temperature Term Coefficient {1/C}\"],\n x,\n \"Zone Floor Area {m2}\",\n ),\n \"Equation C - Velocity Term Coefficient {s/m}\": top(\n x[\"Equation C - Velocity Term Coefficient {s/m}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Equation D - Velocity Squared Term Coefficient {s2/m2}\": top(\n x[\"Equation D - Velocity Squared Term Coefficient {s2/m2}\"],\n x,\n \"Zone Floor Area {m2}\",\n ),\n \"Minimum Indoor Temperature{C}/Schedule\": top(\n x[\"Minimum Indoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Maximum Indoor Temperature{C}/Schedule\": top(\n x[\"Maximum Indoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Delta Temperature{C}/Schedule\": top(\n x[\"Delta Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Minimum Outdoor Temperature{C}/Schedule\": top(\n x[\"Minimum Outdoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Maximum Outdoor Temperature{C}/Schedule\": top(\n x[\"Maximum Outdoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Maximum WindSpeed{m/s}\": top(\n x[\"Maximum WindSpeed{m/s}\"], x, \"Zone Floor Area {m2}\"\n ),\n }\n try:\n df = pd.DataFrame(how_dict, index=range(0, 1)) # range should always be\n # one since we are trying to merge zones\n except Exception as e:\n log(\"{}\".format(e))\n else:\n return df", "def test_measurements_count(db: me_db.Database):\n with db.Session() as s:\n q = s.query(me_db.Measurement).filter(\n me_db.Measurement.source == \"LifeCycle\"\n )\n\n # Test dataset has 25 entries - the first of which spans three 
days.\n assert q.count() == 28", "def grouped_drugs(some_data):\n some_data1 = some_data.groupby([\"MannerofDeath\", \"Sex\", \"FiscalYear\"]).agg(\n {\"Morphine_NotHeroin\": \"count\", \"Ethanol\": \"count\" })\n some_data1.reset_index(inplace=True)\n #some_data1.drop([6, 7], inplace=True)\n some_data1[\"FiscalYear\"] = some_data1[\"FiscalYear\"].astype(str)\n return some_data1", "def summarize_col_data( data_rec: DataCacheRec ) -> Dict:\n # %%\n df = data_rec.data\n df['confirmed'] = 1\n\n by_sex = df['sex'].value_counts()\n # %%\n agg_spec = {\n 'confirmed': 'count',\n 'death': 'sum',\n 'recovered': 'sum',\n 'active': 'sum'\n }\n\n by_city = ( df\n .groupby('city').agg(agg_spec).reset_index()\n .sort_values('confirmed', ascending=False) )\n by_state = ( df.groupby('state').agg(agg_spec).reset_index()\n .sort_values('confirmed', ascending=False) )\n\n by_sex_age_agg = ( df\n .groupby( ['age', 'sex'] )\n .agg( {'confirmed': 'count'} )\n .reset_index() )\n\n by_sex_age = ( by_sex_age_agg\n .pivot(index='age', columns='sex', values='confirmed')\n .reset_index() )\n\n by_sex_age.index.name = None\n by_sex_age.columns.name = None\n\n # %%\n data = {\"confirmed\": df['confirmed'].sum(),\n \"deaths\": df['death'].sum(),\n \"active\": df['active'].sum(),\n \"recovered\": df['recovered'].sum(),\n \"in_hospital\": df['in_hospital'].sum(),\n \"men\": by_sex.loc['M'],\n \"women\": by_sex.loc['F'],\n \"n_cities\": by_city.shape[0],\n \"n_states\": by_state.shape[0],\n \"by_city\": by_city,\n \"by_state\": by_state,\n \"by_sex_age\": by_sex_age,\n \"last_mtime\": data_rec.mtime\n }\n # %%\n return data\n # %%", "def aggregate_national_estimates_by_district(self):\n data = {}\n states = Division.objects.filter(level=self.STATE_LEVEL)\n for state in tqdm(states):\n districts = Division.objects.filter(\n level=self.DISTRICT_LEVEL, parent=state\n )\n for district in districts:\n aggregated_labels = []\n estimates = CensusEstimate.objects.filter(division=district)\n for estimate in estimates:\n series = estimate.variable.table.series\n year = estimate.variable.table.year\n table = estimate.variable.table.code\n\n label = None\n if estimate.variable.label:\n label = estimate.variable.label.label\n table_label = \"{}{}\".format(table, label)\n\n code = estimate.variable.code\n if series not in data:\n data[series] = {}\n if year not in data[series]:\n data[series][year] = {}\n if table not in data[series][year]:\n data[series][year][table] = {}\n if state.code not in data[series][year][table]:\n data[series][year][table][state.code] = {}\n if (\n district.code\n not in data[series][year][table][state.code]\n ):\n data[series][year][table][state.code][\n district.code\n ] = {}\n if label is not None:\n if table_label not in aggregated_labels:\n aggregated_labels.append(table_label)\n if (\n len(\n CensusEstimate.objects.filter(\n variable=estimate.variable,\n division=district.id,\n )\n )\n > 0\n ):\n data[series][year][table][state.code][\n district.code\n ][label] = self.aggregate_variable(\n estimate, district.id\n )\n else:\n data[series][year][table][state.code][district.code][\n code\n ] = estimate.estimate\n return data", "def observed_stat(heroes):\n\n return ...", "def spatial_filter_query(self, prettyPrint):\n \n spatial_filter = []\n spatial_filter.append(\"SELECT e.gid, e.geom, e.min_buffer_distance, e.expected_deaths, o.number_of_zctas_used, o.observed_deaths, coalesce(o.observed_deaths,0)/e.expected_deaths as ratio\")\n spatial_filter.append(\"FROM filter_expected e \")\n 
spatial_filter.append(\"INNER JOIN filter_observed o ON (e.gid = o.gid)\")\n \n return self.print_statements(spatial_filter, prettyPrint)", "def _calc_distinct_id_counts(db_config, month, year):\n with utils.create_db_connection(db_config) as conn, conn.cursor() as cursor, utils.CodeProfiler() as cp:\n results = defaultdict(lambda: dict(num_triplets=0, num_imeis=0, num_imsis=0, num_msisdns=0,\n num_imei_imsis=0, num_imei_msisdns=0, num_imsi_msisdns=0))\n cursor.execute(\n \"\"\"SELECT operator_id,\n data_date,\n (hll_cardinality(COALESCE(hll_union_agg(triplet_hll), hll_empty())))::BIGINT AS num_triplets,\n (hll_cardinality(COALESCE(hll_union_agg(imei_hll), hll_empty())))::BIGINT AS num_imeis,\n (hll_cardinality(COALESCE(hll_union_agg(imsi_hll), hll_empty())))::BIGINT AS num_imsis,\n (hll_cardinality(COALESCE(hll_union_agg(msisdn_hll), hll_empty())))::BIGINT AS num_msisdns,\n (hll_cardinality(COALESCE(hll_union_agg(imei_imsis_hll), hll_empty())))::BIGINT\n AS num_imei_imsis,\n (hll_cardinality(COALESCE(hll_union_agg(imei_msisdns_hll), hll_empty())))::BIGINT\n AS num_imei_msisdns,\n (hll_cardinality(COALESCE(hll_union_agg(imsi_msisdns_hll), hll_empty())))::BIGINT\n AS num_imsi_msisdns\n FROM daily_per_mno_hll_sketches\n WHERE date_part('month', data_date) = %(month)s\n AND date_part('year', data_date) = %(year)s\n GROUP BY CUBE (operator_id, data_date)\n \"\"\",\n {'month': month, 'year': year}\n )\n\n results = [res._asdict() for res in cursor]\n\n return results, cp.duration, [cp.duration]", "def per_location_analysis(location, Drugs_Grouped_InjuryPlace):\n deaths_residence = Drugs_Grouped_InjuryPlace.loc[(Drugs_Grouped_InjuryPlace[\"InjuryPlace\"] == location)]\n df = pd.DataFrame(deaths_residence.groupby(\"FiscalYear\")[\"Heroin\"].count())\n df.reset_index(inplace=True)\n return df", "def exampleone():\n example_untitdy = {'country': ['Afghanistan', 'Afghanistan', 'Afghanistan', 'Afghanistan', 'Brazil', 'Brazil'],\n 'year': [1999, 1999, 2000, 2000, 1999, 1999],\n 'obser': ['cases', 'population', 'cases', 'population', 'cases', 'population'],\n 'count': [745, 19987071, 2666, 20595360, 37737, 172006362]}\n df_untidy = pd.DataFrame(example_untitdy)\n return df_untidy", "def count_synonymous_per_gene(de_novos):\n \n synonymous = de_novos[de_novos[\"consequence\"] == \"synonymous_variant\"]\n counts = synonymous.pivot_table(index=\"symbol\", values=\"alt\", aggfunc=len)\n if len(counts) > 0:\n counts = pandas.DataFrame({\"hgnc\": counts.index, \"observed\": list(counts['alt'])})\n else:\n counts = pandas.DataFrame({\"hgnc\": counts.index, \"observed\": []})\n \n counts.index = range(len(counts))\n \n return counts", "def activity_dailies(obj):\n df = obj.datasets['motion']\n drop = ['bed_in','Back door','Front door']\n activity_metrics = (df.\n query('location_name not in @drop').\n assign(activity=True).\n set_index('start_date').\n groupby(['patient_id','location_name']).\n resample('1D',offset='12h').\n activity.\n count().\n swaplevel(-2,-1).\n unstack())\n activity_metrics['Total'] = activity_metrics.sum(axis=1)\n activity_metrics= activity_metrics.query('Total > 0.0')\n return activity_metrics" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
filter_observed as (
    SELECT b.gid, count(o.observed_deaths) as number_of_zctas_used, sum(o.observed_deaths) as observed_deaths
    FROM buffer_definition b
    INNER JOIN observed o on ST_DWithin( b.geom, ST_Transform(ST_Centroid(o.geom), 26915), b.min_buffer_distance)
    GROUP BY b.gid, b.geom
)
def filter_observed(self, prettyPrint):

    filter_obs = []
    filter_obs.append(", filter_observed as")
    filter_obs.append("(")
    filter_obs.append("SELECT b.gid, count(o.observed_deaths) as number_of_zctas_used, sum(o.observed_deaths) as observed_deaths")
    filter_obs.append("FROM buffer_definition b")
    #### This seems wierd we need an index on this with geom 26915 on the centroids?
    filter_obs.append("LEFT JOIN observed o on ST_DWithin( b.geom, ST_Transform(ST_Centroid(o.geom), 26915), b.min_buffer_distance)")
    filter_obs.append("GROUP BY b.gid, b.geom")
    filter_obs.append(")")

    return self.print_statements(filter_obs, prettyPrint)
[ "def spatial_filter_query(self, prettyPrint):\n \n spatial_filter = []\n spatial_filter.append(\"SELECT e.gid, e.geom, e.min_buffer_distance, e.expected_deaths, o.number_of_zctas_used, o.observed_deaths, coalesce(o.observed_deaths,0)/e.expected_deaths as ratio\")\n spatial_filter.append(\"FROM filter_expected e \")\n spatial_filter.append(\"INNER JOIN filter_observed o ON (e.gid = o.gid)\")\n \n return self.print_statements(spatial_filter, prettyPrint)", "def filter(self, context, pools, zone):", "def soundings_in_geom(geom):\n return DepthSounding.objects.filter(geometry__within=geom)", "def get_gridssDF_filtered_from_filtersDict(df_gridss, filters_dict, reference_genome):\n\n # debug the fact that there is no min_af_EitherSmallOrLargeEvent\n if \"min_af_EitherSmallOrLargeEvent\" not in filters_dict: filters_dict[\"min_af_EitherSmallOrLargeEvent\"] = 0.0\n\n # get the filtered df\n df_filt = get_gridssDF_filtered(df_gridss, reference_genome, min_Nfragments=filters_dict[\"min_Nfragments\"], min_af=filters_dict[\"min_af\"], wrong_INFOtags=filters_dict[\"wrong_INFOtags\"], wrong_FILTERtags=filters_dict[\"wrong_FILTERtags\"], filter_polyGC=filters_dict[\"filter_polyGC\"], filter_noSplitReads=filters_dict[\"filter_noSplitReads\"], filter_noReadPairs=filters_dict[\"filter_noReadPairs\"], maximum_strand_bias=filters_dict[\"maximum_strand_bias\"], maximum_microhomology=filters_dict[\"maximum_microhomology\"], maximum_lenght_inexactHomology=filters_dict[\"maximum_lenght_inexactHomology\"], range_filt_DEL_breakpoints=filters_dict[\"range_filt_DEL_breakpoints\"], min_length_inversions=filters_dict[\"min_length_inversions\"], dif_between_insert_and_del=filters_dict[\"dif_between_insert_and_del\"], max_to_be_considered_small_event=filters_dict[\"max_to_be_considered_small_event\"], min_size=filters_dict[\"min_size\"], add_columns=False, min_af_EitherSmallOrLargeEvent=filters_dict[\"min_af_EitherSmallOrLargeEvent\"], min_QUAL=filters_dict[\"min_QUAL\"], filter_overlappingRepeats=filters_dict[\"filter_overlappingRepeats\"] )\n\n return df_filt", "def buffer_definition(self, populationThreshold, prettyPrint): # should there be an input for total_deaths?\n \n buffer_definition = []\n buffer_definition.append(\", buffer_definition as\")\n buffer_definition.append(\"(\")\n buffer_definition.append(\"SELECT gid, geom, min(distance) as min_buffer_distance\")\n buffer_definition.append(\"FROM grid_people\")\n buffer_definition.append(\"WHERE total_deaths >= {}\".format(populationThreshold))\n buffer_definition.append(\"GROUP BY gid, geom\")\n buffer_definition.append(\")\")\n \n return self.print_statements(buffer_definition, prettyPrint)", "def get_bob_query(m,rcut=3.6):\n na = len(m)\n zs = m.numbers\n esa = zs #-1. 
* zs**2.4\n zs1 = np.unique(zs)\n nz1 = len(zs1)\n izs = np.arange(nz1)\n boa = np.zeros((na,nz1))\n for i in range(na):\n boa[i,izs[zs[i]==zs1][0]] = esa[i]\n\n zpairs = [ (zi,zi) for zi in zs1 ] + list( itl.combinations(zs1,2) )\n dic0 = {}\n for zpair in zpairs: dic0[zpair] = []\n ds = ssd.squareform(ssd.pdist(m.positions))\n\n dics = []; ns = []\n for i in range(na):\n dic = dic0.copy()\n for j in range(na):\n if i==j or ds[i,j]>rcut: continue\n zi,zj = zs[i],zs[j]\n pair = (min(zi,zj), max(zj,zi))\n if pair in dic.keys():\n dic[pair] +=[ ds[i,j] ]# [zi*zj/ds[i,j]]\n _ns = []\n for zpair in zpairs:\n _bob = dic[zpair]\n _l = len(_bob)\n if _l > 0: _bob.sort(reverse=True)\n _ns.append( _l )\n ns.append(_ns); dics.append(dic)\n nbs = np.max(ns, axis=0) #; print 'nbs=',nbs\n l = sum(nbs)\n idx2 = np.cumsum(nbs)\n idx1 = np.array([0]+list(idx2[:-1]),dtype=int)\n bob = np.zeros((na,l))\n for i in range(na):\n dic = dics[i]\n for j,zpair in enumerate(zpairs):\n _bob = dic[zpair]\n lenj = len(_bob)\n ib = idx1[j]; ie = ib+lenj\n bob[i, ib:ie] = _bob\n bob = np.concatenate((boa,bob), axis=1)\n return zs1, zpairs,idx1,idx2,bob", "def _filter_gt_instances_by_size(batch_data_samples: SampleList,\n wh_thr: tuple) -> SampleList:\n for data_samples in batch_data_samples:\n bboxes = data_samples.gt_instances.bboxes\n if bboxes.shape[0] > 0:\n w = bboxes[:, 2] - bboxes[:, 0]\n h = bboxes[:, 3] - bboxes[:, 1]\n data_samples.gt_instances = data_samples.gt_instances[\n (w > wh_thr[0]) & (h > wh_thr[1])]\n return batch_data_samples", "def filter_queries(args):\n for split_name in ['train', 'val']:\n split_dir = '%s/datasets/netvlad/%d/%s'%(cst.SCRIPT_DIR, args.data_id, split_name)\n metadata = Metadata(split_dir)\n\n knn = NearestNeighbors(n_jobs=-1)\n knn.fit(metadata.utmDb)\n\n # list of array of db idx matching a query\n # nontrivial_positives[i] = list of db img idx matching the i-th query\n nontrivial_positives = list(knn.radius_neighbors(metadata.utmQ,\n radius=metadata.dist_pos, return_distance=False))\n #print(nontrivial_positives) # [i]=array([ 0, 1, 2, 3, 4, 5])\n \n # its possible some queries don't have any non trivial potential positives\n # lets filter those out\n queries_idx = np.where(np.array([len(x) for x in nontrivial_positives])>0)[0]\n #metadata.utmQ = metadata.utmQ[queries_idx,:]\n #metadata.qImage = metadata.qImage[queries_idx]\n #num_queries = queries_idx.shape[0]\n \n metadata.filter(queries_idx)\n metadata.save()\n \n # debug\n if (1==1):\n toto = np.array([i for i,l in enumerate(nontrivial_positives) if len(l)>0])\n toto_sum = np.sum( (toto - queries_idx))\n if toto_sum!=0:\n print(toto_sum)\n print(\"Error somewhere in dataset\")\n exit(1)\n nontrivial_positives = [l for l in nontrivial_positives if len(l)>0]", "def dist_filter(xyz, dists, result):\n i = knn2\n il = 0\n end = xyz.shape[0] - knn2\n while i < end:\n dst = ((xyz[il,0] - xyz[i,0])**2 + \\\n (xyz[il,1] - xyz[i,1])**2 + \\\n (xyz[il,2] - xyz[i,2])**2)**0.5\n\n if dst >= abs(xyz[i,2])*depth_multiplier:\n il = i - knn2 + np.argmin(dists[i-knn2:i+knn2+1])\n result[il] = True\n i += knn2 - 1\n else:\n i += 1\n\n return result", "def query5():\n docs = db.airbnb.aggregate([\n {\n '$geoNear': {\n 'near': {'type': 'Point', 'coordinates': [-73.9654, 40.7829]},\n 'distanceField': 'dist.calculated',\n 'maxDistance': 1000,\n 'includeLocs': 'dist.location',\n 'spherical': False\n }\n },\n {\n '$project': {\n '_id': 0,\n 'dist': 1,\n 'location': 1,\n 'name': 1,\n 'neighbourhood': 1,\n 'neighbourhood_group': 1,\n 'price': 1,\n 
'room_type': 1\n }\n }\n ])\n result = [doc for doc in docs]\n return result", "def test_spatial_filter_eeg_and_meg():\n new_inst = apply_spatial_filter(raw_all.copy(), \"eeg\")\n picks = dict(_picks_by_type(raw_all.info, exclude=[]))\n picks_eeg = picks[\"eeg\"]\n picks_non_eeg = [\n idx for idx in np.arange(len(raw_all.ch_names)) if idx not in picks_eeg\n ]\n assert not np.all(\n new_inst._data[picks_eeg, :] == raw_all._data[picks_eeg, :]\n )\n assert np.all(\n new_inst._data[picks_non_eeg, :] == raw_all._data[picks_non_eeg, :]\n )", "def get_stats(gene_df, df_path, criteria, cell_type, bin=3000, df_function=df_to_index_danpos):\n # print 'get stats', len(gene_df['gene'].unique())\n if criteria != 'skewness' and criteria != 'kurtosis':\n table_dict = df_function(df_path)\n else:\n df_function = df_to_index_sk\n table_dict = df_function(df_path)\n\n results = defaultdict(float)\n\n for k in range(gene_df.shape[0]):\n gene_name = gene_df.iloc[k, 0]\n\n chr_name, start, end, length = gene_df.iloc[k, 1], gene_df.iloc[k, 2], gene_df.iloc[k, 3], gene_df.iloc[k, 4]\n ## Here is the problem, danpos selector will consider the entire overlapped peaks\n ## The other approach is using self designed peak calling, to make sure each parameter will return different value\n cur_table = set()\n\n if end < start:\n mid = (start + end) / 2\n start = mid\n end = mid\n\n for i in range(int(start/bin), int(end/bin) + 1):\n if chr_name in table_dict and i in table_dict[chr_name]:\n table = table_dict[chr_name][i]\n cur_table = cur_table.union(table)\n\n if len(cur_table) == 0:\n continue\n\n selected_table = []\n for t in cur_table:\n if start < t[1] < end:\n selected_table.append(t)\n elif start < t[2] < end:\n selected_table.append(t)\n elif t[1] <= start and end <= t[2]:\n selected_table.append(t)\n\n if len(selected_table) == 0:\n continue\n\n cur_df = pd.DataFrame(list(selected_table))\n\n if cur_df.shape[1] == 6:\n cur_df.columns = ['chr',\n 'start',\n 'end',\n 'width_above_cutoff',\n 'total_signal',\n 'height',]\n else:\n cur_df.columns = ['chr',\n 'start',\n 'end',\n 'width_above_cutoff',\n 'total_signal',\n 'height',\n 'skewness',\n 'kurtosis']\n\n if criteria == 'total_width':\n cur_col = cur_df['end'] - cur_df['start']\n cur_value = cur_col.sum()\n elif criteria == 'height':\n cur_value = cur_df['height'].max()\n elif criteria == 'single_width':\n cur_col = cur_df['end'] - cur_df['start']\n cur_value = cur_col.max()\n elif criteria == 'total_signal':\n cur_value = cur_df['total_signal'].sum()\n elif criteria == 'single_signal':\n cur_value = cur_df['total_signal'].max()\n elif criteria == 'coverage':\n cur_value = (cur_df['end'] - cur_df['start']).sum()*1.0/length\n\n\n #\n # # This is for kurtosis and skewness\n elif cur_df.shape[0] > 0 and criteria == 'skewness' and 'skewness' in cur_df.columns:\n cur_value = cur_df.ix[cur_df['total_signal'].argmax(),'skewness']\n elif cur_df.shape[0] > 0 and criteria == 'kurtosis' and 'kurtosis' in cur_df.columns:\n cur_value = cur_df.ix[cur_df['total_signal'].argmax(), 'kurtosis']\n\n\n if cur_value > results[gene_name] and criteria != 'skewness' and criteria != 'kurtosis':\n results[gene_name] = cur_value\n # this is for kurtosis and skewness\n\n elif criteria == 'kurtosis':\n if abs(cur_value) > abs(results[gene_name]):\n results[gene_name] = cur_value\n elif criteria == 'skewness':\n if abs(cur_value) > results[gene_name]:\n results[gene_name] = abs(cur_value)\n\n final = []\n\n for gene_name in gene_df['gene'].unique():\n final.append((gene_name, 
results[gene_name], cell_type))\n print len(final)\n return final", "def nominal_ventilation_aggregation(x):\n how_dict = {\n \"Name\": top(x[\"Name\"], x, \"Zone Floor Area {m2}\"),\n \"Schedule Name\": top(x[\"Schedule Name\"], x, \"Zone Floor Area {m2}\"),\n \"Zone Floor Area {m2}\": top(\n x[\"Zone Floor Area {m2}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"# Zone Occupants\": top(x[\"# Zone Occupants\"], x, \"Zone Floor Area {m2}\"),\n \"Design Volume Flow Rate {m3/s}\": weighted_mean(\n x[\"Design Volume Flow Rate {m3/s}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Volume Flow Rate/Floor Area {m3/s/m2}\": weighted_mean(\n x.filter(like=\"Volume Flow Rate/Floor Area\").squeeze(axis=1),\n x,\n \"Zone Floor Area {m2}\",\n ),\n \"Volume Flow Rate/person Area {m3/s/person}\": weighted_mean(\n x.filter(like=\"Volume Flow Rate/person Area\").squeeze(axis=1),\n x,\n \"Zone Floor \" \"Area {m2}\",\n ),\n \"ACH - Air Changes per Hour\": weighted_mean(\n x[\"ACH - Air Changes per Hour\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Fan Pressure Rise {Pa}\": weighted_mean(\n x[\"Fan Pressure Rise {Pa}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Fan Efficiency {}\": weighted_mean(\n x[\"Fan Efficiency {}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Equation A - Constant Term Coefficient {}\": top(\n x[\"Equation A - Constant Term Coefficient {}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Equation B - Temperature Term Coefficient {1/C}\": top(\n x[\"Equation B - Temperature Term Coefficient {1/C}\"],\n x,\n \"Zone Floor Area {m2}\",\n ),\n \"Equation C - Velocity Term Coefficient {s/m}\": top(\n x[\"Equation C - Velocity Term Coefficient {s/m}\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Equation D - Velocity Squared Term Coefficient {s2/m2}\": top(\n x[\"Equation D - Velocity Squared Term Coefficient {s2/m2}\"],\n x,\n \"Zone Floor Area {m2}\",\n ),\n \"Minimum Indoor Temperature{C}/Schedule\": top(\n x[\"Minimum Indoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Maximum Indoor Temperature{C}/Schedule\": top(\n x[\"Maximum Indoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Delta Temperature{C}/Schedule\": top(\n x[\"Delta Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Minimum Outdoor Temperature{C}/Schedule\": top(\n x[\"Minimum Outdoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Maximum Outdoor Temperature{C}/Schedule\": top(\n x[\"Maximum Outdoor Temperature{C}/Schedule\"], x, \"Zone Floor Area {m2}\"\n ),\n \"Maximum WindSpeed{m/s}\": top(\n x[\"Maximum WindSpeed{m/s}\"], x, \"Zone Floor Area {m2}\"\n ),\n }\n try:\n df = pd.DataFrame(how_dict, index=range(0, 1)) # range should always be\n # one since we are trying to merge zones\n except Exception as e:\n log(\"{}\".format(e))\n else:\n return df", "def capture_vsx_data(dates):\n df_obs = pd.DataFrame() # master dataframe of all obs and metadata, to be written to local .csv file.\n nobs_dict_list = []\n\n star_ids = get_bulletin_star_ids()\n df_bulletin = get_df_bulletin()\n jd_df_nobs_start = jd_from_datetime_utc(dates['nobs_start']) # these are the same for all stars.\n jd_df_nobs_end = jd_from_datetime_utc(dates['nobs_end']) # \"\n\n for star_id in star_ids:\n # Ensure enough dates to cover both df_obs and df_nobs:\n # ... 
where df_obs has all the observations, and df_nobs exists only to count nobs in past year.\n period = float(df_bulletin.loc[star_id, 'PERIOD']) # in days.\n jd_df_obs_start = jd_from_datetime_utc(dates['fit_end']) - FIT_PERIODS_TO_CAPTURE * period\n jd_vsx_start = min(jd_df_obs_start, jd_df_nobs_start) # jd_df_obs will almost always be the min.\n jd_vsx_end = jd_from_datetime_utc(dates['fit_end'])\n df_vsx = get_vsx_obs(star_id, jd_start=jd_vsx_start, jd_end=jd_vsx_end)\n\n # Calculate nobs for this star_id, save it as a dict, store dict in list.\n jd_all_obs = pd.Series([float(jd) for jd in df_vsx['JD']])\n keep_for_df_nobs = (jd_df_nobs_start <= jd_all_obs) & (jd_all_obs <= jd_df_nobs_end)\n nobs = sum(keep_for_df_nobs)\n nobs_dict = {'star_id': star_id, 'nobs': nobs}\n nobs_dict_list.append(nobs_dict)\n\n # Screen received observation dataframe, add weights column, add df to end of df_obs.\n keep_for_df_obs = list(jd_all_obs >= jd_df_obs_start)\n df_vsx_obs = df_vsx[keep_for_df_obs] # this will usually keep all obs\n df_screened = screen_star_obs(df_vsx_obs)\n df_this_star = add_obs_weights(df_screened)\n df_obs = pd.concat([df_obs, df_this_star])\n print(star_id.ljust(10),\n str(len(df_this_star)), 'added of',\n str(len(df_vsx)), 'downloaded in JD range',\n str(int(jd_vsx_start)), 'to', str(int(jd_vsx_end)),\n 'for running df_obs running total of ', str(len(df_obs)) + '.')\n\n df_obs_csv_fullpath = os.path.join(DATA_DIRECTORY, DF_OBS_FILENAME).replace('\\\\', '/')\n df_obs.to_csv(df_obs_csv_fullpath, sep=';', quotechar='\"', encoding='UTF-8',\n quoting=2, index=False) # quoting=2-->quotes around non-numerics.\n print(' df_obs dataframe written to', df_obs_csv_fullpath)\n\n df_nobs = pd.DataFrame(nobs_dict_list) # number of observations in NOBS date range, one row per star.\n df_nobs = reorder_df_columns(df_nobs, left_column_list=['star_id'])\n df_nobs = df_nobs.set_index('star_id', drop=False)\n df_nobs_csv_fullpath = os.path.join(DATA_DIRECTORY, DF_NOBS_FILENAME).replace('\\\\', '/')\n df_nobs.to_csv(df_nobs_csv_fullpath, sep=';', quotechar='\"', encoding='UTF-8',\n quoting=2, index=False) # quoting=2-->quotes around non-numerics.\n print(' df_nobs dataframe written to', df_nobs_csv_fullpath)\n print('\\nNext: df_fit_results = u.process_all_stars(dates).')", "def _buffer_invalid_geometries(gdf):\n # only apply the filters if the GeoDataFrame is not empty\n if not gdf.empty:\n # create a filter for rows with invalid geometries\n invalid_geometry_filter = ~gdf[\"geometry\"].is_valid\n\n # if there are invalid geometries\n if invalid_geometry_filter.any():\n # get their unique_ids from the index\n invalid_geometry_ids = gdf.loc[invalid_geometry_filter].index.to_list()\n\n # create a list of their urls and log them\n osm_url = \"https://www.openstreetmap.org/\"\n invalid_geom_urls = [osm_url + unique_id for unique_id in invalid_geometry_ids]\n utils.log(\n f\"{len(invalid_geometry_ids)} invalid geometries\"\n f\".buffer(0) applied to {invalid_geom_urls}\",\n level=lg.WARNING,\n )\n\n gdf.loc[invalid_geometry_filter, \"geometry\"] = gdf.loc[\n invalid_geometry_filter, \"geometry\"\n ].buffer(0)\n\n return gdf", "def filter_hits_by_atlas(self) -> None:\n start_len = len(self.hits)\n keep_adducts = self.atlas_df.loc[:, [\"inchi_key\", \"adduct\"]].drop_duplicates()\n logger.info(\"Number of inchi_key-adduct pairs is %d.\", len(keep_adducts))\n hits_plus = self.hits.copy()\n hits_plus[\"copy_index\"] = hits_plus.index.to_numpy()\n new_hits = hits_plus.merge(keep_adducts, on=[\"inchi_key\", 
\"adduct\"], how=\"inner\")\n logger.info(\"Number rows in new_hits is %d.\", len(new_hits))\n new_hits.index = pd.MultiIndex.from_tuples(new_hits[\"copy_index\"], names=self.hits.index.names)\n new_hits.drop([\"copy_index\"], axis=1)\n self._hits = new_hits\n logger.info(\n \"Filtering reduced number of MSMS hits from %d to %d (%d removed).\",\n start_len,\n len(self.hits),\n start_len - len(self.hits),\n )", "def filter_observations(self):\n raise NotImplementedError(\"broken\")\n n_min = self.bkg_method['n_min']\n obs = self.observations\n mask = obs.filter_by_reflected_regions(n_min)\n inv_mask = np.where([_ not in mask for _ in np.arange(len(mask + 1))])\n excl_obs = self.obs_table[inv_mask[0]]['OBS_ID'].data\n log.info('Excluding obs {} : Found less than {} reflected '\n 'region(s)'.format(excl_obs, n_min))\n self._observations = SpectrumObservationList(np.asarray(obs)[mask])\n self.obs_table = self.obs_table[mask]", "def remove_redundancies(self):\n start = timeit.default_timer()\n nrows_before = len(self.all_geometries.index)\n df = self.all_geometries.copy()\n df = df.round(10)\n og_cols = df.columns.tolist()\n # sort interatomic distance columns according to alphabetized bond types\n # e.g. OH HH CH --> CH HH OH\n alpha_bond_cols = [og_cols[i] for i in self.mol.alpha_bond_types_indices]\n alpha_bond_cols.append('cartesians')\n alpha_bond_cols.append('internals')\n df = df[alpha_bond_cols]\n df_cols = df.columns.tolist()\n # sort values of each 'bondtype' subpartition of interatomic distance columns\n # subpartitions are defined by the index of the first occurance of each \n # bond_type label. CH CH CH HH HH OH would be [0,3,5]. These define partition bounds.\n ind = self.mol.alpha_bond_types_first_occur_indices\n K = len(ind)\n # sort each subpartition\n for i in range(K):\n if i < (K - 1):\n cut = slice(ind[i], ind[i+1])\n mask = df_cols[cut]\n df.loc[:,mask] = np.sort(df.loc[:,mask].values, axis=1)\n else:\n mask = df_cols[i:self.n_interatomics]\n df.loc[:,mask] = np.sort(df.loc[:,mask].values, axis=1)\n\n # Remove duplicates\n # take opposite of duplicate boolean Series (which marks duplicates as True)\n mask = -df.duplicated(subset=self.bond_columns)\n self.unique_geometries = self.all_geometries.loc[mask] \n self.n_disps = len(self.unique_geometries.index)\n print(\"Redundancy removal took {} seconds\".format(round((timeit.default_timer() - start),2)))\n print(\"Removed {} redundant geometries from a set of {} geometries\".format(nrows_before-self.n_disps, nrows_before))", "def _get_stations_buffer(\n self,\n stations: list=[],\n crossover_radius=200000,\n ):\n result = None\n sql = \"\"\"\n select coalesce(\n st_collect(\n st_buffer(position::geography, {})::geometry\n )::geography,\n ST_GeomFromText('POLYGON EMPTY')\n )\n from d2qc_stations where id in ({})\n \"\"\".format(\n crossover_radius,\n DataSet._in_stations(stations),\n )\n\n result = DataSet._fetchall_query(sql, True)[0]\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SELECT e.gid, e.geom, e.min_buffer_distance, e.expected_deaths, o.number_of_zctas_used, o.observed_deaths, o.observed_deaths/e.expected_deaths as ratio
FROM filter_expected e
INNER JOIN filter_observed o ON e.gid=o.gid
def spatial_filter_query(self, prettyPrint):

    spatial_filter = []
    spatial_filter.append("SELECT e.gid, e.geom, e.min_buffer_distance, e.expected_deaths, o.number_of_zctas_used, o.observed_deaths, coalesce(o.observed_deaths,0)/e.expected_deaths as ratio")
    spatial_filter.append("FROM filter_expected e ")
    spatial_filter.append("INNER JOIN filter_observed o ON (e.gid = o.gid)")

    return self.print_statements(spatial_filter, prettyPrint)
[ "def fraction_licks_in_reward_zone(expt_grp):\n rew_intervals = ints.behavior(expt_grp, 'reward')\n licking_intervals = ints.behavior(expt_grp, 'licking')\n\n n_licks = licking_intervals.groupby('trial', as_index=False).agg(len)\n n_licks.rename(columns={'start': 'total_licks'}, inplace=True)\n del n_licks['stop']\n\n licks_in_reward = rew_intervals.filter_events(\n licking_intervals, 'start').groupby('trial', as_index=False).agg(len)\n licks_in_reward.rename(columns={'start': 'licks_in_reward'}, inplace=True)\n del licks_in_reward['stop']\n\n result = pd.merge(licks_in_reward, n_licks, on='trial', how='outer')\n result['licks_in_reward'] = result['licks_in_reward'].fillna(0)\n result['value'] = result['licks_in_reward'] / \\\n result['total_licks'].astype('float')\n\n return result", "def filter_observed(self, prettyPrint):\n\n filter_obs = []\n filter_obs.append(\", filter_observed as\")\n filter_obs.append(\"(\")\n filter_obs.append(\"SELECT b.gid, count(o.observed_deaths) as number_of_zctas_used, sum(o.observed_deaths) as observed_deaths\")\n filter_obs.append(\"FROM buffer_definition b\")\n #### This seems wierd we need an index on this with geom 26915 on the centroids?\n filter_obs.append(\"LEFT JOIN observed o on ST_DWithin( b.geom, ST_Transform(ST_Centroid(o.geom), 26915), b.min_buffer_distance)\")\n filter_obs.append(\"GROUP BY b.gid, b.geom\")\n filter_obs.append(\")\")\n \n return self.print_statements(filter_obs, prettyPrint)", "def filter_expected(self, prettyPrint):\n\n filter_calc = []\n filter_calc.append(\", filter_expected as\")\n filter_calc.append(\"(\")\n filter_calc.append(\"SELECT b.gid, b.geom, b.min_buffer_distance, sum(gpj.total_deaths) as expected_deaths\")\n filter_calc.append(\"FROM grid_person_join gpj\")\n filter_calc.append(\"INNER JOIN buffer_definition b ON gpj.gid = b.gid\")\n filter_calc.append(\"WHERE gpj.distance <= b.min_buffer_distance\")\n filter_calc.append(\"GROUP BY b.gid, b.geom, b.min_buffer_distance\")\n filter_calc.append(\")\")\n \n return self.print_statements(filter_calc, prettyPrint)", "def fraction_licks_rewarded(expt_grp):\n result = []\n for expt in expt_grp:\n\n totalLicks = sum([\n trial.behaviorData()['licking'].shape[0]\n for trial in expt.findall('trial')])\n\n totalWater = sum([\n trial.behaviorData()['water'].shape[0]\n for trial in expt.findall('trial')])\n\n rewardRate = expt.reward_parameters().get('operant_rate', 1)\n\n try:\n fraction = float(totalWater) / (totalLicks / float(rewardRate))\n except ZeroDivisionError:\n fraction = np.nan\n\n result.append({\n 'expt': expt, 'lick': totalLicks, 'water': totalWater,\n 'value': fraction})\n return pd.DataFrame(result, columns=['expt', 'lick', 'water', 'value'])", "def prep_df(md):\n df = json_normalize(md[\"events\"], sep=\"_\")\n d = {float(k):v for k,v in md[\"playerIdNameDictionary\"].items()}\n df[\"player_name\"] = df[\"playerId\"].map(d)\n #df[[\"x\", \"y\", \"endX\", \"endY\"]] = df[[\"x\", \"y\", \"endX\", \"endY\"]]/100\n df[\"qualifiers\"] = df[\"qualifiers\"].astype(str)\n\n return df", "def query_fink_photometry(objectId):\n\n desired_columns = {\n 'i:jd',\n 'i:ra',\n 'i:dec',\n 'i:magpsf',\n 'i:sigmapsf',\n 'i:diffmaglim',\n 'i:magzpsci',\n 'i:fid',\n }\n\n r = requests.post(\n urllib.parse.urljoin(cfg['app.fink_endpoint'], 'api/v1/objects'),\n json={'objectId': objectId, 'output-format': 'json'},\n )\n df = pd.DataFrame.from_dict(r.json())\n\n if not desired_columns.issubset(set(df.columns)):\n raise ValueError('Missing expected column')\n\n df.rename(\n 
columns={\n 'i:jd': 'jd',\n 'i:ra': 'ra',\n 'i:dec': 'dec',\n 'i:magpsf': 'mag',\n 'i:sigmapsf': 'magerr',\n 'i:diffmaglim': 'limiting_mag',\n 'i:magzpsci': 'zp',\n 'i:fid': 'filter',\n },\n inplace=True,\n )\n df['filter'] = [inv_bands[int(filt)] for filt in df['filter']]\n df['mjd'] = [Time(jd, format='jd').mjd for jd in df['jd']]\n\n columns_to_keep = ['mjd', 'ra', 'dec', 'mag', 'magerr', 'limiting_mag', 'filter']\n df = df[columns_to_keep]\n df['magsys'] = 'ab'\n\n return df", "def test_4():\n table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')\n\n result = aspects.group_by(table, ['race/ethnicity'], \n enums.SummaryOperators.PROPORTION_OF_COUNT)\n \n result_table = result['table']\n result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'gender'])\n\n result_suggestions = result['suggestions']\n \n # Sum of proportion column should be(close to) 1.0\n assert(result_table['gender'].sum() == 1.0)\n\n print(result_table)\n\n expected_result_table = \"\"\" race/ethnicity gender\n0 group A 0.089\n1 group B 0.190\n2 group C 0.319\n3 group D 0.262\n4 group E 0.140\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result_table == result_table.to_string())\n assert(str(result_suggestions) == expected_suggestions)", "def query4():\n docs = db.taxi.aggregate([\n {\n '$group': {\n '_id': {'$hour': \"$pickup_datetime\"},\n 'avgFare': {'$avg': '$fare_amount'},\n 'avgDistance': {\n '$avg': {\n '$add': [\n {'$abs': {'$subtract': [\n '$pickup_latitude', '$dropoff_latitude']}},\n {'$abs': {'$subtract': [\n '$pickup_longitude', '$dropoff_longitude']}}\n ]\n }\n },\n 'count': {'$sum': 1}\n }\n },\n {\n '$sort': {'avgFare': -1}\n }\n ])\n result = [doc for doc in docs]\n return result", "def calculate_gdp_per_capita():\n pass", "def provide_GSEE_fields(data):\n diff_frac = 1 - (data[\"direct_radiation\"] / data[\"global_horizontal\"]).fillna(0)\n # division by zero leads to lots of nans\n diff_frac = diff_frac.where(diff_frac > 0, 0)\n diff_frac = diff_frac.where(diff_frac < 1, 0)\n # remove inf and -inf that sometimes apeear when direct_radiation approximately zero\n data[\"diffuse_fraction\"] = diff_frac\n data = data.drop(\"direct_radiation\")\n return data", "def test_country_gender_equal(c1, years = [2020]):\n # fetch data\n df = data()\n # filter year\n df = df[df.year.isin(years)]\n # join population\n df = df\\\n .merge(pops, on=['region','sex','age_start','age_end','age'], suffixes=('','_2'))\n df.deaths = df.deaths / df.population\n # filter country data\n df1 = df[(df.region == c1) & (df.sex == 'F')]\n df2 = df[(df.region == c1) & (df.sex == 'M')]\n # country statistics\n n1 = df1.deaths.sum()\n n2 = df2.deaths.sum()\n x1 = (df1.age_end + df1.age_start) / 2\n x2 = (df2.age_end + df2.age_start) / 2\n mu1 = (df1.deaths @ x1) / n1\n mu2 = (df2.deaths @ x2) / n2\n var1 = (df1.deaths @ (x1 - mu1)**2) / n1\n var2 = (df2.deaths @ (x2 - mu2)**2) / n2\n var_pooled = ((n1-1)*var1 + (n2-1)*var2) / (n1+n2-2)\n # f test\n if var1 > var2:\n f_df1,f_df2 = n1-1,n2-1\n fstat = var1 / var2\n else:\n f_df1,f_df2 = n2-1,n1-1\n fstat = var2 / var1\n fpi = 1 - f.cdf(fstat, f_df1, f_df2) # f test pi value\n # t test\n if fpi > .05:\n tstat = (mu1 - mu2) / math.sqrt(var_pooled * (n1+n2)/n1/n2)\n t_df = n1 + n2 - 2\n else:\n tstat = (mu1 - mu2) / math.sqrt(var1 / n1 + var2 / n2)\n t_df = (var1**2/n1 + var2**2/n2)**2 / ((var1/n1)**2/(n1-1) + (var2/n2)**2/(n2-1))\n tpi = 1 - t.cdf(tstat, t_df) # t test pi value\n return {\n 'country': c1,\n 'f_pi': fpi,\n 'f_accept': 
'Y' if fpi > .05 else 'N',\n 't_pi': tpi,\n 't_accept': 'Y' if tpi > .05 else 'N'\n }", "def convert_to_rates(df):\n pops = qry.get_pops(both_sexes=True)\n df = df.merge(pops, how = 'inner')#how='left')\n assert df.mean_pop.notnull().values.all(), 'pop merge failed'\n id_cols = dw.EPI_CHILD_OVRWGT_GROUP_COLS\n draws = [col for col in df.columns if 'draw_' in col]\n df = pd.concat([\n df[id_cols],\n df[draws].apply(lambda x: x / df['mean_pop'])\n ], axis=1\n )\n df['metric_id'] = 3\n return df", "def test_calculate_combined():\n disease = '206200'\n drug = 'DB00136'\n\n # di_feat_col ='HPO-SIM'\n # dr_feat_col ='SE-SIM'\n #diseaseDF= disease_df[di_feat_col]\n #drugDF = drug_df[dr_feat_col]\n\n drugDF= pd.DataFrame.from_dict({'DB00136': {'DB00136': 1.0, 'DB00286': 0.13522012578616352},\n 'DB00286': {'DB00136': 0.13522012578616352, 'DB00286': 1.0}})\n\n data_dis = {'208085': {'208085': 1.0, '206200': 0.3738388048970476, '156000': 0.27540399660290193},\n '206200': {'208085': 0.3738388048970476, '206200': 1.0, '156000': 0.19287170205206816},\n '156000': {'208085': 0.27540399660290193, '206200': 0.19287170205206816,'156000': 1.0}}\n diseaseDF= pd.DataFrame.from_dict(data_dis, orient='index')\n\n\n knownDrugDisease = np.array([['DB00136','208085'],['DB00286','206200'],['DB00286','156000']])\n x1 = geometricMean(drug, disease, knownDrugDisease, drugDF, diseaseDF)\n print(x1,np.sqrt(0.373839))\n assert( np.isclose(x1,np.sqrt(0.373839), rtol=1e-05, atol=1e-08, equal_nan=False))\n\n disease = '206200'\n drug = 'DB00286'\n x2 = geometricMean(drug, disease, knownDrugDisease, drugDF, diseaseDF)\n print(x2, np.sqrt(0.192872))\n assert( np.isclose(x2, np.sqrt(0.192872), rtol=1e-05, atol=1e-08, equal_nan=False))", "def get_best_shock_models(gbd_round):\r\n sql_statement = \"\"\" SELECT\r\n gr.gbd_round,\r\n mv.model_version_id,\r\n mv.cause_id,\r\n mv.sex_id,\r\n mv.model_version_type_id,\r\n mv.is_best\r\n FROM\r\n cod.shock_version sv\r\n JOIN\r\n cod.shock_version_model_version svmv USING (shock_version_id)\r\n JOIN\r\n cod.model_version mv USING (model_version_id)\r\n JOIN\r\n \tshared.gbd_round gr USING (gbd_round_id)\r\n WHERE\r\n shock_version_status_id = 1 AND\r\n gbd_round = {gbd_round};\r\n \"\"\".format(gbd_round=gbd_round)\r\n result_df = run_query(sql_statement)\r\n return result_df", "def buildingDist(edge):\n p0 = edge[0]\n p1 = edge[1]\n point0 = Point(p0)\n point1 = Point(p1)\n wkt0 = point0.wkt\n wkt1 = point1.wkt\n cur.execute(\"select bid from buildings \\\n where st_contains(geom, st_geomfromtext('%s',27700))\" % wkt0)\n bid0 = cur.fetchall()[0][0]\n\n cur.execute(\"select bid from buildings \\\n where st_contains(geom, st_geomfromtext('%s',27700))\" % wkt1)\n bid1 = cur.fetchall()[0][0]\n\n cur.execute(\"select st_distance(b1.geom, b2.geom) \\\n from buildings as b1, buildings as b2 \\\n where b1.bid=%d and b2.bid=%d\" % (bid0, bid1))\n return cur.fetchall()[0][0]", "def get_age_grid(df, grdx, grdy, cfg=CFG):\n df = df.sort_values(\"age\", ascending=False) # sort oldest to youngest\n ny, nx = grdy.shape[0], grdx.shape[1]\n grdlat, grdlon = xy2latlon(grdx, grdy, cfg.rad_moon)\n age_grid = np.ones((nx, ny), dtype=cfg.dtype) * cfg.timestart\n for i, row in df.iterrows():\n lon, lat, rad, age, basin = row[[\"lon\", \"lat\", \"rad\", \"age\", \"isbasin\"]]\n grd_dist = gc_dist(lon, lat, grdlon, grdlat)\n ej_thresh = cfg.basin_ej_threshold if basin else cfg.ej_threshold\n ejmask = grd_dist < rad * ej_thresh # Mask ejecta blanket\n age_grid = np.where(ejmask, age, age_grid) # Update age in 
ejecta\n return age_grid", "def get_team_model_performance(x, model_id, home_team=True):\n team_id = x['home_id'] if home_team else x['away_id']\n date = x['date']\n season = x['season']\n perf = run_query(query=\"\"\"select avg(correct) from historic_predictions where \n model_id = '{}' and season = '{}' and home_id = {} \n or away_id = {} and date < '{}'\"\"\".format(\n model_id, season, team_id, team_id, date))\n return perf.iloc[0, 0]", "def diabetes_rate():\n\n # Use Pandas to perform the sql query\n diabetes_stmt = db.session.query(Combined).statement\n diabetes_df = pd.read_sql_query(diabetes_stmt, db.session.bind)\n diabetes_df = diabetes_df[\n [\"State\",\n \"State_2\",\n \"Diabetes_Rate_2018\",\n \"Population_2019\",\n \"1st\",\n \"2nd\",\n \"3rd\"]]\n\n diabetes_df[\"Population_with_Diabetes\"] = diabetes_df[\"Population_2019\"] * (diabetes_df[\"Diabetes_Rate_2018\"] / 100)\n\n return diabetes_df.to_json()", "def retrieve_player_stats(player1,player2,date,r,sur,year):\n\t#COMMON OPPONENTS APPROACH\n\t#print(\"Retrieving data about {} with respect to {} for matches before {}...\".format(player1,player2,date))\n\t\n\t#TIME DISCOUNTING\n\t#we try to give higher weight to most recent matches\n\t#to do so, we select the rows of interest AND the difference (in years) from the present date which will serve as weight\n\n\t####\n\t#games played by player1 in the most recent 5 years\n\tg1=df[((df[\"winner_name\"]==player1) | (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g1.loc[(g1.winner_name==player1, 'loser_name')].values[:])\n\tol=list(g1.loc[(g1.loser_name==player1, 'winner_name') ].values[:])\n\to1=set(ow+ol) #player 1 opponents\n\n\t#games played by player2\n\tg2=df[((df[\"winner_name\"]==player2) | (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g2.loc[(df.winner_name==player2, 'loser_name')].values[:])\n\tol=list(g2.loc[(df.loser_name==player2, 'winner_name') ].values[:])\n\to2=set(ow+ol) #player 2 opponents\n\n\t#list of common opponents \n\tco=[x for x in o1 if x in o2]\n\t#print(\"Common opponents in the last 5 years:\")\n\t#print(co)\n\n\tcolumn_names=[\"fs\",\"w1sp\",\"w2sp\",\"wsp\",\"wrp\",\"tpw\",\"aces\",\"df\",\"bpc\",\"bps\",\"bpo\",\"bpw\",\"tmw\",\"data_amount\",\"opponent\",]\n\taverages=pd.DataFrame(columns=column_names) #df to be filled with one row per opponent\n\t\n\tif len(co)>=5:\n\t\t\n\t\tcount=0\n\t\t#now evaluate average statistics of player1 wrt to each common opponent, then we'll do the average\n\t\tfor o in co:\n\t\t\t#print(\"Matches of {} vs {}...\".format(player1,o))\n\t\t\ttot_w=0\n\t\t\ttot_l=0\n\n\t\t\t#select matches of player 1 vs opponent o\n\t\t\tm=df[((((df[\"winner_name\"]==player1) & (df[\"loser_name\"]==o))) | ((df[\"winner_name\"]==o) & (df[\"loser_name\"]==player1))) & \\\n\t\t\t((df[\"tourney_date\"]<date) | ((df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\t\tif m.shape[0] > 0:\n\t\t\t\t#we have min 2 past matches against opponent o\n\t\t\t\t#won matches\n\t\t\t\tw=m[m[\"winner_name\"]==player1].loc[:,['w_fs', 'w_w1s', 'w_w2s', 'w_wsp', 'w_wrp', 'w_tpw', 'w_apg', 'w_dfpg', 'w_bppg', 'w_bps', 'l_bppg', 'l_bps', 
'loser_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'w_fs':'fs','w_w1s':'w1s','w_w2s':'w2s','w_wsp':'wsp','w_wrp':'wrp','w_tpw':'tpw','w_apg':'apg','w_dfpg':'dfpg','w_bppg':'bppg',\\\n\t\t\t\t'w_bps':'bps','l_bppg':'bpo','l_bps':'l_bps','loser_name':'opponent', 'tourney_date':'date','surface':'s'})\n\t\t\t\tif w.shape[0]>0:\n\t\t\t\t\tw[\"bpc\"]=w.apply(lambda row: 1-row[\"l_bps\"],axis=1)\n\t\t\t\t\t#set year difference param.\n\t\t\t\t\tw[\"year_diff\"]=w.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_w=w.shape[0]\n\t\t\t\tw=w.drop(\"l_bps\", axis=1)\n\n\t\t\t\t#lost matches\n\t\t\t\tl=m[m[\"loser_name\"]==player1].loc[:,['l_fs', 'l_w1s', 'l_w2s', 'l_wsp', 'l_wrp', 'l_tpw', 'l_apg', 'l_dfpg', 'l_bppg', 'l_bps', 'w_bppg', 'w_bps', 'winner_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'l_fs':'fs','l_w1s':'w1s','l_w2s':'w2s','l_wsp':'wsp','l_wrp':'wrp','l_tpw':'tpw','l_apg':'apg','l_dfpg':'dfpg','l_bppg':'bppg',\\\n\t\t\t\t'l_bps':'bps','w_bppg':'bpo','w_bps':'w_bps','winner_name':'opponent','tourney_date':'date','surface':'s'})\n\t\t\t\tif l.shape[0]>0:\n\t\t\t\t\tl[\"bpc\"]=l.apply(lambda row: 1-row[\"w_bps\"],axis=1)\n\t\t\t\t\t\n\t\t\t\t\tl[\"year_diff\"]=l.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_l=l.shape[0]\n\t\t\t\t\t\n\t\t\t\tl=l.drop(\"w_bps\", axis=1)\n\n\t\t\t\t#join the two datframes, so that we have all the matches\n\t\t\t\tj = pd.concat([w, l],sort=False)\n\t\t\t\t#weight for surface\n\t\t\t\tj[\"s_ref\"]=j.apply(lambda row: sur,axis=1) #reference surface of match under study\n\t\t\t\tj[\"s_w\"]=j.apply(surface_weighting,axis=1) #surface weight of each previous match\n\t\t\t\tj=j.drop(\"s\", axis=1) #not useful anymore\n\n\t\t\t\t#assign weight which decreases as year_diff is higher\n\t\t\t\tj[\"discounting\"]=j.apply(time_discount,axis=1)\n\t\t\t\t#further multiply time weights by surface weights\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]*row[\"s_w\"],axis=1)\n\t\t\t\tj=j.drop(\"s_ref\", axis=1)\n\t\t\t\tj=j.drop(\"s_w\", axis=1)\n\t\t\t\tj=j.drop(\"year_diff\", axis=1)\n\n\t\t\t\t#print(j)\n\t\t\t\ttot_weights=j[\"discounting\"].sum()\n\t\t\t\t#normalize weights to sum to 1\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]/j[\"discounting\"].sum(),axis=1)\n\t\t\t\t#print(j)\n\t\t\t\t#weight all the matches for the discounting param\n\t\t\t\t#hence, multiply columns 0-11 for column \"discounting\"\n\t\t\t\tj.update(j.iloc[:, 0:11].mul(j.discounting, 0))\n\t\t\t\tj[\"bpc\"]=j.apply(lambda row: row[\"bpc\"]*row[\"discounting\"],axis=1)\n\t\t\t\t#now to have the weghted average of each stat, sum all the column\n\t\t\t\tavg=list(j.sum(axis=0,numeric_only=True)[0:12])\n\t\t\t\tavg.append(tot_w/(tot_w+tot_l)) #append % of matches won against o\n\t\t\t\t#UNCERTAINTY\n\t\t\t\t#print(\"Uncertainty: 1/{}\".format(tot_weights))\n\t\t\t\tavg.append(tot_weights) #add \"data amount\" CHANGED FROM BEFORE!!\n\t\t\t\tavg.append(o)\n\t \t\t\n\t \t\t#NOW we have data for past matches of player1 against common opponent o\n\t\t\t\t#add to dataframe, go to next one\n\t\t\t\taverages.loc[count]=avg\n\t\t\t\tcount+=1\n\n\t\t\t\t#print(j)\n\t\t\t\n\t\t\t\n\t#at the end of the loop, return the dataframe\n\t#in the outer function, compute general uncertainties with data of the two players combined, \n\t#then evaluate average statistics btw all the common opponents for each player - finally, build the ultimate feature vector\n\t#print(averages)\n\treturn 
averages" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should work for MySQL messages larger than 100k bytes.
def test_larger_than_100k(self):
    self.render_config_template(
        mysql_ports=[3306],
        mysql_send_response=True
    )
    self.run_packetbeat(pcap="mysql_long.pcap",
                        debug_selectors=["mysqldetailed"])

    objs = self.read_output()
    assert len(objs) == 1

    res = objs[0]
    assert res["mysql.num_rows"] == 400
[ "def check_message(message):\n return False if len(message) > 20000 else True", "def test_message_exactly_buffsize(self):\n buf_message = \"It's 16 bytes eh\"\n self.send_message(buf_message)\n actual_sent, actual_reply = self.process_log()\n expected_sent = self.sending_msg.format(buf_message)\n self.assertEqual(expected_sent, actual_sent)\n expected_reply = self.received_msg.format(buf_message)\n self.assertEqual(expected_reply, actual_reply)", "def check_size(msg):\n\n if len(msg) > TWEET_SIZE:\n return False\n return True", "def is_message_too_long(message):\n if len(message) > 1000:\n raise error.InputError(\"Message is too long.\")", "def max_message_size(self):\n return self.__message_size_helper(\"max_message_size\");", "def read_message_count(self) -> int:", "def max_message_size(self):\n max_message_size = self.member_type.max_message_size()\n if isinstance(self.length, str):\n # A list where all the elements are self.length and there will be max_message_size\n # occurences\n return [self.length] * max_message_size\n return max_message_size * self.length", "def receive_messages(self, batch_size):", "def test_max_msg_length_reached(self):\n msg = '#aaa 99 9999' # The last byte is not the expected tail\n for byte in msg[:-1]:\n self.assertTrue(self.system.parse(byte))\n with self.assertRaises(ValueError):\n self.system.parse(msg[-1])", "def chunk_message(self, msg):\n prev = 0\n while prev < len(msg):\n next = min(prev + self.maxMsgSize, len(msg))\n yield msg[prev:next]\n prev = next", "def readByteString(msg, sock, fixed_header_size=10):\n while True:\n full_msg = b''\n new_msg = True\n\n while True:\n msg = sock.recv(16)\n if new_msg:\n #print(\"new msg len:\",msg[:HEADERSIZE])\n msglen = int(msg[:HEADERSIZE])\n new_msg = False\n\n #print(f\"full message length: {msglen}\")\n\n full_msg += msg\n\n #print(len(full_msg))\n\n if len(full_msg)-HEADERSIZE == msglen:\n #print(\"full msg recvd\")\n #print(full_msg[HEADERSIZE:])\n #print(pickle.loads(full_msg[HEADERSIZE:]))\n new_msg = True\n full_msg = b\"\"\n\n obj = pickle.loads(full_msg)\n\n return obj", "def dbmessage(list=bool, file=\"string\", type=\"string\", monitor=bool):\n pass", "def _nextMessage( self ):\n # receive an update / skip\n try:\n msg = self.sock.recv(1024)\n except SocketError, se:\n # If there was no data on the socket\n # --> not a real error, else kill socket and start a new one\n if se.errno != 11:\n progress(\"Connection failed: \"+str(se))\n self.sock.close()\n self.sock = None\n return ''\n # Use previously buffered data\n msg = self.buf + msg\n # End of dictionary should be end of message\n f = msg.find(\"}\")\n if f<0:\n self.buf = msg\n return ''\n # Pull out the first dictionary\n self.buf = msg[f+1:]\n return msg[:f+1]", "def writeToMySQL(self, connection):\n pass # TODO -- write", "def mqtt_msg(self, msg_size):\r\n if msg_size < MQTT_MSG_MAX_SZ:\r\n self._msg_size_lim = msg_size", "def test_thread_field_too_long_causes_error(self):\n self.json_message[\"thread_id\"] = \"x\" * (MAX_THREAD_LEN + 1)\n expected_error = f\"Thread field length must not be greater than {MAX_THREAD_LEN}\"\n with self.app.app_context():\n g.user = User(self.json_message[\"msg_from\"], \"respondent\")\n with self.assertRaises(ValidationError) as e:\n _ = MessageSchema().load(self.json_message)\n\n self.assertEqual(e.exception.messages, {\"thread_id\": [expected_error]})", "def test_multibyte_delim():\n\n delim = b'\\r\\n'\n for with_delim in (True, False):\n if with_delim:\n cond_delim = b'\\r\\n'\n else:\n cond_delim = 
b''\n\n empty = b''\n small_one = b'1'\n big_two = b'2' * 2048\n for ms in (3, 5, 1024, None):\n x, y = socket.socketpair()\n bs = BufferedSocket(x)\n\n y.sendall(empty + delim)\n y.sendall(small_one + delim)\n y.sendall(big_two + delim)\n\n kwargs = {'maxsize': ms, 'with_delimiter': with_delim}\n assert bs.recv_until(delim, **kwargs) == empty + cond_delim\n assert bs.recv_until(delim, **kwargs) == small_one + cond_delim\n try:\n assert bs.recv_until(delim, **kwargs) == big_two + cond_delim\n except MessageTooLong:\n if ms is None:\n assert False, 'unexpected MessageTooLong'\n else:\n if ms is not None:\n assert False, 'expected MessageTooLong'\n\n return", "def test_017_queue_insert_metadata_size65536(self):\n url = self.cfg.base_url + '/queues/qtestqueue/metadata'\n doc = functionlib.get_custom_body({\"metadatasize\": 65536})\n result = http.put(url, self.header, doc)\n\n self.assertEqual(result.status_code, 204)\n\n result = http.get(url, self.header)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json(), json.loads(doc))", "def chunk_messages(report):\n msg = \"\"\n for line in report.splitlines():\n msg += line + \"\\n\"\n if len(msg) > 3500:\n yield msg\n msg = \"\"\n yield msg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to fetch the peers from a blockchain node
def fetch_peers():
    get_chain_address = "{}/chain".format(CONNECTED_NODE_ADDRESS)
    response = requests.get(get_chain_address)
    if response.status_code == 200:
        content = []
        chain = json.loads(response.content)
        for peer in chain["peers"]:
            content.append(peer)

        global peers
        peers = sorted(content)
[ "def get_peerlist():\n response = None\n for seed in SEED_NODES:\n url = \"http://%s/staeon/peerlist?top\" % seed\n print(url)\n try:\n response = requests.get(url).json()\n except (requests.exceptions.ConnectionError, ValueError) as exc:\n print(exc)\n continue\n break\n\n if not response:\n raise Exception(\"Can't get peerlist\")\n\n return response['peers']", "def get_peers(self):\n pass", "def test_get_peers_0(self):\n pass", "def get_blockchain_nodes():\n response = requests.get(API_SERVER + \"/ledger_nodes\", timeout=app.config[\"DEFAULT_API_TIMEOUT\"])\n\n print(API_SERVER + \"/ledger/nodes\")\n\n if response.status_code != 200:\n raise APIError(\"Failed to call the conductor service to get list of blockchain nodes. \" \\\n + \"Server responded with status \" + response.status_code)\n\n try:\n nodes = response.json()\n except:\n raise APIError(\"Failed to parse the JSON in the call to the conductor service \" \\\n + \"to retrieve list of running blockchain nodes.\")\n\n if \"status\" in nodes and nodes[\"status\"] != \"success\":\n raise APIError(\"Failed to call the conductor service to get list of blockchain nodes. \" \\\n + \"Server returned status '\" + nodes[\"status\"] + \"'.\" )\n\n return nodes", "def getpeers(self):\n row = self.row\n col = self.col\n peers = []\n peers.extend([(i,col) for i in range(0,9) if i!= row] )\n peers.extend([(row,j) for j in range(0,9) if j!=col])\n peers.extend(self.getBlockCells())\n return peers", "async def peers(self, address, uid):\n if address[0] in self._blacklist:\n # XXX: pretend everything is ok\n return [\n random.randint(0, 2 ** UID_LENGTH) for x in range(self._replication)\n ]\n # XXX: The code is riddled with unpack/pack calls because Peer\n # stores key/uid as integer and msgpack doesn't accept such\n # big integers hence it is required to pass them as bytes.\n uid = unpack(uid)\n log.debug(\"[%r] find peers uid=%r from %r\", self._uid, uid, address)\n # XXX: if this takes more than 5 seconds (see RPCProtocol) it\n # will timeout in the other side.\n uids = nearest(self._replication, self._peers.keys(), uid)\n out = [self._peers[x] for x in uids]\n return out", "def fetch_blockchain():\n get_chain_address = f\"{CONNECTED_NODE_ADDRESS}/chain\"\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n chain_meta = json.loads(response.content)\n chain_content = []\n # TODO\n for block in chain_meta[\"chain\"]:\n chain_content.append(block)\n\n global blocks_to_show\n blocks_to_show = sorted(chain_content, key=lambda block: block['timestamp'],reverse=True)", "async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None):\n\n if not is_valid_blobhash(blob_hash):\n # TODO: use error from lbry.error\n raise Exception(\"invalid blob hash\")\n peer_q = asyncio.Queue(loop=self.component_manager.loop)\n if self.component_manager.has_component(TRACKER_ANNOUNCER_COMPONENT):\n tracker = self.component_manager.get_component(TRACKER_ANNOUNCER_COMPONENT)\n tracker_peers = await tracker.get_kademlia_peer_list(bytes.fromhex(blob_hash))\n log.info(\"Found %d peers for %s from trackers.\", len(tracker_peers), blob_hash[:8])\n peer_q.put_nowait(tracker_peers)\n elif not self.component_manager.has_component(DHT_COMPONENT):\n raise Exception(\"Peer list needs, at least, either a DHT component or a Tracker component for discovery.\")\n peers = []\n if self.component_manager.has_component(DHT_COMPONENT):\n await self.dht_node._peers_for_value_producer(blob_hash, peer_q)\n while not peer_q.empty():\n 
peers.extend(peer_q.get_nowait())\n results = {\n (peer.address, peer.tcp_port): {\n \"node_id\": hexlify(peer.node_id).decode() if peer.node_id else None,\n \"address\": peer.address,\n \"udp_port\": peer.udp_port,\n \"tcp_port\": peer.tcp_port,\n }\n for peer in peers\n }\n return paginate_list(list(results.values()), page, page_size)", "def get_peers(meta_info, peer_id):\n peer_list = []\n info_hash = calc_info_hash(meta_info)\n for announce_list in meta_info['announce-list']:\n for announcer in announce_list:\n if not announcer.startswith(\"udp\"):\n response = query_announcer(announcer, info_hash, peer_id)\n\n peers = response['peers']\n if isinstance(peers, string_type):\n peers = binary_peer_extract(peers)\n\n peer_list.extend(peers)\n return peer_list", "def get_peers(self, **kwargs):\r\n # TODO: This should probably not be in admin. However listing peers does seems slightly administrative.\r\n _result = []\r\n # Filter out the unserializable web socket\r\n for _session in self.root.peers.values():\r\n _new_session = copy.copy(_session)\r\n _new_session[\"web_socket\"] = \"removed for serialization\"\r\n _new_session[\"queue\"] = \"removed for serialization\"\r\n _result.append(_new_session)\r\n\r\n write_to_log(_process_id=self.process_id, _category=EC_NOTIFICATION, _severity=SEV_DEBUG,\r\n _data=\"Returning a list of peers:\" + str(_result))\r\n return _result", "def test_peer_info(self):\n #clear the peers list. \n tracker.info_hash_to_peers.clear()\n\n #send first request from one peer\n send_test_params(TEST_DEFAULTS())\n\n #send second params with a different peer_id to get the first peer back\n params = TEST_DEFAULTS()\n params[b\"peer_id\"] = \"TESTPEERID2\"\n result = send_test_params(params)\n \n #check we got the first peer back\n self.assertTrue(len(result[b\"peers\"]) == 1)\n peer = result[b\"peers\"][0]\n self.assertTrue(peer)\n \n #check we got the right info back\n self.assertTrue(peer[b\"peer id\"] == b\"TESTPEERID\")\n self.assertTrue(peer[b\"ip\"] == b\"127.0.0.1\")\n self.assertTrue(peer[b\"port\"] == 8001)\n self.assertTrue(len(peer) == 3)", "def RefreshPeers(plcs):\n\n for plc in plcs:\n for peer in plcs:\n if peer == plc:\n continue\n\n print plc.config.PLC_NAME, \"refreshing\", peer.config.PLC_NAME\n plc.RefreshPeer(peer.config.PLC_NAME)\n\n peer_id = plc.GetPeers([peer.config.PLC_NAME])[0]['peer_id']\n\n peer_sites = todict(plc.GetSites({'peer_id': peer_id}), 'site_id')\n sites_at_peer = todict(peer.GetSites(), 'site_id')\n\n peer_keys = todict(plc.GetKeys({'peer_id': peer_id}), 'key_id')\n keys_at_peer = todict(peer.GetKeys(), 'key_id')\n\n peer_persons = todict(plc.GetPersons({'peer_id': peer_id}), 'person_id')\n persons_at_peer = todict(peer.GetPersons(), 'person_id')\n\n peer_nodes = todict(plc.GetNodes({'peer_id': peer_id}), 'node_id')\n nodes_at_peer = todict(peer.GetNodes(), 'node_id')\n\n our_nodes = todict(plc.GetNodes({'peer_id': None}), 'node_id')\n our_peer_id_at_peer = peer.GetPeers([plc.config.PLC_NAME])[0]['peer_id']\n our_nodes_at_peer = todict(peer.GetNodes({'peer_id': our_peer_id_at_peer,\n 'peer_node_id': our_nodes.keys()}), 'peer_node_id')\n\n peer_slices = todict(plc.GetSlices({'peer_id': peer_id}), 'peer_slice_id')\n slices_at_peer = todict(peer.GetSlices(), 'slice_id')\n \n for site_id, site in peer_sites.iteritems():\n # Verify that this site exists at the peer\n peer_site_id = site['peer_site_id']\n assert peer_site_id in sites_at_peer\n peer_site = sites_at_peer[peer_site_id]\n\n # And is the same\n for field in ['name', 
'abbreviated_name', 'login_base', 'is_public',\n 'latitude', 'longitude', 'url',\n 'max_slices', 'max_slivers',]:\n assert site[field] == peer_site[field]\n\n for key_id, key in peer_keys.iteritems():\n # Verify that this key exists at the peer\n peer_key_id = key['peer_key_id']\n assert peer_key_id in keys_at_peer\n peer_key = keys_at_peer[peer_key_id]\n\n # And is the same\n for field in ['key_type', 'key']:\n assert key[field] == peer_key[field]\n\n for person_id, person in peer_persons.iteritems():\n # Verify that this user exists at the peer\n peer_person_id = person['peer_person_id']\n assert peer_person_id in persons_at_peer\n peer_person = persons_at_peer[peer_person_id]\n\n # And is the same\n for field in ['first_name', 'last_name', 'title', 'email', 'phone',\n 'url', 'bio', 'enabled']:\n assert person[field] == peer_person[field]\n\n for key_id in person['key_ids']:\n # Verify that the user is not associated with any local keys\n assert key_id in peer_keys\n key = peer_keys[key_id]\n peer_key_id = key['peer_key_id']\n\n # Verify that this key exists at the peer\n assert peer_key_id in keys_at_peer\n peer_key = keys_at_peer[peer_key_id]\n\n # And is related to the same user at the peer\n assert peer_key['key_id'] in peer_person['key_ids']\n\n for node_id, node in peer_nodes.iteritems():\n # Verify that this node exists at the peer\n peer_node_id = node['peer_node_id']\n assert peer_node_id in nodes_at_peer\n peer_node = nodes_at_peer[peer_node_id]\n\n # And is the same\n for field in ['boot_state', 'ssh_rsa_key', 'hostname',\n 'version', 'model']:\n assert node[field] == peer_node[field]\n\n # Verify that the node is not associated with any local sites\n assert node['site_id'] in peer_sites\n site = peer_sites[node['site_id']]\n\n # Verify that this site exists at the peer\n peer_site_id = site['peer_site_id']\n assert peer_site_id in sites_at_peer\n peer_site = sites_at_peer[peer_site_id]\n\n # And is related to the same node at the peer\n assert peer_site['site_id'] == peer_node['site_id']\n\n for slice_id, slice in peer_slices.iteritems():\n # Verify that this slice exists at the peer\n peer_slice_id = slice['peer_slice_id']\n assert peer_slice_id in slices_at_peer\n peer_slice = slices_at_peer[peer_slice_id]\n\n # And is the same\n for field in ['name', 'instantiation', 'url', 'description',\n 'max_nodes', 'expires']:\n assert slice[field] == peer_slice[field]\n\n for node_id in slice['node_ids']:\n # Verify that the slice is associated only with\n # the peer's own nodes, or with our nodes as\n # last cached by the peer.\n assert node_id in peer_nodes or node_id in our_nodes_at_peer\n if node_id in peer_nodes:\n node = peer_nodes[node_id]\n peer_node_id = node['peer_node_id']\n elif node_id in our_nodes_at_peer:\n peer_node = our_nodes_at_peer[node_id]\n peer_node_id = peer_node['node_id']\n\n # Verify that this node exists at the peer\n assert peer_node_id in nodes_at_peer\n\n # And is related to the same slice at the peer\n assert peer_node_id in peer_slice['node_ids']", "async def pubsub_peers(self, **kwargs):\n endpoint = 'pubsub/peers'\n args = []\n return await self.client.get_parsed(endpoint, args, kwargs)", "def find_peers_for_blob(self, blob_hash, timeout=None, filter_self=True):\n if blob_hash not in self.peers:\n self.peers[blob_hash] = [(self.dht_node.externalIP, self.dht_node.peerPort)]\n bin_hash = binascii.unhexlify(blob_hash)\n finished_deferred = self.dht_node.iterativeFindValue(bin_hash, exclude=self.peers[blob_hash])\n timeout = timeout or 
conf.settings['peer_search_timeout']\n if timeout:\n finished_deferred.addTimeout(timeout, self.dht_node.clock)\n try:\n peer_list = yield finished_deferred\n except defer.TimeoutError:\n log.debug(\"DHT timed out while looking peers for blob %s after %s seconds\",\n blob_hash, timeout)\n peer_list = []\n\n peers = set(peer_list)\n results = []\n for node_id, host, port in peers:\n if filter_self and (host, port) == (self.dht_node.externalIP, self.dht_node.peerPort):\n continue\n results.append(self.peer_manager.get_peer(host, port))\n self.peers[blob_hash].append((host, port))\n defer.returnValue(results)", "async def bitswap_ledger(self, peer, **kwargs):\n endpoint = 'bitswap/ledger'\n args = [(peer, 'string')]\n return await self.client.get_parsed(endpoint, args, kwargs)", "def make_peers(cell, n):\n\n row, col = cell\n side_len = n**2 + 1\n\n peers = ({(row, x) for x in range(1, side_len)} |\n {(x, col) for x in range(1, side_len)} |\n get_block_indices(cell, n))\n\n return peers - set([cell])", "def getPeers(self):\n maxdist = float(self.config.scanning.max_active_distance)\n peersquery = self.config.owner.queryRelated(self.session)\n return peersquery.filter(SocialRelationship.pdistance <= maxdist)", "def test_numpeers(self):\n #clear the peers list. \n tracker.info_hash_to_peers.clear()\n #add 49 peers\n for i in range(49):\n params = TEST_DEFAULTS()\n params[\"peer_id\"] += str(i)\n send_test_params(params)\n\n #send without a started event\n params = TEST_DEFAULTS()\n del params[\"event\"]\n result = send_test_params(params)\n\n #check we got 49 peers back\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 49)\n\n #add another peer\n params = TEST_DEFAULTS()\n params[\"peer_id\"] += str(50)\n send_test_params(params)\n\n #send without a started event\n params = TEST_DEFAULTS()\n del params[\"event\"]\n result = send_test_params(params)\n\n #check we got 50 peers back\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 50)\n\n #set numwant to 25, & check we get 25 peers back\n params = TEST_DEFAULTS()\n params[\"numwant\"] = 25\n del params[\"event\"]\n result = send_test_params(params)\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 25)", "def get_folder_peers(self,secret,throw_exceptions=True):\r\n\t\tparams = { 'method': 'get_folder_peers', 'secret' : secret }\r\n\t\treturn self._request(params,throw_exceptions)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Endpoint to send the last block mined to peers
def submit_block(): for peer in peers: if peer == host_address: continue url = "http://{}:8000/add_block".format(peer) headers = {'Content-Type': "application/json"} requests.post(url, data=json.dumps(blocks[0], sort_keys=True), headers=headers) return redirect('/')
[ "def listen_broadcast(self):\n self.send_response(200)\n self.end_headers()\n\n rec_params = urlparse.parse_qs(urlparse.urlparse(self.path).query)\n block_params = {\"index\": int(rec_params[\"index\"][0]),\n \"data\": rec_params[\"data\"][0],\n \"parent_hash\": rec_params[\"parent_hash\"][0],\n \"hash_val\": rec_params[\"hash_val\"][0],\n \"nonce\": int(rec_params[\"nonce\"][0]),\n \"timestamp\": float(rec_params[\"timestamp\"][0])}\n\n if self.dishonest:\n # ignore everything besides the genesis block\n if block_params[\"index\"] != 0:\n return\n\n rec_block = Block(from_dict=block_params)\n print(f\"received block on {self.ip}:{self.port}: {rec_block}\")\n self.network.add_block(rec_block)", "def update_last_block(self, last_block):\n pass", "def view_last_block():\n response = {\n 'chain': [blockchain_db_manager.get_last_block()],\n 'length': 1,\n 'header': 'Last Block'\n }\n return render_template('chain.html', data=response)", "def update_last_block(self, last_block):\n self.client.index(\n self.index, \n 'status', \n {'value': last_block}, \n id='height_all_tsx'\n )", "def broadcast(self, block):\n for node in self.network.directory:\n print(f\"broadcasted block to {node['ip']}:{node['port']}\")\n r = requests.head((\"http://\" + node[\"ip\"] + \":\" + str(node[\"port\"])\n + \"/broadcast\"),\n params={\"data\": block.data,\n \"hash_val\": block.hash_val,\n \"index\": block.index,\n \"nonce\": block.nonce,\n \"parent_hash\": block.parent_hash,\n \"timestamp\": block.timestamp})", "def find_last_block(self):\n pass", "def send_stream_tail(self):\n pass", "def download_started(peer, blocks_left):", "def last_block(self) -> Block:\r\n return self.chain[-1]", "def get_latest_block(self):\n return self.chain[-1]", "def subscribe_to_blocks(ws):\n ws.send('{\"op\":\"blocks_sub\"}')\n # ws.send('{\"op\":\"ping_block\"}')", "def recv(self):", "def mine_block():\n \n # Get details of previous block\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n \n # Find proof of the next block\n proof= blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n \n # Create new block\n block = blockchain.create_block(proof, previous_hash)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'],\n 'timestamp': block['timestamp'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash']}\n \n return jsonify(response), 200", "def BroadcastFinalTxn(self):\n\t\tboolVal , S = consistencyProtocol()\n\t\tif boolVal == False:\n\t\t\treturn S\n\t\tdata = {\"commitmentSet\" : S, \"signature\" : self.sign(S) , \"identity\" : self.identity , \"finalTxnBlock\" : self.finalBlock[\"finalBlock\"] , \"finalTxnBlock_signature\" : self.sign(self.finalBlock[\"finalBlock\"])}\n\t\tprint(\"finalblock-\" , self.finalBlock)\n\t\t# final Block sent to ntw\n\t\tself.finalBlock[\"sent\"] = True\n\t\tBroadcastTo_Network(data, \"finalTxnBlock\")\n\t\tif self.state != ELASTICO_STATES[\"FinalBlockReceived\"]:\n\t\t\tself.state = ELASTICO_STATES[\"FinalBlockSent\"]", "def __send_block(s_data, s_blocks, s_size, s_offset, noexpectphrase=True):\n j = s_offset\n for block in range(0, s_blocks):\n i = block * s_size + s_offset\n j = (block + 1) * s_size + s_offset\n if verbose:\n log.debug(\"{0}:{1}\".format(i, s_data[i:j]))\n if not noexpectphrase:\n self._uut_conn.send(s_data[i:j], expectphrase='.*', timeout=90, regex=True)\n else:\n self._uut_conn.send(s_data[i:j], expectphrase=None, timeout=120, 
idle_timeout=90, regex=True)\n time.sleep(0.10)\n return j", "def set_last_blocks(self):\n for coin in self.coins:\n number, block_hash = coin.get_last_block_number()\n self.database.insert_block(coin, number, block_hash)\n logger.info('%s: setting %s as last processed block', coin, number)", "def get_last_block(self):\r\n\r\n if len(self.chain) == 0:\r\n return None\r\n return self.chain[-1]", "def current_block_number(self):\n payload = {\"module\": \"proxy\",\n \"action\": \"eth_blockNumber\",\n \"apikey\": self.API_KEY}\n res_json = self.send_request(payload)\n return int(res_json['result'], 16)", "def SendtoFinal(self):\n\t\tfor finalId in self.finalCommitteeMembers:\n\t\t\t# here txn_block is a set\n\t\t\tdata = {\"txnBlock\" : self.txn_block , \"sign\" : self.sign(self.txn_block), \"identity\" : self.identity}\n\t\t\tmsg = {\"data\" : data, \"type\" : \"intraCommitteeBlock\" }\n\t\t\tfinalId.send(msg)\n\t\tself.state = ELASTICO_STATES[\"Intra Consensus Result Sent to Final\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function that hides all widgets except home and selected widget
def hide_all_but(self, widget=None): for i in reversed(range(1, self.layout.count())): item = self.layout.itemAt(i) if isinstance(item, QWidgetItem): item.widget().hide() # or # item.widget().setParent(None) if widget is not None: widget.show()
[ "def hide(self):\n for widget in self.widgets:\n widget.hide()\n if self.label is not None:\n self.label.hide()", "def hide_status(self):\r\n self.hide()\r\n self._searchWidget._checkSensitive.setCheckState(Qt.Unchecked)\r\n self._searchWidget._checkWholeWord.setCheckState(Qt.Unchecked)\r\n self._searchWidget.setVisible(False)\r\n self._replaceWidget.setVisible(False)\r\n self._fileSystemOpener.setVisible(False)\r\n main_container = IDE.get_service(\"main_container\")\r\n widget = None\r\n if main_container:\r\n widget = main_container.get_current_widget()\r\n if widget:\r\n widget.setFocus()", "def Hiding_Themes(self):\n self.groupBox.hide()", "def hide_all(self):\r\n tools_dock = IDE.get_service('tools_dock')\r\n toolbar = IDE.get_service('toolbar')\r\n if (self.lateralPanel.isVisible() or tools_dock.isVisible() or\r\n toolbar.isVisible()):\r\n if self.lateralPanel:\r\n self.lateralPanel.hide()\r\n if tools_dock:\r\n tools_dock.hide()\r\n if toolbar:\r\n toolbar.hide()\r\n else:\r\n if self.lateralPanel:\r\n self.lateralPanel.show()\r\n if tools_dock:\r\n tools_dock.show()\r\n if toolbar:\r\n toolbar.show()", "def hide(objects, allObjects=bool, returnHidden=bool, invertComponents=bool, clearSelection=bool, testVisibility=bool):\n pass", "def hide(self):\n self.is_visible = False", "def hide_popups(self): \n for popup in self.popups:\n popup.set_active(False)\n popup.set_visible(False)", "def _hideAllGuis(self):\n\t\tif self._hud != None:\n\t\t\tself._hud.hide()\n\t\tif self._mainmenu != None:\n\t\t\tself._mainmenu.hide()\n\t\tif self._pausemenu != None:\n\t\t\tself._pausemenu.hide()\n\t\tif self._loadingmenu != None:\n\t\t\tself._loadingmenu.hide()\n\t\tif self._settingsmenu != None:\n\t\t\tself._settingsmenu.hide()\n\t\tif self._aboutmenu != None:\n\t\t\tself._aboutmenu.hide()", "def hideAllPanes(self):\n\t\t\tall_panes = self._mgr.GetAllPanes()\t \n\t\t\tfor ii in xrange(len(all_panes)):\n\t\t\t\t\tif not all_panes[ii].IsToolbar():\n\t\t\t\t\t\t\tall_panes[ii].Hide()", "def hide_current_view(self):\n if self.current_view != DUMMY_NAME:\n _, options_frame, options_file_frame = self.get_plugin_by_tkname(\n self.current_view\n )\n if options_frame.has_options():\n options_frame.grid_forget()\n self.row -= 1\n options_file_frame.grid_forget()\n self.row -= 1\n self.btn_frame.grid_forget()\n self.row -= 1", "def HideGrid(self):\n self.hide_grid = True", "def hide_all_without_collection():\n # Retrieve objects linked to master collection\n objects = bpy.context.scene.collection.objects\n\n for obj in objects:\n obj.hide_render = True\n obj.hide_viewport = True", "def hidePluginMenus(self):\n for menuObject in self._pluginMenus:\n # hide all actions and deactivate their shortcuts\n menuObject.menuAction().setVisible(False)\n for action in menuObject.actions():\n if not hasattr(action,\"_wasVisible\") or action._wasVisible==None:\n action._wasVisible=action.isVisible()\n action.setVisible(False) # setVisible() here hides plugin menu forever on Mac OS X (10.5.7), Qt 4.5.", "def telector_hide():\n\n global labels_ui\n if labels_ui is not None:\n labels_ui.destroy()\n labels_ui = None\n ctx.tags = []", "def hide_item(self):\n self.tree_widget.currentItem().setHidden(True)", "def hide(self):\n self.root.withdraw()", "def remove_dash_box(self, win):\n for key, element in win.AllKeysDict.items(): # all elements with key\n if not isinstance(element,\n (sg.InputText, sg.InputCombo, sg.InputOptionMenu)):\n element.Widget.configure(takefocus=0) # no keyboard focus", "def showHidden(objects, 
allObjects=bool, below=bool, lastHidden=bool, above=bool):\n pass", "def hide(self):\n self.withdraw()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the ball is out of bounds
def out_of_bounds(): return ball.y > 600 or (ball.touches(hoop) and ball.speedy < 0)
[ "def ball_is_further_in(self):\n return ((self.ball_pos.y >= 0) and (self.pos.y > self.ball_pos.y)\n or (self.ball_pos.y < 0 and self.pos.y < self.ball_pos.y))", "def is_out_of_bounds(self, agent):\n x = agent.x\n y = agent.y\n\n if x < 0 or x >= self.width:\n return True\n if y < 0 or y >= self.height:\n return True\n return False", "def outside_arena():\r\n return not (0 < node.x < bounds[0] and 0 < node.y < bounds[1])", "def boundary_hit(self, x, y, width, height):\n if x < 0 or x >= width or y < 0 or y >= height:\n self.game_close = True", "def boundary_check(self):\r\n if self.position[0] < 0:\r\n self.velocity[0] *= -.8\r\n self.position[0] = 0\r\n elif self.position[0] > size[0] - self.offset:\r\n self.velocity[0] *= -.8\r\n self.position[0] = size[0] - self.offset\r\n if self.position[1] < 0:\r\n self.velocity[1] *= -.8\r\n self.position[1] = 0\r\n elif self.position[1] > size[1] - self.offset:\r\n self.velocity[1] *= -.8\r\n self.position[1] = size[1] - self.offset", "def out_of_bounds(position, bounds):\n return (position[0] < 0 or position[0] >= bounds[0] \n or position[1] < 0 or position[1] >= bounds[1])", "def collides(self, paddle: Paddle) -> bool:\n x_ball = self.xcor()\n if abs(x_ball - paddle.xcor()) < 12:\n y_ball = self.ycor()\n if y_ball < paddle.top and y_ball > paddle.bottom:\n if x_ball < 0 and x_ball >= paddle.xcor():\n return True\n elif x_ball > 0 and x_ball <= paddle.xcor():\n return True\n return False", "def outside_window(self):\n if self.ball.y >= self.window.height:\n return True", "def checkHitBall(ball, pad1, pad2, ballDirX):\r\n if ballDirX == -1 and pad1.right >= ball.left and pad1.top <= ball.top and pad1.bottom >= ball.bottom:\r\n return -1\r\n # ballDir = 1 -> pad2 may hit the ball\r\n # ball is hit when pad1.left == ball.right\r\n elif ballDirX == 1 and pad2.left <= ball.right and pad2.top <= ball.top and pad2.bottom >= ball.bottom:\r\n return -1\r\n else:\r\n return 1", "def check_bottom(ball, stats):\n if ball.rect.bottom >= ball.screen_rect.height:\n stats.balls_left -= 1\n ball.to_top()", "def ball_going_quickly(self):\n velocity_threshold = 10\n ball_velocity = self.world.get_ball().velocity.length()\n return ball_velocity > velocity_threshold", "def collision_and_bounce(self):\n ball_upperleft = self.window.get_object_at(self.ball.x, self.ball.y)\n ball_upperright = self.window.get_object_at(self.ball.x + 2*BALL_RADIUS, self.ball.y)\n ball_lowerleft = self.window.get_object_at(self.ball.x ,self.ball.y+2*BALL_RADIUS)\n ball_lowerright = self.window.get_object_at(self.ball.x + 2*BALL_RADIUS,self.ball.y+2*BALL_RADIUS)\n\n if ball_upperleft is not None:\n if ball_upperleft is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_upperleft)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_upperleft is self.paddle:\n self.__dy = -INITIAL_Y_SPEED\n\n elif ball_upperright is not None:\n if ball_upperright is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_upperright)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_upperright is self.paddle.x:\n self.__dy = -INITIAL_Y_SPEED\n\n elif ball_lowerleft is not None:\n if ball_lowerleft is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_lowerleft)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_lowerleft is self.paddle:\n self.__dy = -INITIAL_Y_SPEED\n\n elif ball_lowerright is not None:\n if ball_lowerright is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_lowerright)\n self.how_many_bricks -= 1\n 
print(self.how_many_bricks)\n if ball_lowerright is self.paddle:\n self.__dy = -INITIAL_Y_SPEED", "def CheckBounds(self, ):\n ...", "def ball_in_area(self):\n ball_in_y = self.ball.y < self.window.height - self.ball.height\n return ball_in_y", "def ball_collisions(self):\n up_l_corner = self.window.get_object_at(self.ball.x, self.ball.y)\n up_r_corner = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y)\n down_l_corner = self.window.get_object_at(self.ball.x, self.ball.y + self.ball.height)\n down_r_corner = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.height)\n\n # The situation that the ball hits the paddle.\n if down_l_corner == self.paddle:\n self.__dy = self.reverse_dy\n elif down_r_corner == self.paddle:\n self.__dy = self.reverse_dy\n\n # The situation that the ball hits bricks and remove them.\n if up_l_corner is not None and up_l_corner is not self.paddle and up_l_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(up_l_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif up_r_corner is not None and up_r_corner is not self.paddle and up_r_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(up_r_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif down_l_corner is not None and down_l_corner is not self.paddle and down_l_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(down_l_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif down_r_corner is not None and down_r_corner is not self.paddle and down_r_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(down_r_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)", "def collide_paddle(self):\n # just check the bottom side of the ball\n if self.obj3() == self.paddle or self.obj4() == self.paddle:\n return True", "def _maybeBounce(self):\r\n\t\tbounce = False\r\n\t\tif not self._inBoundsHorizontally():\r\n\t\t\t# Bounce off a left or right wall\r\n\t\t\tself.xVelocity *= -1.0\r\n\t\t\tbounce = True\r\n\t\t\t#self.bouncecount = self.bouncecount + 1\r\n\t\tif not self._inBoundsVertically():\r\n\t\t\t# Bounce of the floor or ceiling\r\n\t\t\tself.yVelocity *= -1.0\r\n\t\t\tbounce = True\r\n\t\t\t#self.bouncecount = self.bouncecount + 1\r\n\t\tif bounce:\r\n\t\t\t# Change color if there was a bounce\r\n\t\t\tself.bouncecount = self.bouncecount + 1\r\n\t\t\tself._nextColor()", "def check_bounds(self, next_rect):\r\n if next_rect == self.apple:\r\n # print('Body')\r\n return 1\r\n\r\n if next_rect[0] < 0 or next_rect[1] < 0 or next_rect[0] >= 35 or next_rect[1] >= 30:\r\n return -1\r\n\r\n if next_rect in self.body:\r\n return -2\r\n\r\n return 0", "def step_ball(self):\n s = self.s\n\n if s[S.BALL_VX] > 0.:\n tt_x = (c.RIGHT - s[S.BALL_X]) / s[S.BALL_VX]\n elif s[S.BALL_VX] < 0.:\n tt_x = (c.LEFT - s[S.BALL_X]) / s[S.BALL_VX]\n else:\n tt_x = np.inf\n \n if s[S.BALL_VY] > 0.:\n tt_y = (c.TOP - s[S.BALL_Y]) / s[S.BALL_VY]\n elif s[S.BALL_VY] < 0.:\n tt_y = (c.BOTTOM - s[S.BALL_Y]) / s[S.BALL_VY]\n else:\n tt_y = np.inf\n\n if (tt_x > 1.) and (tt_y > 1.): # no collision\n self.advance_ball(1.)\n\n elif tt_x <= tt_y <= 1.: # collision on X then on Y\n self.advance_ball(tt_x)\n self.hit_x()\n self.advance_ball(tt_y - tt_x)\n self.hit_y()\n self.advance_ball(1. 
- tt_y)\n\n elif tt_y < tt_x <= 1.: # collision on Y then on X\n self.advance_ball(tt_y)\n self.hit_y()\n self.advance_ball(tt_x - tt_y)\n self.hit_x()\n self.advance_ball(1. - tt_x)\n\n elif tt_x <= 1.: # collision on X\n self.advance_ball(tt_x)\n self.hit_x()\n self.advance_ball(1. - tt_x)\n\n elif tt_y <= 1.: # collision on Y\n self.advance_ball(tt_y)\n self.hit_y()\n self.advance_ball(1. - tt_y)\n\n else: # ???\n raise RuntimeError(\"Weird\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load lines from find.txt into search_dict
def load_db(): if not os.path.isfile("find.txt"): f = open("find.txt", "w") f.write("#test,yano,foobar\n") f.close() search_file = open("find.txt", "r") lines = search_file.readlines() search_file.close() search_dict = dict() for line in lines: result = None a = line.replace(r'\n', '') new = a.split(r',') if len(new) < 2: continue if new[0] not in search_dict: search_dict[new[0]] = dict() if new[1] not in search_dict[new[0]]: search_dict[new[0]][new[1]] = list() if len(new) > 3: result = ",".join(new[2:]) result = result.replace('\n','') elif len(new) == 3: result = new[-1] if len(result) > 0: result = result[:-1] if result: result = (result).decode('utf-8') search_dict[new[0]][new[1]].append(result) return search_dict
[ "def _search_input(self):\n try:\n for line in fileinput.input(files=self.parsed_elements.args.files_names, mode='r'):\n res = self.regex.search_regex_in_data(line)\n for item in res:\n self.results.append({'file_name': fileinput.filename(), 'no_line': fileinput.filelineno(),\n 'start_pos': item.start(), 'matched_text': item.group(), 'line': line.rstrip()})\n except Exception, err:\n raise Exception('OOPS! search in data failed: {0}'.format(err))", "def load_runs(fn):\n runs = defaultdict(dict)\n with open(fn, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n qid, _, docid, _, score, _ = line.strip().split()\n runs[qid][docid] = float(score)\n return runs", "def parse_vsearch(file, strand=\"+\"):\n sdict = {}\n with open(file, 'r') as f:\n for line in f:\n ll = line.strip().split()\n qname = ll[0]\n tname = ll[1]\n pmatch = float(ll[4])\n tcov = float(ll[5])\n gaps = int(ll[8])\n spos = int(ll[12])\n epos = int(ll[13])\n qstrand = ll[14]\n tstrand = ll[15]\n if qstrand == strand and tcov >= 50:\n if gaps < 5:\n td = {'spos': spos, 'epos': epos, 'pmatch': pmatch, 'tcov': tcov,\n 'gaps': gaps, 'qstrand': qstrand, 'tstrand': tstrand}\n if qname in sdict:\n if tname in sdict[qname]:\n if sdict[qname][tname]['pmatch'] < pmatch:\n sdict[qname][tname] = td\n else:\n sdict[qname][tname] = td\n else:\n sdict[qname] = {}\n sdict[qname][tname] = td\n return sdict", "def search(self, line):\n pass", "def parse_reference_file(self, reference_file, encoding):\n\n ref_file = codecs.open(reference_file, \"r\", encoding)\n lines = ref_file.read().split(\"\\n\")\n ref_dic = {}\n\n ref_file.close()\n\n for line in lines:\n if not line == \"\":\n filename_and_keyphrases = line.split(\"\\t\")\n filename = filename_and_keyphrases[0]\n keyphrases = filename_and_keyphrases[1].lower().split(\";\")\n\n # strip keyphrases\n for i, k in enumerate(keyphrases):\n keyphrases[i] = k.strip()\n\n ref_dic[filename] = keyphrases\n\n return ref_dic", "def processf(self):\n if self.content == []: # do the scan if have not done so\n self.scanf()\n \n for line in self.content:\n if len(line) > 0:\n keywords = line[0].split(' ')[0]\n self.contentdict[keywords] = line", "def __load_input_file(self, inputfile):\n f = open(inputfile, 'rU')\n content = f.read()\n \n \n [empty, frequency_related_part, loglikelihood_related_part, cosine_related_part] = content.split('#')\n \n frequency_lines = frequency_related_part.split('\\n')\n self.__load_into_dictionary(frequency_lines, 'frequency')\n \n loglikelihood_lines = loglikelihood_related_part.split('\\n')\n self.__load_into_dictionary(loglikelihood_lines, 'loglikelihood') \n \n cosine_lines = cosine_related_part.split('\\n')\n self.__load_into_dictionary(cosine_lines, 'cosine')", "def __load(self, ifile):\n # load map entries from file\n output = {}\n optmatch = None\n finput = AltFileInput(ifile, encoding = self.encd)\n for line in finput:\n if line:\n optmatch = RE_OPTIONS.match(line)\n if optmatch:\n if self.flags:\n raise RuleFormatError( \\\n msg = \"Multiple flag lines are not supported\", \\\n efile = finput)\n else:\n self.flags = optmatch.group(1)\n self.ignorecase = RegExp(self.flags, \"\").re.flags & re.IGNORECASE\n continue\n # find map entries\n line = skip_comments(line)\n m = MAP_DELIMITER.search(line)\n if m:\n src, trg = self.__normalize_quotes(line[0:m.start()], \\\n line[m.end():])\n if not (src and trg):\n print src.encode('utf-8')\n print trg.encode('utf-8')\n raise RuleFormatError(efile = finput)\n src = re.escape(src)\n if self.ignorecase:\n output[src.lower()] 
= trg\n else:\n output[src] = trg\n elif line:\n raise RuleFormatError(efile = finput)\n return output", "def load(text):\n\tlines = text.split()\n\tarr = []\n\tfor line in lines:\n\t\tarr.append(reduce(lambda x, y: dict(x.items(), **y), \\\n\t\t\tmap(lambda x: {x.split(\"=\")[0] : x.split(\"=\")[1]}, line.split(\";\"))))\n\t\t# var = dict()\n\t\t# for kv in line.split(\";\"):\n\t\t# \tvar[kv.split(\"=\")[0]] = kv.split(\"=\")[1]\n\t\t# arr.append(var)\n\treturn arr", "def parse(self, procfile):\r\n cfg = OrderedDict()\r\n with open(procfile) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n m = RE_LINE.match(line)\r\n if m:\r\n cfg[m.group(1)] = m.group(2)\r\n return cfg", "def parse(lines):\n results = defaultdict(list)\n for line in lines:\n ruleno, npkt, nbyte, body = re.split('\\W+', line, 3)\n results[ruleno].append((int(npkt), int(nbyte), body))\n return OrderedDict(sorted(results.items(), key=lambda t: t[0]))", "def _keys_in_lines(lines, regexes):\n logging.debug(\"_keys_in_lines\")\n result = []\n # check regexes: must contain the necessary groups\n for regex in regexes:\n for group in ['pre', 'kwd', 'sep', 'val']:\n if not group in regex.groupindex.keys():\n raise ValueError(\"regex \\n%s\\n' \"% regex.pattern\n +\"does not define groups 'pre', 'kwd', 'sep', 'val'\")\n kwd_count = {} # dictionary for counting keyword uses\n for i, line in enumerate(lines):\n matched = False\n logging.debug(\"line %d: %s\", i, line[:-1])\n for ir, regex in enumerate(regexes):\n match = regex.match(line)\n if match:\n matched = True\n line_keyword = match.group('kwd') # with spaces\n keyword = line_keyword.replace(\" \", \"_\") # without spaces\n while \"__\" in keyword:\n keyword = keyword.replace('__', '_')\n if keyword.endswith('_on'):\n keyword = keyword[:-3] + \"_on_off\"\n elif keyword.endswith('_off'):\n keyword = keyword[:-4] + \"_on_off\"\n if keyword in kwd_count:\n kwd_count[keyword] += 1\n keyword = \"%s_%d\" % (keyword, kwd_count[keyword])\n else:\n kwd_count[keyword] = 1\n logging.debug(\"regex %d matched line %d (keyword %s)\", ir, i,\n keyword)\n result.append(keyword)\n if matched:\n break # go to next line\n if not matched:\n logging.debug(\"No regex matched line %d\", i)\n return result", "def parse_content(content):\n\n\tfreqs = {}\n\trecords = set()\n\tfor line_num in range(len(content)):\n\t\ttris_in_line = TRINOMIAL_REGEX.findall(content[line_num])\n\t\tif tris_in_line is None:\n\t\t\tcontinue\n\t\tfor trinomial in tris_in_line:\n\t\t\tif trinomial[2:4] in relevant_counties:\n\t\t\t\tif trinomial in freqs:\n\t\t\t\t\tfreqs[trinomial] = freqs[trinomial] + 1\n\t\t\t\telse:\n\t\t\t\t\tfreqs[trinomial] = 1\n\t\t\t\tr = Record(site_name = trinomial, site_name_line = line_num)\n\n\t\t\t\tsentences, local_trin_count = get_search_space(content, r)\n\t\t\t\tpossible_pairs = get_possible_pairs(sentences, r)\n\n\t\t\t\t#If there are other terms nearby, extract carefully\n\t\t\t\tif local_trin_count > 1:\n\t\t\t\t\toptimal_term = get_optimal_term(\n\t\t\t\t\t\t\tpossible_pairs, r.site_name_line,\n\t\t\t\t\t\t\tsentences, trinomial)\n\t\t\t\t\tif optimal_term is not None:\n\t\t\t\t\t\tr.period_term = optimal_term\t\n\t\t\t\t\t\trecords.add(r)\n\t\t\t\t\telif r.artifacts:\n\t\t\t\t\t\tr.period_term = \"\"\n\t\t\t\t\t\trecords.add(r)\n\n\t\t\t\t#Otherwise, grab everything useful\n\t\t\t\telse:\n\t\t\t\t\tfor pair in possible_pairs:\n\t\t\t\t\t\ttmp = Record( \\\n\t\t\t\t\t\t\tsite_name = r.site_name, \\\n\t\t\t\t\t\t\tsite_name_line = line_num, \\\n\t\t\t\t\t\t\tperiod_term = 
pair[0], \\\n\t\t\t\t\t\t\tartifacts = r.artifacts, \\\n\t\t\t\t\t\t\tdates = r.dates)\n\t\t\t\t\t\trecords.add(tmp)\n\n\t\t\t\tr.site_name_line = line_num\n\n\told = records\n\trecords = split_artifacts(records)\n\timplement_freqs(freqs, records)\n\treturn records", "def grep_data(fort):\n data = {}\n lookup = \\\n {\n 'TC h' : 'TC',\n 'S elec' : 'S elec',\n 'S trans' : 'S trans',\n 'S rot' : 'S rot',\n 'S vib' : 'S vib',\n 'Stot' : 'S tot',\n 'TC-' : 'TC - TS'\n }\n for line in fort:\n if re.search('ZPVE.*=.*kJ', line):\n data['ZPVE'] = line.split()[2]\n for k, v in lookup.items():\n if k in line:\n data[v] = line.split()[-1]\n return data", "def build_dictionary():\r\n f = open(\"insult_database.txt\", \"r\")\r\n d = f.read().split(\"\\n\")\r\n f.close()\r\n dic = {}\r\n if len(d) > 1:\r\n for n in range(0, len(d)):\r\n ds = d[n].split(\":\")\r\n dic[ds[0]] = ds[1].split(\",\")\r\n return dic", "def makeDict():\n\ttagDict = {}\n\tlines = []\n\twith open(\"tags_meaning\")as file:\n\t\tlines = file.readlines()\n\tfor line in lines:\n\t\tkey = line.split(\" \")[0].strip(\"\\n\")\n\t\ttagDict[key] = 0\n\treturn tagDict", "def extract_pattern_from_file(file,pos_dict,ex_dict,tag,output):\n if file.endswith(\"_pos.txt\"):\n with open(POS + file, 'r', encoding='utf-8') as f:\n for line in f:\n for entry in pos_dict:\n # For each entry find all occurrences in this sentence\n matches = re.finditer(entry, line, re.I)\n for match in matches:\n exclude = 0\n for exItem in ex_dict:\n exMatches = re.finditer(exItem.rstrip('\\n'), line, re.I)\n for exMatch in exMatches:\n # Check if positive match is within range of exclusion match, as the exclusion\n # may contain additional context\n if exMatch and (exMatch.start() <= match.start(1) <= exMatch.end()):\n exclude = 1\n # Save result to list of results with appropriate tag\n if match and exclude is 0:\n # Print match with context\n pre, post, m = \"\", \"\", \"\"\n for w in line[0:match.start()].split():\n pre = pre + w.rsplit(\"_\")[0].split(\":\")[1] + \" \"\n for w in line[match.end():].split():\n if w.startswith(\"_\"):\n continue\n post = post + w.rsplit(\"_\")[0].split(\":\")[1] + \" \"\n for w in match.group(0).split():\n m = m + w.rsplit(\"_\")[0].split(\":\")[1] + \" \"\n print(file + \" | sent. \" + match.group(1) + \": \\t\" + pre + \"\\t\"\n + m + \" \\t\" + post + \"\\t\" + tag + \"\\n\")\n output.write(file + \" | sent. 
\" + match.group(1) + \": \\t\" + pre + \"\\t\"\n + m + \" \\t\" + post + \"\\t\" + tag + \"\\n\")", "def read_keywords(keywords_file):\n keywords = {}\n\n for line in open(keywords_file):\n\n # Skip blank lines and comments\n if re.match('^s*$', line):\n continue\n if re.match('^\\s*#', line):\n continue\n\n match = re.match('^(\\w+)\\s*\\t(\\w+)\\t?(\\w*)', line)\n if match:\n (kw_name, kw_type, kw_loc) = match.groups()\n if kw_type in keywords:\n keywords[kw_type].add(kw_name)\n else:\n keywords[kw_type] = set([kw_name])\n\n return keywords", "def parseFile(lines: List[str]) -> Dict[str, List[Tuple[int, int]]]:\n points: Dict[str, List[Tuple[int, int]]] = dict()\n newChar = False\n cont = False\n point = []\n sequenceClass = None\n\n for line in lines[1:]:\n if '.COMMENT' in line and 'Class' in line and '[' in line and '#' not in line:\n b = re.findall('.*?\\.COMMENT\\s+Class\\s+\\[(.*?)\\]', line)\n sequenceClass = b[0]\n newChar = True\n point = []\n continue\n if '.PEN_UP' in line:\n cont = False\n if sequenceClass not in points.keys():\n points[sequenceClass] = []\n points[sequenceClass].append(point)\n if '.PEN_DOWN' in line:\n cont = True\n continue\n if newChar and cont:\n b = re.findall('.*?(\\d+)\\s+([-\\d]+).*', line)\n xy = b[0]\n point.append((int(xy[0]), int(xy[1])))\n\n return points" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save search_dict to find.txt
def save_db(search_dict): search_file = open("find.txt", "w") for channel in search_dict: if channel is not "": for nick in search_dict[channel]: for line in search_dict[channel][nick]: new = "%s,%s,%s\n" % (channel, nick, (line).encode('utf-8')) search_file.write(new) search_file.close()
[ "def writeResultToFile(results, filename='all_searches.txt'):\n with open(filename, 'w') as f:\n for query in results:\n f.writelines(query.__repr__() + '\\n')", "def write_results(self,results_dict):", "def write(self):\n\n d = {} # to contain mappings of term to file cursor value\n with open(self.p_file, \"wb\") as f:\n for word, posting_list in self.dictionary.items():\n cursor = f.tell()\n d[word] = cursor # updating respective (term to file cursor value) mappings\n pickle.dump(posting_list, f, protocol=4)\n\n with open(self.d_file, \"wb\") as f:\n pickle.dump(d, f) # (term to file cursor value) mappings dictionary\n pickle.dump(self.doc_lengths, f) # document lengths regardless of zone/field types\n pickle.dump(self.docid_term_mappings, f) # (doc_id to K most common terms) mappings", "def saveToFile(dict):\n f = codecs.open(database_path, \"w\", \"utf-8\")\n f.write(str(dict))\n f.close()", "def load_db():\n if not os.path.isfile(\"find.txt\"):\n f = open(\"find.txt\", \"w\")\n f.write(\"#test,yano,foobar\\n\")\n f.close()\n search_file = open(\"find.txt\", \"r\")\n lines = search_file.readlines()\n search_file.close()\n search_dict = dict()\n for line in lines:\n result = None\n a = line.replace(r'\\n', '')\n new = a.split(r',')\n if len(new) < 2: continue\n if new[0] not in search_dict:\n search_dict[new[0]] = dict()\n if new[1] not in search_dict[new[0]]:\n search_dict[new[0]][new[1]] = list()\n if len(new) > 3:\n result = \",\".join(new[2:])\n result = result.replace('\\n','')\n elif len(new) == 3:\n result = new[-1]\n if len(result) > 0:\n result = result[:-1]\n if result:\n result = (result).decode('utf-8')\n search_dict[new[0]][new[1]].append(result)\n return search_dict", "def save(self, filename):\n with open(filename, \"w\", encoding=\"utf-8\") as f:\n for word in sorted(self.pdict.keys()):\n for pronunciation in self.pdict[word]:\n print(\"{}\\t{}\".format(word, \" \".join(pronunciation)), file=f)", "def save_dictionary(worddict, wordcount, loc):\n with open(loc, 'w') as f:\n pkl.dump(worddict, f)\n pkl.dump(wordcount, f)", "def save(self):\n self.trans=open(\"Translation.txt\", \"r+\")\n self.trans.truncate(0)\n written=\"\"\n for word in self.dictionary:\n written+=(word+\"-\"+self.dictionary[word]+\"\\n\")\n #self.trans.write(written.encode('utf8'))\n self.trans.write(written)\n self.trans.close()\n self.trans=open(\"Translation.txt\", \"r+\")", "def write_classification_to_file(dir, dic):\r\n with open(os.path.join(dir,\"!prediction.txt\"),'w',encoding=\"utf-8\") as f:\r\n for key in dic:\r\n f.write(key + \" \" + dic[key] + \"\\n\")", "def save_dictionary(self, path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass\n # Extract species from all the entries\n species_dict = {}\n entries = self.entries.values()\n for entry in entries:\n for reactant in entry.item.reactants:\n if reactant.label not in species_dict:\n species_dict[reactant.label] = reactant\n\n for product in entry.item.products:\n if product.label not in species_dict:\n species_dict[product.label] = product\n\n with open(path, 'w') as f:\n for label in species_dict.keys():\n f.write(species_dict[label].molecule[0].to_adjacency_list(label=label, remove_h=False))\n f.write('\\n')", "def save(self, filename):\n\n with open(filename, 'w') as dict_file:\n for idx, token in self.id_to_token.items():\n dict_file.write(\"{} {}\\n\".format(idx, token))\n\n logging.info(\"Dictionary '%s' has been saved in file '%s'\", self._name, filename)", "def save(self):\n self.logger.debug('save')\n start 
= time.time()\n #\n data = sorted(self.recents, key=itemgetter('when'), reverse=True)\n try:\n with closing(open(self.datafile, 'w', encoding='utf-8')) as json_file:\n json.dump(data, json_file)\n # pylint: disable=broad-except\n except Exception as err:\n self.logger.error(\n 'Failed to write last searches file {}: {}', self.datafile, err)\n self.logger.debug('saved search: {} sec', time.time() - start)\n return self", "def save_infolist_to_file(self):\n pass\n # temp_filename_full_path = os.path.join(self.gs_raw_dirpath, self.g_search_key + '_info.txt')\n # with open(temp_filename_full_path, 'w') as f:\n # for n in self.pic_info_list:\n # f.write(n)\n # f.write('\\n')", "def write_results(filename):", "def save_file(self):\n with open(self.filename, \"w\") as outfile:\n # Heading row\n outfile.write(\",\".join(self.dict_keys) + \"\\n\")\n # Data rows\n for item in self.items:\n elements = [item.get(key, \"\") for key in self.dict_keys]\n outfile.write(\",\".join(elements) + \"\\n\")", "def print_lines_to_file(self, output_file: str):\r\n with open(output_file, \"w\") as file:\r\n # Dictionary starts with \"A\"\r\n initial = \"A\"\r\n for entry in sorted(self.words):\r\n if entry:\r\n new_initial = entry.get_initial().upper()\r\n if new_initial != initial:\r\n file.write(\"\\n\")\r\n file.write(str(entry))\r\n initial = new_initial", "def save_index_data_quran_buckwalter(data, to_txt: bool = True):\n # import pickle\n\n with open('data/index_quran_buckwalter.pik', 'wb') as w:\n pickle.dump(data, w)\n\n if to_txt:\n with open('data/index_quran_buckwalter.txt', 'w') as w:\n w.write('souratNum:versetNum:wordPosition : WORDTashkilLatin | '\n 'WORDNoTashkilLatin | LEME | ROOT \\n\\n')\n for key in sorted(data.keys()):\n QtWidgets.QApplication.processEvents()\n w.write(key + \" : \" + data[key][0] + ' | ' + data[key][1])\n if data[key][2] != '':\n w.write(\" | \" + data[key][2])\n if data[key][3] != '':\n w.write(\" | \" + data[key][3] + '\\n')\n else:\n w.write('\\n')\n else:\n w.write('\\n')", "def save_search_to_pickle(items, filename):\n df = pd.DataFrame(items)\n df.to_pickle(path=f\"data/{filename}.pickle\")", "def save_results(results: Dict, result_filename: str):\n\n if not result_filename.endswith('.txt'):\n result_filename += '.txt'\n\n with open(result_filename, 'w') as f:\n for image_id, detections in sorted(results.items(), key=lambda x: x[0]):\n for x, y, w, h, score in detections:\n f.write(f'{image_id},{x:.4f},{y:.4f},{w:.4f},{h:.4f},{score:.8f}\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a graph of facilities from xml file. Optional node_style argument (border style, border color). Requires networkx drawing, because nod takes too long
def generate_facilities_graph(graph, xml, style=(1, 'black')): network = etree.parse(xml) facilities = network.findall(".//facility") node_attr = {} node_attr['size'] = style[0] node_attr['color'] = style[1] for facility in facilities: current_node_attr = {} current_node_attr.update(dict(facility.items())) current_node_attr.update(node_attr) current_node_attr['pos'] = correct_pos(current_node_attr) graph.add_node('fac_' + current_node_attr.get('id'), current_node_attr) return graph
[ "def generate_network_graph(graph, xml, node_style=(0, 'white'), edge_style=(1, 'orange')):\n node_attr = {}\n node_attr['size'] = node_style[0]\n node_attr['color'] = node_style[1]\n \n edge_attr = {}\n edge_attr['occupied'] = 0\n edge_attr['size'] = edge_style[0]\n edge_attr['color'] = edge_style[1]\n\n graph = xml_to_graph(xml, graph=graph, node_attr=node_attr, link_attr=edge_attr, pos_function=correct_pos)\n return graph", "def read_graph():\n path = \"./data/train/\"\n for filename in os.listdir(path):\n if filename.endswith(\".gml\"): # read out graph\n G_tmp = nx.read_gml(os.path.join(path, filename), label=\"label\")\n pos_tmp = nx.multipartite_layout(G_tmp, subset_key=\"layer\")\n nx.draw(G_tmp, pos_tmp,\n with_labels=True,\n node_size=5,\n connectionstyle=\"arc3,rad=-0.2\",\n edge_color=[G_tmp[u][v]['blockable'] for u, v in G_tmp.edges],\n width=1,\n font_size=10)\n # print(os.path.join(path, filename))\n # print(G_tmp.nodes(data=True))\n # print(G_tmp.edges(data=True))\n plt.show()", "def write_nodes_gexf(self, out_file):\n viz_color_shape = {'standard' : (42, 55, 235, \"disc\"), 'spheroplast':(255, 255, 0, \"square\"),\n 'curved': (41, 235, 3, \"triangle\"), 'filament': (211, 3, 235, \"diamond\")}\n count = 0\n for key, lst in self.nodeWithTypes.items():\n for elt in lst:\n r, g, b, shape = viz_color_shape[key]\n out_file.write(\" <node id=\\\"%s\\\" label=\\\"%s\\\" >\\n\" % (getNodeLetter(count), key))\n out_file.write(' <viz:color r=\"%d\" g=\"%d\" b=\"%d\" />\\n' % (r, g, b))\n out_file.write(' <viz:position x=\"%f\" y=\"%f\" z=\"0.0\" />\\n' % (elt[0], elt[1]))\n out_file.write(' <viz:shape value=\"%s\" />\\n' % shape)\n out_file.write(' <viz:size value=\"10\"/>\\n')\n out_file.write(\" </node>\\n\")\n count += 1\n out_file.write(\" <node id=\\\"SURFACE\\\" label=\\\"surfaceGhost\\\">\\n\")\n out_file.write(' <viz:color r=\"135\" g=\"135\" b=\"135\" />\\n')\n out_file.write(' <viz:position x=\"0.0\" y=\"0.0\" z=\"0.0\" />\\n')\n out_file.write(' <viz:shape value=\"disc\" />\\n')\n out_file.write(' <viz:size value=\"0.01\"/>\\n')\n out_file.write(\" </node>\\n\")", "def generate_netx(file):\n \n f = csv.reader(open(file))\n net = nx.Graph()\n for edge in f:\n net.add_edge(int(edge[0]),int(edge[1]),weight=float(edge[2]))\n #n_communities = nx.number_connected_components(net) networkx community finding algorithm, didnt work well.\n\n return net", "def fan_graph():\n return nx.read_gml(abs_path('gml/fan.gml'))", "def read_graph(self, path: str):\n self.G = nx.read_gml(path, label=\"label\", destringizer=int)\n self.layer_sizes = self.G.graph['layer_sizes']\n # G_tmp = nx.read_gml(os.path.join(path, filename), label=\"label\")\n # This part should not be delete untile config draw_after_read()\n # pos_tmp = nx.multipartite_layout(G_tmp, subset_key=\"layer\")\n # nx.draw(G_tmp, pos_tmp,\n # with_labels=True,\n # node_size=5,\n # connectionstyle=\"arc3,rad=-0.2\",\n # edge_color=[G_tmp[u][v]['blockable'] for u, v in G_tmp.edges],\n # width=1,\n # font_size=10)\n # print(os.path.join(path, filename))\n # print(G_tmp.nodes(data=True))\n # print(G_tmp.edges(data=True))\n # plt.show()", "def load_graph(fname):\n g = nx.Graph()\n with open(fname) as fl:\n for line in fl:\n u, v = line.split(\" \")\n g.add_edge(int(u), int(v))\n print(\"Loaded graph with {} nodes\".format(len(g.nodes)))\n return g", "def graph_kml(\n FG,\n fname=\"graph.kml\",\n icon=\"http://maps.google.com/mapfiles/kml/shapes/donut.png\",\n size=0.5,\n scale=0.5,\n width=5):\n\n # create a kml file containing the 
visualisation\n kml = Kml()\n fol = kml.newfolder(name=\"Graph\")\n\n shared_style = Style()\n shared_style.labelstyle.color = \"ffffffff\" # White\n shared_style.labelstyle.scale = size\n shared_style.iconstyle.color = \"ffffffff\" # White\n shared_style.iconstyle.scale = scale\n shared_style.iconstyle.icon.href = icon\n shared_style.linestyle.color = \"ff0055ff\" # Red\n shared_style.linestyle.width = width\n\n nodes = list(FG.nodes)\n\n # each timestep will be represented as a single point\n for log_index, _ in enumerate(list(FG.nodes)):\n\n pnt = fol.newpoint(\n name=\"\",\n coords=[\n (\n nx.get_node_attributes(FG, \"geometry\")[nodes[log_index]].x,\n nx.get_node_attributes(FG, \"geometry\")[nodes[log_index]].y,\n )\n ],\n )\n pnt.style = shared_style\n\n edges = list(FG.edges)\n for log_index, _ in enumerate(list(FG.edges)):\n\n lne = fol.newlinestring(\n name=\"\",\n coords=[\n (\n nx.get_node_attributes(FG, \"geometry\")[edges[log_index][0]].x,\n nx.get_node_attributes(FG, \"geometry\")[edges[log_index][0]].y,\n ),\n (\n nx.get_node_attributes(FG, \"geometry\")[edges[log_index][1]].x,\n nx.get_node_attributes(FG, \"geometry\")[edges[log_index][1]].y,\n ),\n ],\n )\n lne.style = shared_style\n\n kml.save(fname)", "def write_gml(self, g, fname):\n #raise NotImplementedError\n g = g.copy()\n #nodecolors = self.nodecolors()\n #for n, color in nodecolors.iteritems():\n nodecmtys = self.nodecmtys_onetoone()\n for n, c in nodecmtys.iteritems():\n g.node[n]['label'] = str(c)\n networkx.write_gml(g, fname)", "def draw(nodes, out_file=None):\n graph = nx.Graph()\n for node in nodes.values():\n graph.add_node(node.name, time=node.id)\n for neighbor_name in node.neighbors:\n graph.add_edge(node.id, neighbor_name)\n\n # pos = nx.spring_layout(G, scale=20)\n # nx.spring_layout(G, k=0.05, iterations=20)\n options = {\n 'node_size': 10,\n 'font_size': 12,\n 'with_labels': True,\n 'pos': graphviz_layout(graph)\n }\n nx.draw(graph, **options)\n if out_file is None:\n plt.plot()\n plt.show()\n else:\n plt.savefig(out_file)\n LOG.info('The topology figure is saved to %s', out_file)", "def add_graph_attributes(G, filename):\n Ef = dict() # feature -> edges\n Nf = dict() # node -> features\n with open(filename) as f:\n for line in f:\n d = line.split()\n u = int(d[0])\n features = d[1:]\n for f in features:\n Ef.setdefault(f, []).extend(G.in_edges(u)) # add feature-dependent edges\n #G.node[u]['Fu'] = features\n G.nodes[u]['Fu'] = features\n Nf[u] = features\n print('Read graph attributes')\n return Ef, Nf", "def _read_XGMML( self, file_list, nodes ):\n\n return # NOTE: not tested in this version\n\n for f in file_list:\n try:\n g = etree.parse( f )\n except ( IOError, lxml.etree.XMLSyntaxError ):\n continue\n\n for el in g.getroot( ):\n if el.tag[ -4 : ] == \"node\":\n try:\n ( name, molType ) = el.attrib[ \"id\", \"type\" ]\n except KeyError:\n continue\n if name not in nodes.keys( ):\n nodes[ name ] = NetworkNode( name, molType )\n\n elif el.tag[ -4: ] == \"edge\":\n pass", "def draw_network(graph, users, filename):\r\n \r\n labels={}\r\n for u in users:\r\n labels[u['screen_name']]=u['screen_name']\r\n plt.figure(figsize=(10,10))\r\n nx.draw_networkx(graph,labels=labels,with_labels=True,alpha=0.5, width=0.2)\r\n plt.axis('off')\r\n plt.savefig(filename,format=\"PNG\",frameon=None,dpi=300)\r\n plt.show()\r\n pass", "def draw_graph(self, filename: str, scale_x=10, scale_y=10):\n node_dict = self.get_node_names_with_pos()\n nx_graph = nx.DiGraph()\n nx_graph.add_nodes_from(node_dict.keys())\n\n 
nx_graph.add_edges_from(self.edges)\n\n plt.figure(1, figsize=(scale_x, scale_y))\n nx.draw(nx_graph, node_dict, with_labels=True)\n plt.gca().invert_yaxis()\n Graph.save_fig(filename)\n plt.show()", "def load_csv_nx(file_path = 'data/graph.csv', start_position = 0, end_position = 1, weight_positon = 2):\r\n G = nx.Graph()\r\n # with open(file_path) as f:\r\n f = open(file_path) \r\n \r\n # f = codecs.open(file_path,'r','utf-8')\r\n \r\n for line in f:\r\n if \"start\" in line:\r\n continue\r\n start = line.strip().split(\",\")[start_position].decode('utf-8')\r\n end = line.strip().split(\",\")[end_position].decode('utf-8')\r\n weight = line.strip().split(\",\")[weight_positon].decode('utf-8')\r\n \r\n \r\n\r\n G.add_edge(start, end, weight = int(weight))\r\n\r\n return G", "def create_graph(infile):\n \n G = nx.DiGraph()\n sum_demands = 0\n edge_data = {} # key (node1, node2) : value { 'c': #, 'w': # }\n\n for line in open(infile):\n if line.startswith('c'):\n continue\n\n a = line.split()\n\n if len(a) < 1:\n continue\n # not sure\n\n \n\n N = -1\n M = -1 # irrelevant\n if a[0] == 'p':\n # \"problem line\"\n # p min <nodes> <arcs>\n N = int(a[2])\n M = int(a[3])\n\n elif a[0] == 'n':\n # node line\n # n <id> <flow>\n # we'll consider <flow> to be the demand\n G.add_node(int(a[1]), demand=int(a[2]))\n sum_demands += int(a[2])\n\n elif a[0] == 'a':\n # edge line\n # a <v> <w> <low> <cap> <cost>\n # luckily <low>, lower bound for capacity, is always 0\n # not an input into nx\n # G.add_edge(int(a[1]), int(a[2]), capacity=int(a[4]), weight=int(a[5]))\n edge = (int(a[1]), int(a[2]))\n if edge not in edge_data:\n edge_data[edge] = []\n # else:\n # print \"an edge appeared twice!\", edge\n\n edge_data[edge].append({ 'c': int(a[4]), 'w': int(a[5]) })\n\n # print \"edge data\", edge_data\n\n # now add the edges\n for edge, data in edge_data.items():\n if len(data) == 1:\n # just add the edge directly\n G.add_edge(edge[0], edge[1], capacity=data[0]['c'], weight=data[0]['w'])\n else:\n counter = 1 # ensure unique names\n for d in data:\n # we have to create new nodes, ensure they have unique names\n node1 = \"%s_%s\" % (edge[0], counter)\n node2 = \"%s_%s\" % (edge[1], counter) # 1 -> 2\n # print \"created new nodes:\", node1, node2, d['c'], d['w']\n\n # add new nodes to the graph - they have 0 demand (default)\n G.add_node(node1)\n G.add_node(node2)\n\n # add edges with 0 cost and infinite capacity (this is the default)\n G.add_edge(edge[0], node1)\n G.add_edge(node2, edge[1])\n\n # add the weighted edges with capacity\n G.add_edge(node1, node2, capacity=d['c'], weight=d['w'])\n\n counter += 1\n\n\n # print \"sum demands:\", sum_demands # check = 0\n return G", "def read_netx(self):\n for line in self.adj:\n self.netx.add_node(line[0], type=line[0].split(\"-\")[0])\n self.netx.add_node(line[1], type=line[1].split(\"-\")[0])\n self.netx.add_edge(line[0], line[1], key=line[2])", "def create_netrate_network(fname):\n netrate_net = Graph(directed=True)\n netrate_net.add_vertices(3000)\n \n netrate_file = open(fname,\"r\")\n \n #------ Loop through all rows of the adjecency and add the edges\n node_i = 0 \n inferred_edges = []\n for line in netrate_file:\n #---------- All edge weights are almost 0\n for node_j, weight in enumerate(line.replace(\"\\n\",\"\").split(\",\")): \n if weight!='0':\n inferred_edges.append((node_i,node_j))\n node_i+=1\n \n netrate_net.add_edges(inferred_edges)\n \n return netrate_net", "def load_graph(file_path):\n \n assert ('txt' in file_path), 'Please choose a graph file of 
type txt'\n\n G = nx.read_edgelist(file_path,create_using=nx.Graph(), nodetype = int)\n return G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a graph from network xml file. Optional node_style argument (size, color, alpha). Optional edge_style argument (width, color). Returns a networkx graph
def generate_network_graph(graph, xml, node_style=(0, 'white'), edge_style=(1, 'orange')): node_attr = {} node_attr['size'] = node_style[0] node_attr['color'] = node_style[1] edge_attr = {} edge_attr['occupied'] = 0 edge_attr['size'] = edge_style[0] edge_attr['color'] = edge_style[1] graph = xml_to_graph(xml, graph=graph, node_attr=node_attr, link_attr=edge_attr, pos_function=correct_pos) return graph
[ "def generate_netx(file):\n \n f = csv.reader(open(file))\n net = nx.Graph()\n for edge in f:\n net.add_edge(int(edge[0]),int(edge[1]),weight=float(edge[2]))\n #n_communities = nx.number_connected_components(net) networkx community finding algorithm, didnt work well.\n\n return net", "def write_nx_graph(graph, filename):\n fx = open(filename, \"w\")\n fx.write(\"digraph grn\\n{\\n\")\n for edge in graph.edges():\n fx.write(\" %s -> %s [label=%d]\\n\" % edge)\n \n fx.write(\"}\")\n fx.close()", "def read_graph(self, path: str):\n self.G = nx.read_gml(path, label=\"label\", destringizer=int)\n self.layer_sizes = self.G.graph['layer_sizes']\n # G_tmp = nx.read_gml(os.path.join(path, filename), label=\"label\")\n # This part should not be delete untile config draw_after_read()\n # pos_tmp = nx.multipartite_layout(G_tmp, subset_key=\"layer\")\n # nx.draw(G_tmp, pos_tmp,\n # with_labels=True,\n # node_size=5,\n # connectionstyle=\"arc3,rad=-0.2\",\n # edge_color=[G_tmp[u][v]['blockable'] for u, v in G_tmp.edges],\n # width=1,\n # font_size=10)\n # print(os.path.join(path, filename))\n # print(G_tmp.nodes(data=True))\n # print(G_tmp.edges(data=True))\n # plt.show()", "def load_graph(file_path):\n \n assert ('txt' in file_path), 'Please choose a graph file of type txt'\n\n G = nx.read_edgelist(file_path,create_using=nx.Graph(), nodetype = int)\n return G", "def read_graph():\n path = \"./data/train/\"\n for filename in os.listdir(path):\n if filename.endswith(\".gml\"): # read out graph\n G_tmp = nx.read_gml(os.path.join(path, filename), label=\"label\")\n pos_tmp = nx.multipartite_layout(G_tmp, subset_key=\"layer\")\n nx.draw(G_tmp, pos_tmp,\n with_labels=True,\n node_size=5,\n connectionstyle=\"arc3,rad=-0.2\",\n edge_color=[G_tmp[u][v]['blockable'] for u, v in G_tmp.edges],\n width=1,\n font_size=10)\n # print(os.path.join(path, filename))\n # print(G_tmp.nodes(data=True))\n # print(G_tmp.edges(data=True))\n plt.show()", "def load_graph(fname):\n g = nx.Graph()\n with open(fname) as fl:\n for line in fl:\n u, v = line.split(\" \")\n g.add_edge(int(u), int(v))\n print(\"Loaded graph with {} nodes\".format(len(g.nodes)))\n return g", "def build_graph_from_edgelist(path, nodes = None):\n G = nx.Graph()\n if nodes:\n G.add_nodes_from(range(nodes))\n\n with open(path, 'r') as in_file:\n for line in in_file:\n tokens = line.split()\n node1 = int(tokens[0])\n node2 = int(tokens[1])\n if len(tokens) > 2:\n w = float(tokens[2])\n if w > 0.0 or w < 0.0:\n G.add_edge(node1, node2)\n G[node1][node2]['weight'] = w\n else:\n G.add_edge(node1, node2)\n\n return G", "def gen_networkx_graph(self):\n\n\t\tgraph = nx.DiGraph()\n\n\t\t# add each node into the graph - node number used to map the nodes\n\t\t# in CPPN to the corresponding nodes in the graph\n\t\tfor node in self.nodes:\n\t\t\tgraph.add_node(node.getNodeNum())\n\n\t\t# create all connections in graph\n\t\tfor con in self.connections:\n\t\t\tif(con.getStatus()):\n\t\t\t\tgraph.add_edge(con.getNodeIn().getNodeNum(), \n\t\t\t\t\t\t\tcon.getNodeOut().getNodeNum(),\n\t\t\t\t\t\t\ti=str(con.getInnovationNumber()))\n\n\t\treturn graph", "def create_network(edgelist):\n g = nx.Graph(edgelist)\n # nx.draw(G, with_labels=True)\n # plt.show() # this displays the graph - turn on as required.\n return g", "def create_graph(infile):\n \n G = nx.DiGraph()\n sum_demands = 0\n edge_data = {} # key (node1, node2) : value { 'c': #, 'w': # }\n\n for line in open(infile):\n if line.startswith('c'):\n continue\n\n a = line.split()\n\n if len(a) < 1:\n continue\n # not 
sure\n\n \n\n N = -1\n M = -1 # irrelevant\n if a[0] == 'p':\n # \"problem line\"\n # p min <nodes> <arcs>\n N = int(a[2])\n M = int(a[3])\n\n elif a[0] == 'n':\n # node line\n # n <id> <flow>\n # we'll consider <flow> to be the demand\n G.add_node(int(a[1]), demand=int(a[2]))\n sum_demands += int(a[2])\n\n elif a[0] == 'a':\n # edge line\n # a <v> <w> <low> <cap> <cost>\n # luckily <low>, lower bound for capacity, is always 0\n # not an input into nx\n # G.add_edge(int(a[1]), int(a[2]), capacity=int(a[4]), weight=int(a[5]))\n edge = (int(a[1]), int(a[2]))\n if edge not in edge_data:\n edge_data[edge] = []\n # else:\n # print \"an edge appeared twice!\", edge\n\n edge_data[edge].append({ 'c': int(a[4]), 'w': int(a[5]) })\n\n # print \"edge data\", edge_data\n\n # now add the edges\n for edge, data in edge_data.items():\n if len(data) == 1:\n # just add the edge directly\n G.add_edge(edge[0], edge[1], capacity=data[0]['c'], weight=data[0]['w'])\n else:\n counter = 1 # ensure unique names\n for d in data:\n # we have to create new nodes, ensure they have unique names\n node1 = \"%s_%s\" % (edge[0], counter)\n node2 = \"%s_%s\" % (edge[1], counter) # 1 -> 2\n # print \"created new nodes:\", node1, node2, d['c'], d['w']\n\n # add new nodes to the graph - they have 0 demand (default)\n G.add_node(node1)\n G.add_node(node2)\n\n # add edges with 0 cost and infinite capacity (this is the default)\n G.add_edge(edge[0], node1)\n G.add_edge(node2, edge[1])\n\n # add the weighted edges with capacity\n G.add_edge(node1, node2, capacity=d['c'], weight=d['w'])\n\n counter += 1\n\n\n # print \"sum demands:\", sum_demands # check = 0\n return G", "def load_graph(g_file=None, g_type=None, g_nodes=None, g_new_edges=None, g_seed=None):\n\tif g_file is not None:\n\t\tG = read_graph(g_file)\n\telse:\n\t\tdatasets_dir = \"datasets/\"\n\t\tif g_type == \"barabasi_albert\":\n\t\t\tG = nx.generators.barabasi_albert_graph(g_nodes, g_new_edges, seed=g_seed)\n\t\telif g_type == \"wiki\":\n\t\t\tG = read_graph(datasets_dir + \"wiki-Vote.txt\", directed=True)\n\t\telif g_type == \"amazon\":\n\t\t\tG = read_graph(datasets_dir + \"amazon0302.txt\", directed=True)\n\t\telif g_type == \"twitter\":\n\t\t\tG = read_graph(datasets_dir + \"twitter_combined.txt\", directed=True)\n\t\telif g_type == \"facebook\":\n\t\t\tG = read_graph(datasets_dir + \"facebook_combined.txt\", directed=False)\n\t\telif g_type == \"CA-GrQc\":\n\t\t\tG = read_graph(datasets_dir + \"CA-GrQc.txt\", directed=True)\n\t\telif g_type == \"epinions\":\n\t\t\tG = read_graph(datasets_dir + \"soc-Epinions1.txt\", directed=True)\n\t\telif g_type == \"tiny_wiki\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_wiki_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_amazon\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_amazon_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_CA-GrQc\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_CA-GrQc_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_wiki_community\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_wiki_community_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_amazon_community\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_amazon_community_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_CA-GrQc_community\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_CA-GrQc_community_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\treturn G", "def create_netrate_network(fname):\n 
netrate_net = Graph(directed=True)\n netrate_net.add_vertices(3000)\n \n netrate_file = open(fname,\"r\")\n \n #------ Loop through all rows of the adjecency and add the edges\n node_i = 0 \n inferred_edges = []\n for line in netrate_file:\n #---------- All edge weights are almost 0\n for node_j, weight in enumerate(line.replace(\"\\n\",\"\").split(\",\")): \n if weight!='0':\n inferred_edges.append((node_i,node_j))\n node_i+=1\n \n netrate_net.add_edges(inferred_edges)\n \n return netrate_net", "def load_graph(filename):\n with open(filename, 'r') as f:\n data = f.read()\n\n split_filename = filename.split('.')\n num_players = int(split_filename[0])\n num_seeds = int(split_filename[1])\n unique_id = int(split_filename[2])\n\n graph_dict = json.loads(data)\n G = nx.Graph(graph_dict)\n\n # Get rid of isolated nodes.\n G.remove_nodes_from(list(nx.isolates(G)))\n\n return G, num_players, num_seeds, unique_id", "def generate_graph(edge_list_file):\n\n # open and read contents of text file, close the file\n file = open(edge_list_file, 'r')\n lines = file.readlines()\n file.close()\n\n # create a list of nodes\n nodes = set()\n for line in lines:\n node1, node2 = [int(a) for a in line.strip().split()]\n nodes.add(node1)\n nodes.add(node2)\n nodes = list(nodes)\n nodes.sort()\n num_nodes = len(nodes)\n\n # create adjacency list\n adjacency_list = [[] for node in nodes]\n for line in lines:\n node1, node2 = [int(a) for a in line.strip().split()]\n adjacency_list[node1].append(node2)\n adjacency_list[node2].append(node1)\n\n # return number of nodes, list of nodes and adjacency list\n return num_nodes, nodes, adjacency_list", "def load_network(filename):\r\n # Make a new network.\r\n network = Network()\r\n\r\n # Read the data.\r\n with open(filename, \"r\") as input:\r\n all_text = input.read()\r\n all_lines = all_text.split(\"\\n\")\r\n\r\n # Get the number of nodes.\r\n num_nodes = int(all_lines[0])\r\n\r\n # Create the nodes.\r\n for i in range(num_nodes):\r\n network.all_nodes.append(Node(\"*\", (-1, -1), i))\r\n\r\n # Read the nodes.\r\n for i in range(1, num_nodes + 1):\r\n node = network.all_nodes[i - 1]\r\n node_fields = all_lines[i].split(\",\")\r\n\r\n # Get the node's text and coordinates.\r\n name = node_fields[0]\r\n location = (\r\n int(node_fields[1]),\r\n int(node_fields[2])\r\n )\r\n node.name = name\r\n node.text = name\r\n node.location = location\r\n\r\n # Get the node's links.\r\n for j in range(3, len(node_fields), 3):\r\n # Get the next link.\r\n index = int(node_fields[j])\r\n link = Link(node, network.all_nodes[index])\r\n link.cost = int(node_fields[j + 1])\r\n link.capacity = int(node_fields[j + 2])\r\n node.links.append(link)\r\n\r\n return network", "def read_network(self, filename):\n # **to do**\n # **hint: inspect 'network.txt' so that you understand the file structure**\n # **hint: each source-destination node pair needs to be joined\n\n # THE PSEUDOCODE FOR THIS METHOD HAS ALREADY BEEN WRITTEN BELOW\n\n # open the file\n # the 'with' construct automagically manages closing the file\n with open(filename, 'r') as fp:\n \n # loop over the lines of the file\n for line in fp:\n # - strip() is a useful method that removes white-space from the \n # beginning and end of the string\n ln = line.strip()\n\n # divide the string using the split() method for strings\n # - extract the source node\n # - extract the remaining arcs\n node_list = ln.split(\",\")\n \n \n # YOU WILL NEED TO THINK CAREFULLY ABOUT WHAT THIS TRY/EXCEPT BLOCK DOES\n # if node doesn't exist, add to 
network\n try:\n # the output is a node object, the input is a string\n # this command raises an ERROR if the node DOESN'T exist\n source_name = node_list.pop(0)\n source_node = self.get_node(source_name)\n except NetworkError:\n # this command gets executed if an error is raised above\n self.add_node(source_name)\n\n # get the source node OBJECT, using the source node STRING\n source_node = self.get_node(source_name)\n\n # read the arc information and add it to network\n for item in node_list:\n # parse arc information\n info = item.split(\";\") \n try: # Check if node is already defined\n new_node = self.get_node(info[0])\n except NetworkError: # if not add it and get it\n self.add_node(info[0])\n new_node = self.get_node(info[0])\n\n # get destination node object and link it to source node\n self.join_nodes(source_node, new_node, info[1])\n\n # delete the placeholder command below when you start writing your code", "def draw_semantic_network(\n graph,\n *,\n node_weights=None,\n spread=3.0,\n draw_nodes=False,\n base_node_size=300,\n node_alpha=0.25,\n line_width=0.5,\n line_alpha=0.1,\n base_font_size=12,\n save=False,\n):\n try:\n plt\n except NameError:\n raise ImportError(\n \"`matplotlib` is not installed, so `textacy.viz` won't work; \"\n \"install it individually via `$ pip install matplotlib`, or \"\n \"along with textacy via `pip install textacy[viz]`.\"\n )\n with plt.rc_context(RC_PARAMS):\n fig, ax = plt.subplots(figsize=(12, 12))\n\n pos = nx.layout.spring_layout(graph, k=spread / math.sqrt(len(graph.nodes())))\n _ = nx.draw_networkx_edges(\n graph, ax=ax, pos=pos, width=line_width, alpha=line_alpha, arrows=False\n )\n\n if node_weights is None:\n if draw_nodes is True:\n _ = nx.draw_networkx_nodes(\n graph,\n ax=ax,\n pos=pos,\n alpha=node_alpha,\n linewidths=0.5,\n node_size=base_node_size,\n )\n _ = nx.draw_networkx_labels(\n graph,\n pos,\n ax=ax,\n font_size=base_font_size,\n font_color=\"black\",\n font_family=\"sans-serif\",\n )\n else:\n max_node_weight = max(node_weights.values())\n if draw_nodes is True:\n node_sizes = [\n base_node_size * pow(node_weights[node] / max_node_weight, 0.75)\n for node in graph.nodes()\n ]\n _ = nx.draw_networkx_nodes(\n graph,\n ax=ax,\n pos=pos,\n node_size=node_sizes,\n alpha=node_alpha,\n linewidths=0.5,\n )\n for node, weight in node_weights.items():\n _ = nx.draw_networkx_labels(\n graph,\n pos,\n labels={node: node},\n ax=ax,\n font_color=\"black\",\n font_family=\"sans-serif\",\n font_size=base_font_size * pow(weight / max_node_weight, 0.15),\n )\n\n ax.set_frame_on(False)\n ax.set_xticklabels([\"\" for _ in range(len(ax.get_xticklabels()))])\n ax.set_yticklabels([\"\" for _ in range(len(ax.get_yticklabels()))])\n\n if save:\n fig.savefig(save, bbox_inches=\"tight\", dpi=100)\n\n return ax", "def draw(nodes, out_file=None):\n graph = nx.Graph()\n for node in nodes.values():\n graph.add_node(node.name, time=node.id)\n for neighbor_name in node.neighbors:\n graph.add_edge(node.id, neighbor_name)\n\n # pos = nx.spring_layout(G, scale=20)\n # nx.spring_layout(G, k=0.05, iterations=20)\n options = {\n 'node_size': 10,\n 'font_size': 12,\n 'with_labels': True,\n 'pos': graphviz_layout(graph)\n }\n nx.draw(graph, **options)\n if out_file is None:\n plt.plot()\n plt.show()\n else:\n plt.savefig(out_file)\n LOG.info('The topology figure is saved to %s', out_file)", "def as_nx_object(infile):\n edges, nodes = [], []\n with open(infile, 'r') as fIn:\n for line in fIn:\n if line:\n splitLine = line.split(None)\n if type(splitLine) is list 
and len(splitLine) > 0:\n if splitLine[0] == 'connect':\n e1 = splitLine[1].split('[')[1].split(']')[0]\n if e1 not in nodes:\n nodes.append(e1)\n e1 = e1 + '.' + splitLine[1].split('(')[1].split(')')[0]\n e2 = splitLine[2].split('[')[1].split(']')[0]\n if e2 not in nodes:\n nodes.append(e2)\n e2 = e2 + '.' + splitLine[2].split('(')[1].split(')')[0]\n edges.append([e1, e2])\n # Add the \"intrinsic\" edges that connect the 0th end to 1th \n # end of each node\n for s in nodes:\n edges.append([s+'.'+'0', s+'.'+'1'])\n G = nx.Graph()\n G.add_edges_from(edges)\n return G, edges, nodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function takes the previous hidden state and memory tuple, together with the input, and outputs the current hidden state.
def Lstm(self, previous_hidden_memory_tuple, x): previous_hidden_state,c_prev=tf.unstack(previous_hidden_memory_tuple) #Input Gate i= tf.sigmoid( tf.matmul(x,self.Wi)+tf.matmul(previous_hidden_state,self.Ui) + self.bi ) #Forget Gate f= tf.sigmoid( tf.matmul(x,self.Wf)+tf.matmul(previous_hidden_state,self.Uf) + self.bf ) #Output Gate o= tf.sigmoid( tf.matmul(x,self.Wog)+tf.matmul(previous_hidden_state,self.Uog) + self.bog ) #New Memory Cell c_= tf.nn.tanh( tf.matmul(x,self.Wc)+tf.matmul(previous_hidden_state,self.Uc) + self.bc ) #Final Memory cell c= f*c_prev + i*c_ #Current Hidden state current_hidden_state = o*tf.nn.tanh(c) return tf.stack([current_hidden_state,c])
[ "def reset_hidden(self, hidden_state, done):\n if hidden_state.dim() != done.dim():\n if done.dim() == 2:\n done = done.unsqueeze(0)\n elif done.dim() == 1:\n done = done.unsqueeze(0).unsqueeze(2)\n hidden_state = hidden_state * (1 - done)\n return hidden_state", "def reset_hidden_states(self, device):\n\n if self.rnn_model == \"LSTM\":\n \n self.previous_h_t, self.previous_c_t = self.init_hidden()\n self.previous_h_t = self.previous_h_t.to(device)\n self.previous_c_t = self.previous_c_t.to(device)\n\n else:\n \n self.previous_h_t = self.init_hidden()\n self.previous_h_t = self.previous_h_t.to(device)", "def decoder_state(self, z):\r\n\r\n batch_size = z.size(0)\r\n\r\n state_shape = (batch_size, self.hidden_size)\r\n \r\n #raise NotImplementedError()\r\n c0 = variable(torch.zeros(state_shape))\r\n c0 = cuda(c0)\r\n \r\n return z, c0", "def update(self, previous_state, sess=None):\n user_da = previous_state['user_da'][-1]\n new_inform_slots = user_da['inform'].keys()\n current_slots_inform = copy.deepcopy(previous_state['current_slots']['inform_slots'])\n # current_slots = copy.deepcopy(previous_state['current_slots'])\n for slot in new_inform_slots:\n current_slots_inform[slot] = new_inform_slots['slot']\n\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state']['inform_slots'] = current_slots_inform\n kb_result_dict = self.kb_query.query(new_state)\n new_state['kb_result_dict'] = kb_result_dict\n return new_state", "def forward(self, history_tensor, prev_hidden_state): \n e = self.input_lookup(history_tensor)\n x = e.view(e.shape[0], e.shape[1], e.shape[2])\n\n h, _ = self.lstm(x, self.init_hidden())\n\n o = self.output(h[-1])\n #y = self.softmax(o)\n y = F.log_softmax(o, dim=1)\n y = y.squeeze()[-1]\n\n return y", "def _step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_=None, dp_att_=None):\n # attention computation\n # [described in equations (4), (5), (6) in\n # section \"3.1.2 Decoder: Long Short Term Memory Network]\n pstate_ = tensor.dot(h_, tparams[_p(prefix,'Wd_att')]) + tensor.dot(ct_, tparams[_p(prefix, 'Wct_att')])\n pctx_ = pctx_ + pstate_[:,None,:]\n pctx_list = []\n pctx_list.append(pctx_)\n pctx_ = tanh(pctx_)\n alpha = tensor.dot(pctx_, tparams[_p(prefix,'U_att')])+tparams[_p(prefix, 'c_tt')]\n alpha_pre = alpha\n alpha_shp = alpha.shape\n\n alpha = tensor.nnet.softmax(alpha.reshape([alpha_shp[0],alpha_shp[1]])) # softmax\n ctx_ = (context * alpha[:,:,None]).sum(1) # current context\n alpha_sample = alpha # you can return something else reasonable here to debug\n\n preact = tensor.dot(h_, tparams[_p(prefix, 'U')])\n preact += x_\n preact += tensor.dot(ctx_, tparams[_p(prefix, 'Wc')])\n\n # Recover the activations to the lstm gates\n # [equation (1)]\n i = _slice(preact, 0, dim)\n f = _slice(preact, 1, dim)\n o = _slice(preact, 2, dim)\n if options['use_dropout_lstm']:\n i = i * _slice(dp_, 0, dim)\n f = f * _slice(dp_, 1, dim)\n o = o * _slice(dp_, 2, dim)\n i = tensor.nnet.sigmoid(i)\n f = tensor.nnet.sigmoid(f)\n o = tensor.nnet.sigmoid(o)\n c = tensor.tanh(_slice(preact, 3, dim))\n\n # compute the new memory/hidden state\n # if the mask is 0, just copy the previous state\n c = f * c_ + i * c\n c = m_[:,None] * c + (1. - m_)[:,None] * c_\n\n h = o * tensor.tanh(c)\n h = m_[:,None] * h + (1. 
- m_)[:,None] * h_\n\n rval = [h, c, alpha, alpha_sample, ctx_]\n rval += [pstate_, pctx_, i, f, o, preact, alpha_pre]+pctx_list\n return rval", "def init_state(self, src, memory_bank, encoder_final):\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n hidden = torch.cat([hidden[0:hidden.size(0):2],\n hidden[1:hidden.size(0):2]], 2)\n # add by wchen, only use the last \"self.num_layers\" encoder layers' final hidden\n enc_layers = hidden.size(0)\n if enc_layers >= self.num_layers:\n hidden = hidden[enc_layers - self.num_layers:]\n else:\n # broadcast the hidden of the last encoder layer to initialize every layer of the decoder\n hidden = [hidden[-1]] * self.num_layers\n hidden = torch.stack(hidden, dim=0)\n if hidden.dim() == 2:\n hidden = hidden.unsqueeze(0)\n return hidden\n\n if isinstance(encoder_final, tuple): # LSTM\n self.state[\"hidden\"] = tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final])\n else: # GRU\n self.state[\"hidden\"] = (_fix_enc_hidden(encoder_final), )\n\n # Init the input feed.\n batch_size = self.state[\"hidden\"][0].size(1)\n h_size = (batch_size, self.hidden_size)\n self.state[\"input_feed\"] = \\\n self.state[\"hidden\"][0].data.new(*h_size).zero_().unsqueeze(0)\n self.state[\"coverage\"] = None\n # add for review mechanism\n self.state[\"previous_hiddens\"] = []\n self.state[\"zero_vec\"] = self.state[\"hidden\"][0].data.new(*h_size).zero_()", "def update_tags(self):\n # 1. Decay old tags:\n\n # Input to hidden:\n self.xy_reg_tags = self.xy_reg_tags * self.L * self.gamma\n self.xy_mem_tags = self.xy_mem_tags * self.L * self.gamma\n\n # Hidden to output:\n self.yz_reg_tags = self.yz_reg_tags * self.L * self.gamma\n self.yz_mem_tags = self.yz_mem_tags * self.L * self.gamma\n\n # 2. 
Update tags:\n\n # Output to hidden:\n self.yz_reg_tags[:, self.prev_action] += np.hstack((np.ones(self.bias_hidden), self.y_reg))\n self.yz_mem_tags[:, self.prev_action] += np.hstack((np.ones(self.bias_mem_hidden), self.y_mem))\n\n # Input to hidden:\n # Here feedback and traces interact to form tag update:\n\n # Regular units:\n\n # Compute derivatives for regular units\n d_hr = self.reg_transform.derivative(self.y_reg)\n\n # Feedback from output layer to regular hidden units:\n fb_reg = self.weights_yz_reg[self.bias_hidden:, self.prev_action]\n\n # Actual update:\n fbxderiv_reg = d_hr * fb_reg\n self.xy_reg_tags += self.xy_reg_traces * fbxderiv_reg\n\n # Memory units:\n\n # Compute derivatives for memory units\n d_hm = self.mem_transform.derivative(self.y_mem)\n\n # Feedback from output layer to memory hidden units:\n fb_mem = self.weights_yz_mem[self.bias_mem_hidden:, self.prev_action]\n\n # Actual update:\n fbxderiv_mem = d_hm * fb_mem\n self.xy_mem_tags += self.xy_mem_traces * fbxderiv_mem", "def memory_state(self, input_, batch_size):\n\n\t\toptions = self.options\n\n\t\t# Feed encoded thoughts into memory cell\n\t\tlstm = tf.contrib.rnn.LayerNormBasicLSTMCell(options[\"sample_size\"])\n\n\t\t# Run thoughts through the cell\n\t\toptions = {\n\t\t\t\"dtype\": tf.float32\n\t\t}\n\n\t\toutput, output_state = tf.contrib.rnn.static_rnn(lstm, tf.unstack(input_), **options)\n\n\t\tlast_output = tf.gather(output, batch_size)\n\n\t\treturn tf.expand_dims(last_output, 0)", "def backstep(self):\n\n self.input.setDelta(self.output.getNetDelta())\n self.output.value = self.history.pop()", "def layered_state_tuple(num_layers: int, batch_size: int, hidden_size: int) -> tuple:\r\n hidden_state_shape = [num_layers, batch_size, hidden_size]\r\n hidden_state_placeholder = tf.placeholder(\r\n dtype=tf.float32,\r\n shape=[num_layers, batch_size, hidden_size],\r\n name=\"hidden_state_placeholder\")\r\n unpacked_hidden_state = tf.unstack(hidden_state_placeholder, axis=0, name=\"unpack_hidden_state\")\r\n hidden_state = tuple(unpacked_hidden_state)\r\n return hidden_state, hidden_state_placeholder, hidden_state_shape", "def _remember(self, observationOld, actionOld, observationNew, reward):\n self._memory.append([observationOld, actionOld, observationNew, reward])\n #self._", "def call(self, x, s_prev, hidden_states):\n attention = SelfAttention(s_prev.shape[1])\n context, weights = attention(s_prev, hidden_states)\n\n x = self.embedding(x)\n\n x = tf.concat([tf.expand_dims(context, 1), x], axis=-1)\n\n output, state = self.gru(x)\n\n output = tf.reshape(output, (-1, output.shape[2]))\n\n x = self.F(output)\n\n return x, state", "def d2_restore_state(program, pos1=12, pos2=2):\n program[1] = pos1\n program[2] = pos2", "def init_state(self, src, memory_bank, encoder_final):\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n assert self.bidirectional_encoder\n if self.bidirectional_encoder:\n hidden = torch.cat([hidden[0:hidden.size(0):2],\n hidden[1:hidden.size(0):2]], 2)\n # add by wchen, only use the last \"self.num_layers\" encoder layers' final hidden\n enc_layers = hidden.size(0)\n if enc_layers >= self.num_layers:\n hidden = hidden[enc_layers - self.num_layers:]\n else:\n # broadcast the hidden of the last encoder layer to initialize every layer of the decoder\n hidden = [hidden[-1]] * self.num_layers\n hidden = torch.stack(hidden, dim=0)\n if hidden.dim() == 2:\n hidden = hidden.unsqueeze(0)\n 
return hidden\n\n if isinstance(encoder_final, tuple): # LSTM\n self.state[\"hidden\"] = tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final])\n else: # GRU\n self.state[\"hidden\"] = (_fix_enc_hidden(encoder_final), )\n\n # Init the input feed.\n batch_size = self.state[\"hidden\"][0].size(1)\n h_size = (batch_size, self.hidden_size)\n self.state[\"input_feed\"] = \\\n self.state[\"hidden\"][0].data.new(*h_size).zero_().unsqueeze(0)\n self.state[\"coverage\"] = None", "def state_copy(self):\n list1 = []\n list2 = []\n for x in self.state[0]:\n list1.append(x)\n for x in self.state[1]:\n list2.append(x)\n ret = []\n ret.append(list1)\n ret.append(list2)\n return ret", "def get_hidden_values(self, input):\n# print T.dot(input, self.W).eval()\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden(self, layer):", "def call(self, x, s_prev, hidden_states):\n context_vector, attention_weights = self.attention(s_prev,\n hidden_states)\n\n x = self.embedding(x)\n x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)\n output, state = self.gru(x)\n output = tf.reshape(output, (-1, output.shape[2]))\n y = self.F(output)\n\n return y, state" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function takes the hidden state and returns the output.
def get_output(self, hidden_state): output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo) return output
[ "def get_hidden(self, layer):", "def reset_hidden(self, hidden_state, done):\n if hidden_state.dim() != done.dim():\n if done.dim() == 2:\n done = done.unsqueeze(0)\n elif done.dim() == 1:\n done = done.unsqueeze(0).unsqueeze(2)\n hidden_state = hidden_state * (1 - done)\n return hidden_state", "def get_hidden_values(self, input):\n# print T.dot(input, self.W).eval()\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def run_hidden(self, data):\r\n\r\n num_examples = data.shape[0]\r\n\r\n #print (num_examples) ;\r\n\r\n # Create a matrix, where each row is to be the visible units (plus a bias unit)\r\n # sampled from a training example.\r\n visible_states = np.ones((num_examples, self.num_visible + 1))\r\n\r\n #print (visible_states.shape);\r\n\r\n # Insert bias units of 1 into the first column of data.\r\n #data = np.insert(data, 0, 1, axis = 1)\r\n\r\n data[:,0] = 1 ;\r\n\r\n #print (data.shape) ;\r\n #print (self.weights.shape)\r\n\r\n # Calculate the activations of the visible units.\r\n visible_activations = np.dot(data, self.weights.T)\r\n # Calculate the probabilities of turning the visible units on.\r\n visible_probs = self._logistic(visible_activations)\r\n # Turn the visible units on with their specified probabilities.\r\n #visible_states[:,:] = visible_probs > np.random.rand(num_examples, self.num_visible + 1)\r\n visible_states[:,:] = visible_probs ; \r\n # Always fix the bias unit to 1.\r\n # visible_states[:,0] = 1\r\n\r\n # Ignore the bias units.\r\n visible_states = visible_states[:,1:]\r\n return visible_states", "def show_state(self):\n\n pass", "def compute_visible(self, h): \n hidden = tf.placeholder(tf.float32, [None, self.n_hidden], name=\"hidden\")\n compute = sample(tf.sigmoid(tf.matmul(hidden, tf.transpose(self.W)) + self.vb))\n \n x = self.sess.run(compute, feed_dict={hidden:h})\n return x", "def apply_state(self, state):", "def setHidden( self, state ):\r\n\t\tself._nativePointer.ishidden = state\r\n\t\treturn True", "def anihilation(i,state_in):\n if not (state_in[i] == 0):\n coef = np.sqrt(state_in[i])\n state_out=state_in.copy()\n state_out[i]=state_out[i]-1\n stop = False\n return state_out,coef,stop\n else:\n #print('This state cant be lowered at', i,'!', )\n stop = True \n state_out= []\n coef=0\n return state_out,coef,stop", "def _compute_hidden_acts(self, X, W, b):\n A_h = self.activation(T.dot(X, W) + b)\n return A_h", "def get_output(self, input_, mask_, hidden_init):\n # input_ are (n_batch, n_timesteps, n_features)\n # change to (n_timesteps, n_batch, n_features)\n input_ = input_.dimshuffle(1, 0, 2)\n # mask_ are (n_batch, n_timesteps)\n masks = masks.dimshuffle(1, 0, 'x')\n sequence_length = input_.shape[0]\n batch_num = input_.shape[1]\n\n # precompute input\n if self.precompute:\n additional_dims = tuple(input.shape[k] for k in range(2, input.ndim)) # (output_dim,)\n input = T.reshape(input, (sequence_length*batch_num,) + additional_dims)\n input = T.dot(input, self.W)\n additional_dims = tuple(input.shape[k] for k in range(1, input.ndim)) # (output_dim,)\n input = T.reshape(input, (sequence_length, batch_num,) + additional_dims)\n\n # step function\n def step(input_, hidden):\n if self.precompute:\n return self.out_activation.get_output(input_ + T.dot(hidden, self.U) + self.b)\n else:\n return self.out_activation.get_output(T.dot(input_, self.W) + T.dot(hidden, self.U) + self.b)\n\n # step function, with mask\n def step_masked(input_, mask_, hidden):\n hidden_computed = step(input_, hidden)\n return T.switch(mask_, hidden_computed, 
hidden)\n\n # main operation\n if self.unroll:\n counter = range(self.gradient_steps)\n if self.backward:\n counter = counter[::-1] # reversed index\n iter_output = []\n outputs_info = [hidden_init]\n for index in counter:\n step_input = [input_[index], mask_[index]] + outputs_info\n step_output = step_masked(*step_input)\n iter_output.append(step_output)\n outputs_info = [iter_output[-1]]\n hidden_output = T.stack(iter_output, axis=0)\n\n else:\n hidden_output = theano.scan(fn=step_masked,\n sequences=[input_, mask_],\n outputs_info=[hidden_init],\n go_backwards=self.backward,\n n_steps = None,\n truncate_gradient=self.gradient_steps)[0] # only need outputs, not updates\n\n # computed output are (n_timesteps, n_batch, n_features)\n # select only required\n if self.output_return_index is None:\n hidden_output_return = hidden_output\n else:\n hidden_output_return = hidden_output[self.output_return_index]\n # change to (n_batch, n_timesteps, n_features)\n hidden_output_return = hidden_output_return.dimshuffle(1, 0, *range(2, hidden_output_return.ndim))\n\n # backward order straight\n if self.backward:\n hidden_output_return = hidden_output_return[:, ::-1]\n\n return hidden_output_return", "def hidden(self, hidden):\n \n self._hidden = hidden", "def _make_outputs(self, hidden):\n if self.return_sequences:\n outputs = self.final_linear(hidden)\n\n # If rectilinear and return sequences, return every other value\n if (self.interpolation == \"rectilinear\") and self.return_filtered_rectilinear:\n outputs = outputs[:, ::2]\n else:\n outputs = self.final_linear(hidden[:, -1, :])\n return outputs", "def switchingFunction(self, state):\n\n x, y, z, dx, dy, dz, m, L1, L2, L3, L4, L5, L6, L7 = state\n\n Lv_, lv = self.unitVector(np.array([L4, L5, L6]))\n\n S = -lv * self.ve / m - L7 + 1\n\n return S", "def runningState(utility, time, retFig=False):\n states = np.array(utility.gen_t.get_memory()).astype(bool).astype(int)\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=time,\n y=states,\n line={'color': COL_BAL,\n 'width': 1},\n name=\"charge\",\n )\n )\n fig.update_layout(height=600, width=600,\n title_text=\"On/Off states of Utility\")\n fig.update_xaxes(title_text=\"Time\")\n fig.update_yaxes(title_text=\"State\")\n if retFig:\n return fig\n else: # show figure\n fig.show()", "def discretize_state(self, state):\n x, x_dot, phi, phi_dot = state\n if x > 1.:\n x = 1\n elif x < -1.:\n x = -1\n else: \n x = 0\n\n if x_dot < -0.1:\n x_dot = -2\n elif x_dot > 0.1:\n x_dot = 2\n elif x_dot < -0.03:\n x_dot = -1\n elif x_dot > 0.03:\n x_dot = 1\n else:\n x_dot = 0\n\n if phi > 0.1:\n phi = 1\n elif phi < -0.1:\n phi = -1\n else: \n phi = 0\n\n if phi_dot < -0.1:\n phi_dot = -2\n elif phi_dot > 0.1:\n phi_dot = 2\n elif phi_dot < -0.03:\n phi_dot = -1\n elif phi_dot > 0.03:\n phi_dot = 1\n else:\n phi_dot = 0\n \n return (x, x_dot, phi, phi_dot)", "def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n values = initializer(shape=(self.batch, self.units))\n\n return values", "def __getstate__(self):\n W_list = []\n bhid_list = []\n bvis_list = []\n for layer in self.dA_layers:\n W, bhid, bvis = layer.get_params()\n W_list.append(W.get_value(borrow=True))\n bhid_list.append(bhid.get_value(borrow=True))\n bvis_list.append(bvis.get_value(borrow=True))\n \n return (self.n_layers, self.n_outs, W_list, bhid_list, bvis_list, self.corruption_levels, self.layer_types, self.use_loss, self.dropout_rates, self.opt_method)", "def wifi_hidden(self, state: object = None):\n if state is None:\n 
self._logger.info(\"Retrieving WiFi hidden SSID status...\")\n return \"on\" if int(self._device_info().get(\"hideSSID\")) == 1 else \"off\"\n if (isinstance(state, str) and state.lower() == 'off') or not state:\n return self._wifi_hidden_off()\n self._wifi_hidden_on()", "def init_hidden(self):\n weight = next(self.parameters())\n nlayers = self.eta_nlayers\n nhid = self.eta_hidden_size\n return (weight.new_zeros(nlayers, 1, nhid), weight.new_zeros(nlayers, 1, nhid))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the number of dots in the nth pentagonal number.
def pentagonal(n: int) -> int: # Find the pentagonal number to nth degree. pentagonal_number = (n * ((3 * n) - 1) // 2) # Find the total number of dots. dots = ((n-1) ** 2) dots += pentagonal_number return dots
[ "def pentagonal_numbers(N, K):\n for n in range(K+1, N):\n # test whether (3n^2 - n)/2 - (3n^2 - 6nK - n + 3K^2 + K)/2 is pentagonal\n P_n_minus_P_n_K = (6*n*K - 3*K**2 - K)//2\n # if is_pentagonal(P_n_minus_P_n_K):\n # print(pentagonal_number(n))\n P_n_plus_P_n_K = pentagonal_number(n) + pentagonal_number(n-K)\n # if is_pentagonal(P_n_plus_P_n_K):\n # print(pentagonal_number(n))\n if is_pentagonal(P_n_minus_P_n_K) and is_pentagonal(P_n_plus_P_n_K):\n print(P_n_minus_P_n_K)", "def num_triangles(p):\n num = 0\n for a in range(1, p):\n numer = p*p - 2*p*a\n denom = 2*(p-a)\n b = numer // denom\n if a > b or b > (p - a - b):\n break\n elif b * denom == numer:\n num += 1\n return num", "def number_of_pennies(x, y=0):\r\n return x*100+y", "def find_dots(self,maze):\n x_ind = 0\n y_ind = 0\n for line in maze:\n x_ind = 0;\n for char in line:\n if char == '.':\n self.dots.append([y_ind,x_ind,False])\n x_ind += 1\n y_ind += 1", "def count_symid_head_dots(symid: SymId) -> int:\n i = 0\n while len(symid) > i and symid[i] == '.':\n i += 1\n return i", "def _countPaths(self, cells, n, m, x, y):\n \n if (self.dp):\n if ((x, y) in self.dpMap): return self.dpMap[(x, y)]\n if (cells[x][y] == True): return 0\n if (x == n - 1 and y == m - 1): return 1\n \n \n k = 0\n if (x + 1 < n): k += self._countPaths(cells, n, m, x + 1, y)\n if (y + 1 < m): k += self._countPaths(cells, n, m, x, y + 1)\n\n if (self.dp):\n self.dpMap[(x, y)] = k # Memorization for dynamic programming\n return k", "def ncells_per_point(self):\r\n if self.ncells_per_point_ is not None:\r\n return self.ncells_per_point_\r\n else:\r\n self.ncells_per_point_ = np.zeros(len(self.points), dtype=int)\r\n for celltype in self.cells:\r\n for cell in self.cells[celltype]:\r\n self.ncells_per_point_[cell] += 1\r\n return self.ncells_per_point_", "def pi_approx(n):\n\n x_val = []\n y_val = []\n\n for i in range(0, n):\n x = 0.5 * cos(2 * pi * i / float(n))\n x_val.append(x)\n y = 0.5 * sin(2 * pi * i / float(n))\n y_val.append(y)\n\n return pathlength(x_val, y_val)", "def count_non_divisors(n, p):\n result = 0\n\n # The recursive formula can be deduced by computing the (mod p) values of\n # Pascal's triangle, and noticing the pattern (at least up until 21X in base p)\n roots = 1\n\n p_digits = convert_base(n, p)\n p_pownds = count_power_non_divisors(p)\n\n for d, nd_count in reversed(zip(p_digits, p_pownds)):\n result += roots * nd_count * sum_up_to(d)\n roots *= d + 1\n\n return result", "def isPentagonal(number):\n\n if number == 1:\n return True\n else:\n n = ((24 * number + 1) ** .5 + 1)\n if n % 6 == 0:\n return True\n else:\n return False", "def pi_nth_digit(n):\n return '%.*f' % (n,pi)", "def coord_num(phi):\r\n\r\n n = 20.0 - 34.0*phi + 14.0*phi**2.0\r\n\r\n return n", "def get_dots(self):\n return Tile.number_to_dots(self.number)", "def n_segments(tree):\n return sum(1 for _ in tr.isegment(tree))", "def p63():\n count = 0\n for n in range(1000):\n for i in itertools.count(1):\n digits = len(str(i ** n))\n if digits == n:\n count += 1\n print(\"%d: %d\" % (n, i ** n))\n elif digits > n:\n break\n return count", "def num_perception_points(radius):\n return sum(num_perpception_layer_points(layer) for layer in range(radius))", "def trailingZeroes(self, n):\n count = 0\n if n == 0:\n return 0\n maxk = math.floor(math.log(n) / math.log(5.))\n while maxk >= 1:\n maxm = n / math.pow(5, maxk)\n count += math.floor(maxm)\n maxk -= 1\n return int(count)", "def montePi(tortle, num_darts):\n inside_count = 0\n for i in range(num_darts):\n 
throwDart(tortle)\n if isInCircle(tortle, (0,0), 1):\n inside_count += 1\n return((float(inside_count) / float(num_darts)) * 4) #function returned 0 after dividing integers so I converted both to float.", "def get_number_of_particles(pdg_arr):\n n = 0\n p = 0\n piminus = 0\n piplus = 0\n pi0 = 0\n gamma = 0\n deuteron = 0\n triton = 0\n he3 = 0\n alpha = 0\n li6 = 0\n li7 = 0\n li8 = 0\n li9 = 0\n be7 = 0\n be8 = 0\n be9 = 0\n be10 = 0\n b8 = 0\n b9 = 0\n b10 = 0\n b11 = 0\n c9 = 0\n c10 = 0\n c11 = 0\n c12 = 0\n\n # loop over pdg_array:\n for ind in range(len(pdg_arr)):\n\n if pdg_arr[ind] == 2112:\n n += 1\n elif pdg_arr[ind] == 2212:\n p += 1\n elif pdg_arr[ind] == -211:\n piminus += 1\n elif pdg_arr[ind] == 211:\n piplus += 1\n elif pdg_arr[ind] == 111:\n pi0 += 1\n elif pdg_arr[ind] == 22:\n gamma += 1\n elif pdg_arr[ind] == 1000010020:\n deuteron += 1\n elif pdg_arr[ind] == 1000010030:\n triton += 1\n elif pdg_arr[ind] == 1000020030:\n he3 += 1\n elif pdg_arr[ind] == 1000020040:\n alpha += 1\n elif pdg_arr[ind] == 1000030060:\n li6 += 1\n elif pdg_arr[ind] == 1000030070:\n li7 += 1\n elif pdg_arr[ind] == 1000030080:\n li8 += 1\n elif pdg_arr[ind] == 1000030090:\n li9 += 1\n elif pdg_arr[ind] == 1000040070:\n be7 += 1\n elif pdg_arr[ind] == 1000040080:\n be8 += 1\n elif pdg_arr[ind] == 1000040090:\n be9 += 1\n elif pdg_arr[ind] == 1000040100:\n be10 += 1\n elif pdg_arr[ind] == 1000050080:\n b8 += 1\n elif pdg_arr[ind] == 1000050090:\n b9 += 1\n elif pdg_arr[ind] == 1000050100:\n b10 += 1\n elif pdg_arr[ind] == 1000050110:\n b11 += 1\n elif pdg_arr[ind] == 1000060090:\n c9 += 1\n elif pdg_arr[ind] == 1000060100:\n c10 += 1\n elif pdg_arr[ind] == 1000060110:\n c11 += 1\n elif pdg_arr[ind] == 1000060120:\n c12 += 1\n elif (pdg_arr[ind] == -321 or pdg_arr[ind] == 321 or pdg_arr[ind] == 3122 or pdg_arr[ind] == 3112 or\n pdg_arr[ind] == 3222 or pdg_arr[ind] == -311 or pdg_arr[ind] == 311 or pdg_arr[ind] == -2112 or\n pdg_arr[ind] == -2212 or pdg_arr[ind] == -11 or pdg_arr[ind] == 11 or pdg_arr[ind] == 3212 or\n pdg_arr[ind] == 130):\n # Kaon, Sigma, Lambda, ...\n continue\n else:\n print(\"new PDG: {0:.0f}\".format(pdg_arr[ind]))\n\n return (n, p, piminus, piplus, pi0, gamma, deuteron, triton, he3, alpha, li6, li7, li8, li9, be7, be8, be9, be10,\n b8, b9, b10, b11, c9, c10, c11, c12)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function asks the user to enter four numbers (width and height) for two envelopes. If the entered values are valid, it creates two class objects and calls the function 'compare_envelops' to decide whether it is possible to fit one envelope into the other.
def main(): run = True while run: try: width1, height1 = [float(x) if validation(float(x)) else print(HELP_MSG) for x in get_user_input().split(',')] width2, height2 = [float(x) if validation(float(x)) else print(HELP_MSG) for x in get_user_input().split(',')] e1 = Envelop(height1, width1) e2 = Envelop(height2, width2) if e1.__lt__(e2): print(YES_SECOND_ENVELOP) elif e2.__lt__(e1): print(YES_FIRST_ENVELOP) else: print(NO_ANSWER) except ValueError: print(HELP_MSG) run = is_exit()
[ "def evaluate_input(window, answer1, answer2, cube, cubies):\n\n string_of_answer1 = answer1.get()\n string_of_answer2 = answer2.get()\n\n if len(string_of_answer1) > 0 and len(string_of_answer2) > 0:\n # Converting the strings for a less difficult evaluation\n\n # Lower case\n string_of_answer1 = string_of_answer1.lower()\n string_of_answer2 = string_of_answer2.lower()\n\n # Removing white spaces\n string_of_answer1 = string_of_answer1.replace(\" \", \"\")\n string_of_answer2 = string_of_answer2.replace(\" \", \"\")\n\n # Casting the Strings to an list\n list_of_answer1 = list(string_of_answer1)\n list_of_answer2 = list(string_of_answer2)\n\n # defining side_idx as index for the side of the cube and piece_idx for the piece\n # color is a string which represents the color\n side_idx = -1\n piece_idx = -1\n color = \"\"\n\n allowed_sides = [\"u\", \"f\", \"l\", \"r\", \"d\", \"b\"]\n\n if not list_of_answer1[0].isalpha() or list_of_answer1[0] not in allowed_sides:\n messagebox.showerror(\"Invalid Side\", \"Please choose one of the allowed sides.\")\n answer1.delete(0, END)\n\n # Evaluation of the first answer\n if \"u\" == list_of_answer1[0]:\n side_idx = 0\n\n if \"f\" == list_of_answer1[0]:\n side_idx = 1\n\n if \"r\" == list_of_answer1[0]:\n side_idx = 2\n\n if \"b\" == list_of_answer1[0]:\n side_idx = 3\n\n if \"l\" == list_of_answer1[0]:\n side_idx = 4\n\n if \"d\" == list_of_answer1[0]:\n side_idx = 5\n\n # Evaluation of the second answer\n\n # Which piece shall be chosen?\n if \"0\" == list_of_answer2[0]:\n piece_idx = 0\n\n if \"1\" == list_of_answer2[0]:\n piece_idx = 1\n\n if \"2\" == list_of_answer2[0]:\n piece_idx = 2\n\n if \"3\" == list_of_answer2[0]:\n piece_idx = 3\n\n if \"4\" == list_of_answer2[0]:\n messagebox.showerror(\"Invalid Index\",\n \"The index 4 is invalid. The cross of the cube can not be modified!\")\n\n if \"5\" == list_of_answer2[0]:\n piece_idx = 5\n\n if \"6\" == list_of_answer2[0]:\n piece_idx = 6\n\n if \"7\" == list_of_answer2[0]:\n piece_idx = 7\n\n if \"8\" == list_of_answer2[0]:\n piece_idx = 8\n\n if not list_of_answer2[1].isalpha() or not list_of_answer2[0].isdigit():\n messagebox.showerror(\"Invalid Index\",\n \"Invalid position. 
Please choose an index between 0 and 8.\")\n\n allowed_colors = [\"y\", \"b\", \"r\", \"g\", \"o\", \"w\"]\n\n if list_of_answer2[1].isalpha() and list_of_answer2[1] not in allowed_colors:\n messagebox.showerror(\"Invalid Color\", \"Please choose one of the allowed colors.\")\n\n # Choosing the color.\n if \"y\" == list_of_answer2[1]:\n color = \"yellow\"\n\n if \"b\" == list_of_answer2[1]:\n color = \"blue\"\n\n if \"r\" == list_of_answer2[1]:\n color = \"red\"\n\n if \"g\" == list_of_answer2[1]:\n color = \"green\"\n\n if \"o\" == list_of_answer2[1]:\n color = \"orange\"\n\n if \"w\" == list_of_answer2[1]:\n color = \"white\"\n\n # Coloring the piece, except its a centre piece\n if \"4\" != list_of_answer2[0]:\n if side_idx != -1 and piece_idx != -1:\n ids = get_id_from_cubies(cubies)\n temp_id = ids[side_idx][piece_idx]\n number = int(temp_id[0:2])\n number_color = int(temp_id[3])\n\n if 1 == number_color:\n cubies[number-1].color1 = color\n if 2 == number_color:\n cubies[int(temp_id[0:2])-1].color2 = color\n if 3 == number_color:\n cubies[int(temp_id[0:2])-1].color3 = color\n\n set_colors(window, get_colors_from_cubies(cubies), cube)\n\n # Deleting answer 2 for better user experience\n answer2.delete(0, END)", "def __init__(self, p1, p2, width): # the ellipse class accepting two points and the width \n self.p1 = p1\n self.p2 = p2\n self.width = width\n self.centervalx = (p1.x + p2.x) / 2\n self.centervaly = (p1.y + p2.y) / 2\n self.center = Point(self.centervalx, self.centervaly)\n self.area = self.width / 2 \n\n if self.centervaly == self.p1.y: # If the centaer y is equal to the y value of point 1\n self.b = math.sqrt(math.pow(self.area, 2) - math.pow(self.p1.x -self.centervalx, 2)) # Calculating for values to send to rectangle class\n self.rect = Rectangle(self.centervalx - self.area, self.centervaly + self.b, self.centervaly - self.b, self.centervalx + self.area) # Calling the rectangle class with thedimension of the rectangle\n else:\n self.b = math.sqrt(math.pow(self.area,2) - math.pow(self.p1.y)- self.centervaly, 2)\n self.rect = Rectangle(self.centervalx - self.b, self.centervaly + self.area, self.centervaly - self.area, self.centervalx + self.b)\n self.area = math.pi * self.area * self.b # Calcualting the area of the ellipse", "def check_if_setup_correct(win, width, height):\n try:\n int(width.get())\n int(height.get())\n win.destroy()\n except:\n winwarning = Tk()\n winwarning.title(\"Warning\")\n warningLabel = Label(winwarning, text=\"The values you have entered are not correct, please try again. 
(Width and height must be integer)\")\n warningOkButton = Button(winwarning, text=\"OK\", command=winwarning.destroy)\n warningLabel.grid()\n warningOkButton.grid()", "def test_validate_schema_2(self):\n\n # prepare\n validator = EsdlValidator()\n\n # execute, validate against 1 schema\n result = validator.validate(self.esdlHybrid, [self.schemaTwo])\n validationProducer = result.schemas[0].validations[0]\n validationStorage = result.schemas[0].validations[1]\n validationGasHeater = result.schemas[0].validations[2]\n validationHeatpump = result.schemas[0].validations[3]\n validationCostsInRange = result.schemas[0].validations[4]\n\n # assert\n self.assertEqual(validationProducer.checked, 3, \"there should be 3 checked since there are only 3 producers\")\n self.assertEqual(len(validationProducer.errors), 2, \"there should be 2 errors since 1 producer validates ok\")\n self.assertEqual(validationProducer.errors[0], \"Consumer missing power and marginal costs or no energy profile connected: property port.profile value is None\", \"Warning should say: Consumer missing power and marginal costs or no energy profile connected: property port.profile value is None\")\n\n self.assertEqual(validationStorage.checked, 1, \"there should be 1 checked storage\")\n self.assertEqual(len(validationStorage.errors), 0, \"there should be 0 errors, storage should be correct\")\n\n self.assertEqual(validationGasHeater.checked, 1, \"there should be 1 checked GasHeater\")\n self.assertEqual(len(validationGasHeater.warnings), 0, \"there should be 0 warnings, gasheater should be correct\")\n\n self.assertEqual(validationHeatpump.checked, 1, \"there should be 1 checked HeatPump\")\n self.assertEqual(len(validationHeatpump.warnings), 1, \"there should be 1 warnings, heatpump should be missing a control strategy\")\n\n self.assertEqual(validationCostsInRange.checked, 3, \"there should be 3 checked costs\")\n self.assertEqual(len(validationCostsInRange.warnings), 1, \"there should be 1 warnings\")", "def main():\n print(\"Welcome to the basic estimating program for Spalding Carpet \"\n \"Cleaners!\")\n print(\"Cleaners measure areas of room at a time, then multiply that by a\")\n print(\"price seen fit for labor/size of job.\")\n carpetChoice = input(\"Would user like to measure carpet?(yes or no):\")\n yes = ['y', 'ye', 'yes', 'yess', 'yyes', 'yees']\n if carpetChoice in yes:\n not_number = True\n initialCarpet = 0\n while not_number:\n try:\n initialCarpet = float(input(\"How many square feet of carpet \"\n \"would user like \"\n \"cleaned?(Whole number/decimal):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n priceOfCarpet = carpet_estimate(initialCarpet)\n tileChoice = input(\"Are there also areas of tile and grout?\"\n \"(yes or no):\")\n if tileChoice in yes:\n not_number = True\n initialTile = 0\n while not_number:\n try:\n initialTile = float(input(\"How many square feet of tile \"\n \"would user like cleaned?(\"\n \"Whole number/decimal):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n priceOfTile = tile_estimate(initialTile)\n upholsteryChoice = input(\"Would user like to estimate upholstery\"\n \" as well?(yes or no):\")\n if upholsteryChoice in yes:\n not_number = True\n initialUpholstery = 0\n while not_number:\n try:\n initialUpholstery = int(input(\"How many sofas would \"\n \"user like cleaned?\"\n \"(3 bottom cushions)\"\n \"(Enter whole number or\"\n \" 0):\"))\n not_number = False\n except 
ValueError:\n print(\"Your input is invalid, please try again.\")\n totalUpholsteryPrice = upholstery_estimate(initialUpholstery)\n totalEstimate = (priceOfCarpet + priceOfTile +\n totalUpholsteryPrice)\n print(\"Your estimate for carpet is: $\" +\n format(priceOfCarpet, \".2f\"),\n \",tile is: ${0}, and upholstery is: ${1}\".\n format(format(priceOfTile, \".2f\"),\n format(totalUpholsteryPrice, \".2f\")))\n print(\"Coming to a combined total of\",\n format(totalEstimate, '.2f'), \"dollars.\")\n else:\n print(\"User selected no upholstery.\")\n print(\"Your estimate for carpet is: $\" +\n (format(priceOfCarpet, \".2f\")),\n \"and tile is: $\" + format(priceOfTile, '.2f'))\n print(\"Coming to a combined total of\",\n format(priceOfCarpet + priceOfTile, \".2f\"), \"dollars.\")\n else:\n print(\"User selected no tile.\")\n upholsteryChoice = input(\"Would user like to measure \"\n \"upholstery?(yes or no):\")\n if upholsteryChoice in yes:\n not_number = True\n initialUpholstery = 0\n while not_number:\n try:\n initialUpholstery = int(input(\"How many sofas would \"\n \"user like cleaned?\"\n \"(3 bottom cushions)\"\n \"(Enter whole number or \"\n \"0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n totalUpholsteryPrice = upholstery_estimate(initialUpholstery)\n totalUpholsteryPlusCarpet = \\\n totalUpholsteryPrice + priceOfCarpet\n print(\"Your estimate for carpet is: $\" +\n format(priceOfCarpet, '.2f'),\n \",upholstery is:$\" + format(totalUpholsteryPrice, \".2f\"))\n print(\"Coming to a combined total of\",\n format(totalUpholsteryPlusCarpet, '.2f'), \"dollars.\")\n else:\n print(\"User selected no upholstery.\")\n print(\"Your estimate for carpet is\",\n format(priceOfCarpet, \".2f\"), \"dollars.\")\n elif carpetChoice != 'yes':\n print(\"User selected no carpet.\")\n tileChoice = input(\"Would user like to measure tile and grout?\"\n \"(yes or no):\")\n if tileChoice in yes:\n not_number = True\n initialTile = 0\n while not_number:\n try:\n initialTile = float(input(\"How many square feet of tile \"\n \"would user like cleaned?(\"\n \"Whole number/integer):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n totalTilePrice = tile_estimate(initialTile)\n upholsteryChoice = input(\"Would user also like to measure\"\n \" upholstery?(yes or no):\")\n if upholsteryChoice in yes:\n not_number = True\n initialUpholstery = 0\n while not_number:\n try:\n initialUpholstery = int(input(\"How many sofas would\"\n \" user like cleaned? 
(\"\n \"3 bottom cushions)(\"\n \"Enter whole number or \"\n \"decimal):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n totalUpholsteryPrice = upholstery_estimate(initialUpholstery)\n totalTilePlusUpholstery = totalUpholsteryPrice + totalTilePrice\n print(\"Your estimate price for tile is: $\" +\n format(totalTilePrice, '.2f'),\n \"and upholstery is: $\" +\n format(totalUpholsteryPrice, \".2f\"))\n print(\"For a combined total of\",\n format(totalTilePlusUpholstery, \".2f\"), \"dollars.\")\n else:\n print(\"User selected no upholstery.\")\n print(\"Your estimate price for tile is\",\n format(totalTilePrice, \".2f\"), \"dollars.\")\n else:\n print(\"User selected no tile.\")\n upholsteryChoice = input(\"Would user like to measure \"\n \"upholstery?(yes or no):\")\n if upholsteryChoice in yes:\n not_number = True\n initialUpholstery = 0\n while not_number:\n try:\n initialUpholstery = int(\n input(\"How many sofas would user like cleaned?\"\n \"(3 bottom cushions)(Enter whole number or\"\n \" 0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n totalUpholsteryPrice = upholstery_estimate(initialUpholstery)\n print(\"Your estimate price for upholstery is: $\" +\n format(totalUpholsteryPrice, \".2f\"), \"dollars.\")\n else:\n print(\"User selected no upholstery.\")\n print(\"I apologize for being no use.\")\n print(\"Have a nice day!\")", "def run_creating_picture_boxes(self, w_box, h_box, s_box, p_box=None):\n input_boxes = [w_box, h_box, s_box]\n if p_box is not None:\n input_boxes.append(p_box)\n # variables necessary for re-typing data in input box\n new_width = new_height = new_steps = new_probability = None\n while True:\n # mouse position and quit button exception\n mouse_pos = pg.mouse.get_pos()\n for event in pg.event.get():\n if event.type == pg.QUIT:\n raise gexc.QuitButtonError()\n pg.quit()\n # getting width data\n width = w_box.click_on_box(event, mouse_pos)\n if width is not None:\n # displaying \"OK!\" if enter is typed\n self.display_text(\"OK!\", 950, 250)\n new_width = width\n # getting height data\n height = h_box.click_on_box(event, mouse_pos)\n if height is not None:\n # displaying \"OK!\" if enter is typed\n self.display_text(\"OK!\", 950, 350)\n new_height = height\n # getting steps data\n steps = s_box.click_on_box(event, mouse_pos)\n if steps is not None:\n # displaying \"OK!\" if enter is typed\n self.display_text(\"OK!\", 950, 450)\n new_steps = steps\n # getting probability data\n if p_box is not None:\n probability = p_box.click_on_box(event, mouse_pos)\n if probability is not None:\n # displaying \"OK!\" if enter is typed\n self.display_text(\"OK!\", 950, 550)\n new_probability = probability\n # covering all boxes for backspace\n self.cover_box(700, 250, 200, 40)\n self.cover_box(700, 350, 200, 40)\n self.cover_box(700, 450, 200, 40)\n if p_box is not None:\n self.cover_box(700, 550, 200, 40)\n # showing all boxes\n for input_box in input_boxes:\n input_box.display_box(self.screen)\n pg.display.update()\n # if all input boxes were entered function returns data from them\n if new_width and new_height and new_steps:\n if p_box is None or new_probability:\n return new_width, new_height, new_steps, new_probability", "def main():\n user_height = input_height()\n check_height(user_height)", "def main():\n p1 = Point(0,0) \n p2 = Point(0,0)\n p3 = Point(0,0)\n e1 = Ellipse(p1, p2, 2)\n e2 = Ellipse(p2, p3, 3)\n overlap = ComputeRectangleValues(e1, e2)\n\n success = 
ComputeOverlapOfEllipses(overlap, 10000)\n print(\"The overlapping area of an ellipse is: \" + str(success.hits))", "def test_check_width(self):\n s1 = Square(2)\n self.assertEqual(s1.width, 2)\n\n s2 = Square(10)\n self.assertEqual(s2.width, 10)\n\n s3 = Square(2, 0, 0, 12)\n self.assertEqual(s3.width, 2)", "def upholstery_estimate(start_upholstery):\n not_number = True\n numberOfLoveSeats = 0\n while not_number:\n try:\n numberOfLoveSeats = int(\n input(\"How many love seats would user like cleaned?\"\n \"(2 bottom cushions)(Enter whole number or 0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n not_number = True\n numberOfRecliners = 0\n while not_number:\n try:\n numberOfRecliners = int(\n input(\"How many recliners would user like cleaned?/\"\n \"(1 bottom cushion)(Enter whole number or 0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n not_number = True\n numberOfPillows = 0\n while not_number:\n try:\n numberOfPillows = int(\n input(\"How many pillows would user like cleaned?\"\n \"(Enter whole number or 0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n not_number = True\n numberOfOttomans = 0\n while not_number:\n try:\n numberOfOttomans = int(\n input(\"How many ottomans would user like cleaned?\"\n \"(Enter whole number or 0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n not_number = True\n numberOfDiningChairs = 0\n while not_number:\n try:\n numberOfDiningChairs = int(\n input(\"How many dining room chairs would user\"\n \" like cleaned?(Enter whole number or 0):\"))\n not_number = False\n except ValueError:\n print(\"Your input is invalid, please try again.\")\n biggerThanThat = input(\"Do you have any couches larger than previously\"\n \" described?(yes or no):\")\n yes = ['y', 'ye', 'yes', 'yess', 'yyes', 'yees']\n if biggerThanThat in yes:\n biggerCushions = int(input(\"How many bottom cushions does it have?:\"))\n totalUpholsteryPrice = (start_upholstery * 85) + \\\n (numberOfLoveSeats * 65) + \\\n (numberOfRecliners * 45) + \\\n (20 * biggerCushions + 25) + \\\n (numberOfDiningChairs * 10) + \\\n (numberOfPillows * 5) + (numberOfOttomans * 15)\n else:\n print(\"You selected no larger couches.\")\n totalUpholsteryPrice = (start_upholstery * 85) + \\\n (numberOfLoveSeats * 65) + \\\n (numberOfRecliners * 45) + \\\n (numberOfDiningChairs * 10) + \\\n (numberOfPillows * 5) + (numberOfOttomans * 15)\n return totalUpholsteryPrice", "def in_out(xs: Union[int, float], ys: Union[int, float], side: Union[int, float]):\n xUser = float(input(\"Enter the x coordinate: \"))\n yUser = float(input(\"Enter the y coordinate: \"))\n\n # Check whether the user input x coordinate is to the right of the left side of the square and to the left of the\n # right side of the square\n checkX = ((xs <= xUser) & (xUser <= (xs + side)))\n # Check whether the user input y coordinate is above the bottom side of the square and below the\n # top side of the square\n checkY = ((ys <= yUser) & (yUser <= (ys + side)))\n\n # The question asks that the true or false be printed\n print(checkX & checkY)", "def min_max_auto_replaced(box):\r\n width = box.width\r\n height = box.height\r\n min_width = box.min_width\r\n min_height = box.min_height\r\n max_width = max(min_width, box.max_width)\r\n max_height = max(min_height, box.max_height)\r\n\r\n # (violation_width, violation_height)\r\n 
violations = (\r\n 'min' if width < min_width else 'max' if width > max_width else '',\r\n 'min' if height < min_height else 'max' if height > max_height else '')\r\n\r\n # Work around divisions by zero. These are pathological cases anyway.\r\n # TODO: is there a cleaner way?\r\n if width == 0:\r\n width = 1e-6\r\n if height == 0:\r\n height = 1e-6\r\n\r\n # ('', ''): nothing to do\r\n if violations == ('max', ''):\r\n box.width = max_width\r\n box.height = max(max_width * height / width, min_height)\r\n elif violations == ('min', ''):\r\n box.width = min_width\r\n box.height = min(min_width * height / width, max_height)\r\n elif violations == ('', 'max'):\r\n box.width = max(max_height * width / height, min_width)\r\n box.height = max_height\r\n elif violations == ('', 'min'):\r\n box.width = min(min_height * width / height, max_width)\r\n box.height = min_height\r\n elif violations == ('max', 'max'):\r\n if max_width / width <= max_height / height:\r\n box.width = max_width\r\n box.height = max(min_height, max_width * height / width)\r\n else:\r\n box.width = max(min_width, max_height * width / height)\r\n box.height = max_height\r\n elif violations == ('min', 'min'):\r\n if min_width / width <= min_height / height:\r\n box.width = min(max_width, min_height * width / height)\r\n box.height = min_height\r\n else:\r\n box.width = min_width\r\n box.height = min(max_height, min_width * height / width)\r\n elif violations == ('min', 'max'):\r\n box.width = min_width\r\n box.height = max_height\r\n elif violations == ('max', 'min'):\r\n box.width = max_width\r\n box.height = min_height", "def compare_S2array(vmin=40,vmax=80, deltaX=40, deltadeltaX=10):\n\n\n\tgalaxynameM51 = 'M51'\n\tgalaxynameM33 = 'M33'\n\tfilenameM51 = 'paws_norot'\n\tfilenameM33 = 'm33.co21_iram_CLEANED'\n\tdrawmap = False\n\tif deltadeltaX == 1:\n\t\tsavename = 'savedfile_M51andM33_comparison_MAXRES'\n\telse:\n\t\tsavename = 'savedfile_M51andM33_comparison'\n\n\tyminM51 = 200\t\t\t# 'ymin' for M51.\n\tymaxM51 = 400\t\t\t# etc.\n\txminM51 = 360\n\txmaxM51 = 560\n\t\n\tyminM33 = 525\t\t\t# 'ymin' for M33.\n\tymaxM33 = 725\t\t\t# etc.\n\txminM33 = 288\n\txmaxM33 = 488\n\n\n\tsubcubeM51 = Cubes.cubegen(vmin,vmax,yminM51,ymaxM51,xminM51,xmaxM51, filenameM51, drawmap)\t# Subcube for M51.\n\tsubcubeM51_n = Cubes.cubegen(0,20,yminM51,ymaxM51,xminM51,xmaxM51, filenameM51, drawmap)\t# Noise subcube for M51.\n\n\tsubcubeM33 = Cubes.cubegen(vmin,vmax,yminM33,ymaxM33,xminM33,xmaxM33, filenameM33, drawmap)\t# Subcube for M33.\n\tsubcubeM33_n = Cubes.cubegen(0,20,yminM33,ymaxM33,xminM33,xmaxM33, filenameM33, drawmap)\t# Noise subcube for M33.\n\n\tS2_M51 = Cubes.structgen(subcubeM51,deltaX,0,deltadeltaX,1,False) - Cubes.structgen(subcubeM51_n,deltaX,0,deltadeltaX,1,False)\n\tS2_M33 = Cubes.structgen(subcubeM33,deltaX,0,deltadeltaX,1,False) - Cubes.structgen(subcubeM33_n,deltaX,0,deltadeltaX,1,False)\n\n\n\n\t# File-saving.\n\tf = file(savename+\".bin\",\"wb\")\n\tnp.save(f,S2_M51)\n\tnp.save(f,S2_M33)\n\tf.close()", "def test_boyd_7(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n\n\n inlet_depth=0.150\n outlet_depth=0.15\n inlet_velocity=1.00\n outlet_velocity=0.5\n \n culvert_length=10.0\n culvert_width=3.6\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n culvert_slope=1 # % Downward\n z_in = 10.0\n z_out = z_in-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 
0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n\n\n Q_expected = 0.5526\n v_expected = 1.146\n d_expected = 0.1339\n \n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n \n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss)\n if verbose:\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST01 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def create_emulators ( sun_angles, x_min, x_max, n_train=250, n_validate=1000 ):\n \n from gp_emulator import GaussianProcess, lhd\n import scipy.stats as ss\n\n n_params = x_min.size\n ## Putting boundaries on parameter space is useful\n #x_min = np.array ( 7*[ 0.001,] )\n #x_max = np.array ( 7*[0.95, ] )\n #x_max[-1] = 10.\n #x_min[1] = 0.\n #x_max[1] = 5.\n #x_min[4] = 0.\n #x_max[4] = 5.\n # First we create the sampling space for the emulators. 
In the\n # absence of any further information, we assume a uniform \n # distribution between x_min and (x_max - x_min):\n dist = []\n for k in xrange( n_params ):\n dist.append ( ss.uniform ( loc=x_min[k], \\\n scale = x_max[k] - x_min[k] ) )\n # The training dataset is obtaiend by a LatinHypercube Design\n x_train = lhd(dist=dist, size=n_train )\n # The validation dataset is randomly drawn from within the \n # parameter boundaries\n x_validate = np.random.rand ( n_validate, n_params )*(x_max - x_min) + \\\n x_min\n emu_vis = {}\n emu_nir = {}\n # We next loop over the input sun angles\n for sun_angle in sun_angles:\n # If we've done this sun angle before, skip it\n if not emu_vis.has_key ( sun_angle ):\n albedo_train = []\n albedo_validate = []\n # The following loop creates the validation dataset\n for i in xrange( n_validate ):\n [a_vis, a_nir] = two_stream_model ( x_validate[i,:], \\\n sun_angle )\n albedo_validate.append ( [a_vis, a_nir] )\n # The following loop creates the training dataset\n for i in xrange ( n_train ):\n [a_vis, a_nir] = two_stream_model ( x_train[i,:], \\\n sun_angle )\n albedo_train.append ( [a_vis, a_nir] )\n\n albedo_train = np.array ( albedo_train )\n albedo_validate = np.array ( albedo_validate )\n # The next few lines create and train the emulators\n # GP for visible\n gp_vis = GaussianProcess ( x_train, albedo_train[:,0])\n theta = gp_vis.learn_hyperparameters(n_tries=4)\n \n # GP for NIR\n gp_nir = GaussianProcess ( x_train, albedo_train[:,1])\n theta = gp_nir.learn_hyperparameters(n_tries=4)\n pred_mu, pred_var, par_dev = gp_vis.predict ( x_validate )\n r_vis = (albedo_validate[:,0] - pred_mu)\n pred_mu, pred_var, par_dev = gp_nir.predict ( x_validate )\n r_nir = (albedo_validate[:,1] - pred_mu)\n # Report some goodness of fit. 
Could do with more\n # stats, but for the time being, this is enough.\n print \"Sun Angle: %g, RMSE VIS: %g, RMSE NIR: %g\" % \\\n ( sun_angle, r_vis.std(), r_nir.std() )\n emu_vis[sun_angle] = gp_vis\n emu_nir[sun_angle] = gp_nir\n emulators = {}\n for sun_angle in emu_vis.iterkeys():\n emulators[sun_angle] = [ emu_vis[sun_angle], emu_nir[sun_angle] ]\n return emulators", "def test_boyd_8(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n culvert_slope=1 # Downward\n\n inlet_depth=0.500\n outlet_depth=0.700\n inlet_velocity=1.50\n outlet_velocity=0.50\n \n culvert_length=10.0\n culvert_width=3.60\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n z_in = 0.0\n z_out = z_in-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n \n \n Q_expected = 0.224\n v_expected = 0.152\n d_expected = 0.409\n\n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n\n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss)\n if verbose:\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST02 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def main():\n length = input(\"Enter side lengths: \")\n if equilateral(length):\n print(\"The triangle is an equilateral triangle.\")\n else:\n print(\"Sadly, the triangle is not equilateral.\\\n Find a better triangle.\")", "def make_objects(parameters):\n\n # to hold all the objects\n objects_dict = {}\n\n # make the composition space object\n if 'CompositionSpace' in parameters:\n composition_space = general.CompositionSpace(\n parameters['CompositionSpace'])\n else:\n print('Input file must contain a \"CompositionSpace\" block.')\n print(\"Quitting...\")\n quit()\n\n objects_dict['composition_space'] = composition_space\n\n # make the constraints object\n if 'Constraints' in parameters:\n constraints = development.Constraints(parameters['Constraints'],\n composition_space)\n else:\n constraints = development.Constraints('default', composition_space)\n\n objects_dict['constraints'] = constraints\n\n # make the geometry object\n if 'Geometry' in parameters:\n geometry = development.Geometry(parameters['Geometry'])\n else:\n geometry = development.Geometry('default')\n\n objects_dict['geometry'] = geometry\n\n # make the 
development object\n if 'Development' in parameters:\n developer = development.Developer(parameters['Development'], geometry)\n else:\n developer = development.Developer('default', geometry)\n\n objects_dict['developer'] = developer\n\n # make the redundancy guard object\n if 'RedundancyGuard' in parameters:\n redundancy_guard = development.RedundancyGuard(\n parameters['RedundancyGuard'])\n else:\n redundancy_guard = development.RedundancyGuard('default')\n\n objects_dict['redundancy_guard'] = redundancy_guard\n\n # make the id generator\n id_generator = general.IDGenerator()\n objects_dict['id_generator'] = id_generator\n\n # make the organism creators\n initial_organism_creators = make_organism_creators(parameters,\n composition_space)\n\n # if more than one organism creator, sort them so that the attempts-based\n # ones are at the front and the successes-based ones are at the back\n if len(initial_organism_creators) > 1:\n initial_organism_creators.sort(key=lambda x: x.is_successes_based)\n\n objects_dict['organism_creators'] = initial_organism_creators\n\n # the number of energy calculations to run at a time\n if 'NumCalcsAtOnce' not in parameters:\n num_calcs_at_once = 1\n elif parameters['NumCalcsAtOnce'] in (None, 'default'):\n num_calcs_at_once = 1\n else:\n num_calcs_at_once = parameters['NumCalcsAtOnce']\n\n objects_dict['num_calcs_at_once'] = num_calcs_at_once\n\n # get the run title\n if 'RunTitle' not in parameters:\n run_dir_name = 'garun'\n elif parameters['RunTitle'] in (None, 'default'):\n run_dir_name = 'garun'\n else:\n run_dir_name = 'garun_' + str(parameters['RunTitle'])\n\n objects_dict['run_dir_name'] = run_dir_name\n\n # make the energy calculator\n energy_calculator = make_energy_calculator(parameters, geometry,\n composition_space)\n objects_dict['energy_calculator'] = energy_calculator\n\n # make the stopping criteria\n stopping_criteria = make_stopping_criteria(parameters, composition_space)\n objects_dict['stopping_criteria'] = stopping_criteria\n\n # default fractions for the variations\n default_variation_fractions = {}\n default_variation_fractions['structure_mut'] = 0.1\n default_variation_fractions['num_stoichs_mut'] = 0.1\n if len(composition_space.get_all_swappable_pairs()) > 0:\n default_variation_fractions['mating'] = 0.7\n default_variation_fractions['permutation'] = 0.1\n else:\n default_variation_fractions['mating'] = 0.8\n default_variation_fractions['permutation'] = 0.0\n\n # make the variations\n variations_list = make_variations(parameters, default_variation_fractions,\n composition_space)\n\n # check that at least one variation has been used\n if len(variations_list) == 0:\n print('At least one variation must be used. 
Either leave entire '\n '\"Variations\" block blank to use default variations, or specify '\n 'at least one variation within the \"Variations\" block.')\n print('Quitting...')\n quit()\n\n # check that the variations' fraction variables sum to 1\n frac_sum = 0.0\n for variation in variations_list:\n frac_sum = frac_sum + variation.fraction\n if frac_sum < 0.999 or frac_sum > 1.001:\n print(\"The Variations' fraction values must sum to 1.\")\n print('Quitting...')\n quit()\n\n objects_dict['variations'] = variations_list\n\n # make the pool and selection\n if 'Pool' not in parameters:\n pool = general.Pool(None, composition_space, run_dir_name)\n else:\n pool = general.Pool(parameters['Pool'], composition_space,\n run_dir_name)\n\n if 'Selection' not in parameters:\n selection = general.SelectionProbDist(None, pool.size)\n else:\n selection = general.SelectionProbDist(parameters['Selection'],\n pool.size)\n\n pool.selection = selection\n objects_dict['pool'] = pool\n\n return objects_dict", "def input_frame_dims():\r\n width = None\r\n height = None\r\n while width or height is None:\r\n try:\r\n width = int(input(\"Enter width in inches (use whole numbers only): \").strip())\r\n height = int(input(\"Enter height in inches (use whole numbers only): \").strip())\r\n return width, height\r\n except(ValueError):\r\n print(\"Dimensions must be entered in whole numbers only. Please try again.\\n\")\r\n except:\r\n print(\"Unknown error occurred. No data captured. Please try again.\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function checks whether the entered value is a number greater than 0 and returns 'True' if so. Otherwise it prints a message with further instructions and returns 'False'.
def validation(value):
    if isinstance(value, float) and value > 0:
        return True
    else:
        print(HELP_MSG)
        return False
[ "def validation(value):\n\n if isinstance(value, int) and value > 0:\n return True\n else:\n print(HELP_MSG)\n return False", "def has_zero(self):\n if self.min() <= 0 and self.max() >= 0:\n return True\n else:\n return False", "def check_input(self, inp):\r\n try:\r\n current = float(inp)\r\n if current >= 0 and self.condition(current):\r\n self.previous = inp\r\n return True\r\n else:\r\n return False\r\n except ValueError:\r\n return False", "def validate_k_value(self):\n valid = (self.k_value >= 0) and (is_number(self.k_value))\n # set flag\n self.k_value_valid = valid\n return valid", "def is_valid_non_negative_integer(request_prompt, error_prompt):\n valid = False\n while not valid:\n value_string = input(request_prompt)\n try:\n value = int(value_string)\n if value < 0:\n print(error_prompt)\n else:\n valid = True\n except (ValueError):\n print(error_prompt)\n return value", "def is_valid_positive_integer(request_prompt, error_prompt):\n valid = False\n while not valid:\n value_string = input(request_prompt)\n try:\n value = int(value_string)\n if value <= 0:\n print(error_prompt)\n else:\n valid = True\n except (ValueError):\n print(error_prompt)\n return value", "def check_user_input(self) -> bool:\n print('check_user_input')\n if not self.isEnabled:\n return False\n # We check amount > 0 only for now.\n if int(self.unregister_amount.text()) <= 0:\n QtWidgets.QMessageBox.warning(\n self, u'錯誤', u'除帳數量不可小於等於0!')\n return False\n return True", "def check_var_values_num(self):\n color_error = \"red\"\n color_valid = \"black\"\n valid = True\n error_message = \"\"\n message_sp_flux = \"The unit is already provided in the Sources (Fixed Parameter Configuration).\"\n\n for key in self.var_param_entries_num.keys():\n entries = self.var_param_entries_num[key]\n entry_min = entries[1]\n entry_max = entries[2]\n entry_steps = entries[3]\n entry_units = entries[4]\n\n entry_min.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n entry_max.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n entry_steps.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n entry_units.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n\n min_value = entry_min.get()\n max_value = entry_max.get()\n steps_value = entry_steps.get()\n units_value = entry_units.get()\n\n if not min_value.replace('.', '', 1).isdigit():\n if \"- Min must be numeric\" not in error_message:\n error_message += \"- Min must be numeric\" + \"\\n\"\n valid = False\n entry_min.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n if not max_value.replace('.', '', 1).isdigit():\n if \"- Max must be numeric\" not in error_message:\n error_message += \"- Max must be numeric\" + \"\\n\"\n valid = False\n entry_max.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n if not steps_value.isdigit():\n if \"- Steps must be a positive integer\" not in error_message:\n error_message += \"- Steps must be a positive integer\" + \"\\n\"\n valid = False\n entry_steps.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n elif int(steps_value) < 2:\n if \"- Steps must be positive and greater than 1\" not in error_message:\n error_message += \"- Steps must be positive and greater than 1\" + \"\\n\"\n valid = False\n entry_steps.config(highlightbackground=color_error, 
highlightcolor=color_error, highlightthickness=2)\n if \"sp_flux\" in self.var_param_entries_num.keys():\n if units_value:\n if \"- Units for sp_flux must be empty. \" + message_sp_flux not in error_message:\n error_message += \"- Units for sp_flux must be empty. \" + message_sp_flux\n valid = False\n entry_units.config(highlightbackground=color_error, highlightcolor=color_error,\n highlightthickness=2)\n\n if not valid:\n self.valid_num = False\n tkMessageBox.showerror(\"Invalid Input\", error_message)\n else:\n self.valid_num = True", "def checkFloat(self, value):\n try:\n if float(value) >= 0.0:\n return True\n else:\n return False\n except ValueError:\n return False", "def print_sign(number):\n\n if number > 0:\n #Evaluates whether number is greater than zero\n print \"Higher than 0\"\n #Prints a message that the number is greater than zero\n elif number < 0:\n #Evaluates whether number is less than zero\n print \"Lower than 0\"\n #Prints a message that the number is less than zero\n else:\n #Captures the only other remaining scenario (number == 0)\n print \"Zero\"\n #Prints a message that the number is zero", "def validate_input(self) -> bool:\n if not 5 <= self.number <= 20:\n raise NotValidInput(\"Your input is not valid\")\n return True", "def positive(x):\r\n return x > 0", "def is_positive(x: int) -> bool:\n return x > 0", "def is_all_negative(arr):\n for e in arr:\n if e >= 0:\n return False\n return True", "def check_null_or_valid(row_data):\n no_na = row_data.fillna(0)\n numeric = pd.to_numeric(no_na, errors='coerce')\n # print(numeric)\n ge0 = numeric >= 0\n return ge0", "def CheckValue(self): \n try:\n if self.TargetWells<=0 or self.TargetWells>=(Config.NbWellsWidth*Config.NbWellsLength): # Check if the value is in the limit depending of the file Config.py\n self.Coord[\"Outrange\"]=\"Values out of the limits\" # If they are not we save the result in self.Coord[\"Outrange\"]\n print('The Wells '+str(self.TargetWells)+\" is out of range. Please check Config.py\") # We print a message to the user\n else :\n self.Coord[\"Outrange\"]=\"Values within limits\" # If they are in the limits we save the answer in self.Coord[\"Outrange\"]\n \n except TypeError: # In case the user has written a <str> instead of a number\n print('Please write numbers') \n self.Coord[\"Outrange\"]=\"Values out of the limits\"", "def test_zero_result(self):\n list_numbers = [-1, 0, -5, -6, -3]\n self.assertEqual(max_integer(list_numbers), 0)", "def is_valid_length(user_input):\r\n if user_input > 0 and user_input % 0.5 == 0:\r\n return True\r\n else:\r\n print(\"Error with your input.\")\r\n return False", "def is_positive(number):\n if number > 0:\n return True\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
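The metadata above flags each row for a triplet objective over (query, document, negatives). As a rough, hypothetical illustration only — the row dict, its field names, and any surrounding training setup are assumptions for this sketch, not part of the dataset — one way such a row could be unpacked into (anchor, positive, negative) training triplets:

def row_to_triplets(row):
    # anchor: the natural-language query; positive: the matching code snippet;
    # negatives: similar but non-matching snippets from the same row
    anchor = row["query"]
    positive = row["document"]
    for negative in row["negatives"]:
        yield anchor, positive, negative

# hypothetical usage with a toy row shaped like the one above
example_row = {
    "query": "Checks that the entered value is a number greater than 0.",
    "document": "def validation(value): ...",
    "negatives": ["def validation(value):  # int variant ...",
                  "def has_zero(self): ..."],
}
triplets = list(row_to_triplets(example_row))
print(len(triplets))  # one triplet per negative in the row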
Calculate the slope and aspect from the input DEM
def get_slope_aspect(input_dem, output_folder, mask):
    arcpy.env.overwriteOutput = True

    input_folder = os.path.dirname(input_dem)
    arcpy.env.workspace = input_folder

    dem_slope = pjoin(input_folder, 'slope')
    dem_aspect = pjoin(input_folder, 'aspect')

    # Derive slope and aspect ...
    cal_slope_aspect(input_dem, dem_slope, dem_aspect)

    aspect_rec_init = pjoin(input_folder, 'aspect_r_i')
    slope_rec_init = pjoin(input_folder, 'slope_r_i')
    aspect_rec = pjoin(output_folder, 'aspect_r')
    slope_rec = pjoin(output_folder, 'slope_r')

    # Derive reclassified aspect...
    arcpy.AddMessage('Remap the aspect into classes of 1 ~ 9 ...')
    reclassify(dem_aspect, "-1 0 9;0 22.5 1;22.5 67.5 2;67.5 112.5 3;\
               112.5 157.5 4;157.5 202.5 5;202.5 247.5 6;247.5 292.5 7;\
               292.5 337.5 8;337.5 360 1", aspect_rec_init)

    value_max = arcpy.GetRasterProperties_management(
        dem_slope, "MAXIMUM").getOutput(0)
    if float(value_max) < 20:
        value_max = 20.0001

    # remap is minimum inclusive but maximum exclusive. using .0001 to conform
    # to the standard minimum is exclusive and maximum is inclusive
    remap = "0 0 1;0.0001 5 2;5.0001 10 3;10.0001 15 4;\
            15.0001 20 5;20.0001 " + \
            str(math.ceil(float(value_max))) + " 6"

    arcpy.AddMessage('Remap the slope into classes of 1 ~ 6 ...')
    # Derive reclassified slope...
    reclassify(dem_slope, remap, slope_rec_init)

    extract_by_mask(aspect_rec_init, mask, aspect_rec)
    extract_by_mask(slope_rec_init, mask, slope_rec)

    g_list = arcpy.ListRasters('g_g*')
    if len(g_list) != 0:
        for g_file in g_list:
            arcpy.Delete_management(g_file)
    if arcpy.Exists(dem_slope):
        arcpy.Delete_management(dem_slope)
    if arcpy.Exists(dem_aspect):
        arcpy.Delete_management(dem_aspect)
    if arcpy.Exists(slope_rec_init):
        arcpy.Delete_management(slope_rec_init)
    if arcpy.Exists(aspect_rec_init):
        arcpy.Delete_management(aspect_rec_init)

    return slope_rec, aspect_rec
[ "def calculate_slope_and_aspect(dem: np.ndarray) -> tuple[np.ndarray, np.ndarray]:\n # TODO: Figure out why slope is called slope_px. What unit is it in?\n # TODO: Change accordingly in the get_horizontal_shift docstring.\n\n # Calculate the gradient of the slope\n gradient_y, gradient_x = np.gradient(dem)\n\n slope_px = np.sqrt(gradient_x ** 2 + gradient_y ** 2)\n aspect = np.arctan(-gradient_x, gradient_y)\n aspect += np.pi\n\n return slope_px, aspect", "def slopeAspect(self, DMT, x_size, y_size):\n\n\t\ttry:\n\t\t\tx, y = np.gradient(DMT)\n\t\t\tslope = np.arctan(np.sqrt((x / x_size) ** 2.0 + (y / y_size) **\n\t\t\t 2.0)) * 180 / np.pi\n\t\t\taspect = 270 + np.arctan(x / y) * 180 / np.pi\n\t\t\taspect = np.where(y > 0, aspect, aspect - 180)\n\n\t\t\t# Replacing nan values to 0 and inf to value\n\t\t\tslope = np.nan_to_num(slope)\n\t\t\taspect = np.nan_to_num(aspect)\n\t\t\tdel x\n\t\t\tdel y\n\t\texcept ArithmeticError:\n\t\t\traise ArithmeticError(\"Slope and aspect has not been calculated.\")\n\n\t\treturn slope, aspect", "def calc_slope_and_aspect(self, dem_data, grid_res, onlytopo=False):\n # simplify notation\n i = self.i_idx\n j = self.j_idx\n valid = self.valid_idx\n if onlytopo:\n i = i[self.topography_encounter_idx]\n j = j[self.topography_encounter_idx]\n valid = valid[self.topography_encounter_idx]\n\n d = dem_data\n\n # Sobel filter in Y direction\n gy = np.ma.zeros(self.N) + np.nan\n gy[self.valid_idx] = (d[i[valid] - 1, j[valid] - 1] +\n 2 * d[i[valid], j[valid] - 1] +\n d[i[valid] + 1, j[valid] - 1] -\n d[i[valid] - 1, j[valid] + 1] -\n 2 * d[i[valid], j[valid] + 1] -\n d[i[valid] + 1, j[valid] + 1]) / (8 * grid_res)\n\n # Sobel filter in X direction\n gx = np.ma.zeros(self.N) + np.nan\n gx[valid] = (d[i[valid] + 1, j[valid] - 1] +\n 2 * d[i[valid] + 1, j[valid]] +\n d[i[valid] + 1, j[valid] + 1] -\n d[i[valid] - 1, j[valid] - 1] -\n 2 * d[i[valid] - 1, j[valid]] -\n d[i[valid] - 1, j[valid] + 1]) / (8 * grid_res)\n\n slope = np.ma.zeros(self.N)\n slope[valid] = np.arctan(np.sqrt(gy[valid]**2 +\n gx[valid]**2)) * 180 / np.pi\n aspect = np.ma.zeros(self.N) + np.nan\n aspect[valid] = (np.arctan2(gy[valid],\n -gx[valid]) + np.pi) * 180 / np.pi\n\n slope.mask = ~valid\n aspect.mask = ~valid\n self.slope = slope\n self.aspect = aspect", "def aspect(dem):\n\n x, y = np.gradient(dem)\n aspect = np.arctan2(x, y)\n aspect += np.pi\n aspect = np.degrees(aspect)\n return aspect", "def slope(p1,p2):\n return (p2[1] - p1[1])/(p2[0] - p1[0])", "def get_slope(self, area) -> 'GeoData':\n ((x_min, x_max), (y_min, y_max)) = area\n\n # extract DEM on a slightly large area to avoid border effects\n dem = self.get_elevation([[x_min - self._elevation_map.pixel_size,\n x_max + self._elevation_map.pixel_size],\n [y_min - self._elevation_map.pixel_size,\n y_max + self._elevation_map.pixel_size]])\n z = dem.data.view(np.float64)\n assert dem.data.shape == z.shape, 'Apparently, the returned DEM is not an array of float'\n\n def rolled(x_roll, y_roll):\n \"\"\"Returns a view of the DEM array rolled on X/Y axis\"\"\"\n return np.roll(np.roll(z, x_roll, axis=0), y_roll, axis=1)\n\n # compute elevation change on x and y direction, cf:\n # http://desktop.arcgis.com/fr/arcmap/10.3/tools/spatial-analyst-toolbox/how-slope-works.htm\n dzdx = rolled(-1, -1) + 2 * rolled(-1, 0) + rolled(-1, 1) - \\\n rolled(1, -1) - 2 * rolled(1, 0) - rolled(1, -1)\n dzdx /= (8 * dem.cell_width)\n dzdy = rolled(1, 1) + 2 * rolled(0, 1) + rolled(-1, 1) - \\\n rolled(1, -1) - 2 * rolled(0, -1) - rolled(-1, -1)\n dzdy /= (8 * 
dem.cell_width)\n\n # get percentage of slope and the direction of raise and save them as GeoData\n slope_percent = np.sqrt(np.power(dzdx, 2) + np.power(dzdy, 2)) * 100\n raise_dir = np.arctan2(dzdy, dzdx)\n sp = dem.clone(np.array(slope_percent, dtype=[('slope', 'float64')]))\n rd = dem.clone(np.array(raise_dir, dtype=[('raise_dir', 'float64')]))\n\n # combine slope and raise direction into one GeoData and fit it to the area originally asked\n result = sp.combine(rd)\n result.data = result.data[1:dem.data.shape[0]-1, 1:dem.data.shape[1]-1, ...]\n result.max_x -= 2\n result.max_y -= 2\n return result", "def average_slope(gdf, dem, dem_affine, absolute=True, as_pct=True):\n\n # coords of the starts of the line\n x1, y1 = _get_nth_points_in_lines(gdf, 0)\n r1, c1 = raster.xy_to_rowcol(x1, y1, dem_affine)\n\n # coords of the ends of the lines\n x2, y2 = _get_nth_points_in_lines(gdf, -1)\n r2, c2 = raster.xy_to_rowcol(x2, y2, dem_affine)\n\n slope = (dem[r2, c2] - dem[r1, c1]) / gdf['geometry'].length\n if absolute:\n slope = numpy.abs(slope)\n\n factor = 1\n if as_pct:\n factor = 100\n\n return slope * factor", "def _get_slope(self):\n return self._slope", "def derive_slope(rise, run):\n difference_run = np.diff(run)\n difference_rise = np.diff(rise)\n derivative = difference_rise/difference_run\n\n return derivative", "def calc_slope(self):\n sigma_x = np.std(self.x)\n sigma_y = np.std(self.y)\n if sigma_x == 0 or sigma_y == 0:\n self.slope = 0\n else:\n r = np.corrcoef(x=self.x, y=self.y)[1, 0]\n self.slope = r * sigma_y / sigma_x", "def slopeFromOrigin(self):\n if self.x:\n return self.y/self.x", "def compute_slope_intercept(x, y):\r\n mean_x = np.mean(x)\r\n mean_y = np.mean(y) \r\n m = sum([(x[i] - mean_x) * (y[i] - mean_y) for i in range(len(x))]) \\\r\n / sum([(x[i] - mean_x) ** 2 for i in range(len(x))])\r\n # y = mx + b => y - mx\r\n b = mean_y - m * mean_x\r\n return m, b", "def slope_intercept(p1,p2):\n\tm = float(p1[1]-p2[1]) / (p1[0]-p2[0])\n\tb = p1[1] - m*p1[0]\n\treturn (m,b)", "def parameters(self):\n return (self.slope,)", "def calc_average_line_slope(self) -> int:\n\n avg_slope = 0\n\n i = 1\n while i < len(self.valleys):\n u = self.valleys[i - 1]\n d = self.valleys[i]\n avg_slope += self.calc_range_slope(u, d)\n i += 1\n\n return int(avg_slope // (len(self.valleys) - 1))", "def surface_margin_deph(A_approx_deph, A_real_deph): \n return (A_approx_deph - A_real_deph) * 100 / A_approx_deph", "def linear_deviation_abs(self):\n return np.mean(np.abs(self.deltas))", "def _mdens_deriv(self, m):\n return -2.0 * m * numpy.exp(-(m**2) / self._twosigma2) / self._twosigma2", "def se_over_slope(x, y, estimated, model):\n assert len(y) == len(estimated)\n assert len(x) == len(estimated)\n EE = ((estimated - y)**2).sum()\n var_x = ((x - x.mean())**2).sum()\n SE = np.sqrt(EE/(len(x)-2)/var_x)\n return SE/model[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the common area of vegetation, slope and aspect to calculate BAL.
def find_common_area(veg_class, slope, aspect):
    output_folder = os.path.dirname(veg_class)
    arcpy.env.overwriteOutput = True

    # set directory
    work_folder = output_folder
    os.chdir(work_folder)
    arcpy.env.workspace = work_folder

    # get the common area of veg and dem
    # get the extent of inputs
    slope_poly = "slope_poly.shp"
    veg_class_poly = "veg_class_poly.shp"
    get_footprint(slope, slope_poly)
    get_footprint(veg_class, veg_class_poly)

    mask_com = 'mask_com.shp'
    arcpy.Intersect_analysis([slope_poly, veg_class_poly], mask_com)

    veg_class_com = pjoin(output_folder, 'veg_c')
    slope_com = pjoin(output_folder, 'slope_c')
    aspect_com = pjoin(output_folder, 'aspect_c')

    extract_by_mask(veg_class, mask_com, veg_class_com)
    extract_by_mask(slope, mask_com, slope_com)
    extract_by_mask(aspect, mask_com, aspect_com)

    if arcpy.Exists(slope_poly):
        arcpy.Delete_management(slope_poly)
    if arcpy.Exists(veg_class_poly):
        arcpy.Delete_management(veg_class_poly)
    if arcpy.Exists(mask_com):
        arcpy.Delete_management(mask_com)
    if arcpy.Exists(veg_class):
        arcpy.Delete_management(veg_class)
    if arcpy.Exists(slope):
        arcpy.Delete_management(slope)
    if arcpy.Exists(aspect):
        arcpy.Delete_management(aspect)

    return veg_class_com, slope_com, aspect_com
[ "def get_ab_area(self):\n\t\treturn la.norm(cross(self.a, self.b))/2", "def get_ab_area(self):\n\t\treturn la.norm(cross(self.a, self.b))", "def get_ah_area(self):\n\t\treturn la.norm(cross(self.a, self.h))", "def bal_calc(vegetation, dem, fdi, output_folder, remap, mask):\n arcpy.env.overwriteOutput = True\n\n arcpy.AddMessage('Reclassify the vegetation map ... ')\n veg_class = reclass_veg(vegetation, dem, output_folder, remap, mask)\n\n arcpy.AddMessage('Reclassify slope and aspect ... ')\n slope, aspect = get_slope_aspect(dem, output_folder, mask)\n\n if arcpy.Exists(mask):\n arcpy.Delete_management(mask)\n\n # extract the common area between vegtation, slope and aspect\n arcpy.AddMessage('Get common area of input data ... ')\n veg_class_com, slope_com, aspect_com = find_common_area(veg_class,\n slope, aspect)\n\n arcpy.AddMessage('Calculate the BAL ... ')\n bal_cal(veg_class_com, slope_com, aspect_com, fdi)", "def get_bh_area(self):\n\t\treturn la.norm(cross(self.b, self.h))", "def get_bc_area(self):\n\t\treturn la.norm(cross(self.b, self.c))", "def abg(p1, p2, p3):\n x1, y1 = p1\n x2, y2 = p2\n x3, y3 = p3\n alpha = [\n x2*y3 - x3*y2,\n x3*y1 - x1*y3,\n x1*y2 - x2*y1\n ]\n beta = [\n y2 - y3,\n y3 - y1,\n y1 - y2\n ]\n gamma = [\n x3 - x2,\n x1 - x3,\n x2 - x1\n ]\n area = 0.5 * sum(alpha)\n return alpha, beta, gamma, area", "def get_side_area(self):\n\t\treturn PI * self.get_forming * (self.r_1 + self.r_2)", "def compute_area(self) -> None:\n for ann in tqdm(\n self.anns.values(), desc=\"process images\", disable=not self.verbose\n ):\n ann[\"area\"] = ann[\"bbox\"][2] * ann[\"bbox\"][3]", "def avg_Ao(self):\n ...", "def dualArea(self):\n fl = list(self.adjacentFaces())\n area_star = 0.\n for ff in fl:\n area_star += ff.area/3.\n\n return area_star", "def calculate_body_measurements(self):\n self.l = self.armature.data.bones[\"UpperLeg.L\"].head_local.z\n self.l2 = self.armature.data.bones[\"Torso\"].tail_local.z\n self.pelvis_shift = self.armature.data.bones[\"Pelvis_Main\"].head_local.z - self.armature.data.bones[\"Pelvis_Main\"].tail_local.z\n self.head_height = self.armature.data.bones[\"Head\"].head_local.z\n\n bones = self.armature.data.bones\n if True and self.animal_classification == 'Unguligrade':\n self.ankle_toe = (bones[\"Foot.L\"].head_local - bones[\"Toe.L\"].tail_local).length\n self.hip_toe = (bones[\"UpperLeg.L\"].head_local - bones[\"Toe.L\"].tail_local).length\n else:\n self.ankle_toe = (bones[\"Foot.L\"].head_local - bones[\"Toe.L\"].head_local).length\n self.hip_toe = (bones[\"UpperLeg.L\"].head_local - bones[\"Toe.L\"].head_local).length\n self.hip_ankle = (bones[\"UpperLeg.L\"].head_local - bones[\"Foot.L\"].head_local).length\n\n if True and self.animal_classification == 'Unguligrade':\n self.wrist_finger = (bones[\"Hand.L\"].head_local - bones[\"Finger.L\"].tail_local).length\n self.shoulder_finger = (bones[\"UpperArm.L\"].head_local - bones[\"Finger.L\"].tail_local).length\n else:\n self.wrist_finger = (bones[\"Hand.L\"].head_local - bones[\"Finger.L\"].head_local).length\n self.shoulder_finger = (bones[\"UpperArm.L\"].head_local - bones[\"Finger.L\"].head_local).length\n self.shoulder_wrist = (bones[\"UpperArm.L\"].head_local - bones[\"Hand.L\"].head_local).length\n\n self.foot_length = bones[\"Foot.L\"].length\n if self.animal_classification == 'Unguligrade':\n self.foot_length += bones[\"Toe.L\"].length\n self.leg_length = self.foot_length + bones[\"LowerLeg.L\"].length + bones[\"UpperLeg.L\"].length\n\n self.hand_length = bones[\"Hand.L\"].length\n if 
self.animal_classification == 'Unguligrade':\n self.hand_length += bones[\"Finger.L\"].length\n self.arm_length = self.hand_length + bones[\"LowerArm.L\"].length + bones[\"UpperArm.L\"].length\n self.neck_y = bones[\"Torso\"].tail_local.y", "def bounding_box_area(self):\n return (self.bounding_box[1][0] - self.bounding_box[0][0]) * (self.bounding_box[1][1] - self.bounding_box[0][1])", "def _calculate_overlap_area(x1, y1, x2, y2, width, height):\n\n dx = min(x1 + width, x2 + width) - max(x1, x2)\n dy = min(y1 + height, y2 + height) - max(y1, y2)\n\n if dx > 0 and dy > 0:\n return dx * dy\n else:\n return 0", "def get_area(shape: gpd.geodataframe.GeoDataFrame) -> float:\n return round(sum(shape.area) / 10 ** 6, 4)", "def get_area_rectangle(w, h):\n return -1.0", "def calculate_area(self):\n return Circle.PI * self.radius**2", "def f7_11(self, bbox):\n h = bbox[0].Y - bbox[1].Y\n w = bbox[1].X - bbox[0].X\n if h*w > 0:\n logBboxArea = math.log( h*w)\n else:\n logBboxArea = -1000000\n bcp_logger.debug(\"Log Bbox Area: %s\" % (logBboxArea))\n return logBboxArea", "def envelope_area(self):\n return self.side_a * self.side_b" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate BAL based on vegetation map and DEM.
def bal_calc(vegetation, dem, fdi, output_folder, remap, mask):
    arcpy.env.overwriteOutput = True

    arcpy.AddMessage('Reclassify the vegetation map ... ')
    veg_class = reclass_veg(vegetation, dem, output_folder, remap, mask)

    arcpy.AddMessage('Reclassify slope and aspect ... ')
    slope, aspect = get_slope_aspect(dem, output_folder, mask)

    if arcpy.Exists(mask):
        arcpy.Delete_management(mask)

    # extract the common area between vegetation, slope and aspect
    arcpy.AddMessage('Get common area of input data ... ')
    veg_class_com, slope_com, aspect_com = find_common_area(veg_class,
                                                            slope, aspect)

    arcpy.AddMessage('Calculate the BAL ... ')
    bal_cal(veg_class_com, slope_com, aspect_com, fdi)
[ "def calc_dem_bent(self, dem_data, alt):\n self.dem = np.ma.zeros(self.N)\n self.dem[self.valid_idx] = (dem_data[self.i_idx_valid,\n self.j_idx_valid])\n self.dem.mask = ~self.valid_idx\n self.dem_bent = self.dem - alt", "def run():\n # add subfolders into path\n cmd_folder = os.path.realpath(\n os.path.abspath(\n os.path.split(\n inspect.getfile(\n inspect.currentframe()))[0]))\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n\n cmd_subfolder = pjoin(cmd_folder, \"utilities\")\n if cmd_subfolder not in sys.path:\n sys.path.insert(0, cmd_subfolder)\n\n # get input parameters from toolbox interface\n dem = arcpy.GetParameterAsText(0)\n veg = arcpy.GetParameterAsText(1)\n remap = arcpy.GetParameterAsText(2)\n output_folder = arcpy.GetParameterAsText(3)\n fdi = arcpy.GetParameter(4)\n extent = arcpy.GetParameter(5)\n\n dem_sr = arcpy.Describe(dem).spatialReference\n arcpy.AddMessage(\"DEM's spatial reference type is {0}\".format(dem_sr.type))\n\n if dem_sr.type == \"Projected\":\n # find effective AOI based on the input parameters\n mask = find_aoi(extent, dem, veg)\n\n try:\n # calculate the BAL for the effective AOI\n bal_calc(veg, dem, fdi, output_folder, remap, mask)\n arcpy.AddMessage(\"Successfully completed BAL calculation!\")\n except Exception as err:\n # Report any exceptions back\n arcpy.AddError(err)\n\n else:\n arcpy.AddError(\"To go ahead, the DEM needs to be projected first\")", "def calculate_desired_alaz(self):\n target = self.coordselector.currentText()\n\n # Convert from QString to String to not confuse ephem\n leftcoord = str(self.inputleftcoord.text())\n rightcoord= str(self.inputrightcoord.text())\n\t\n\t# Calculate offset values in desired unit\n offsetsys = self.coordselector_steps.currentText()\n # ASSUME HORIZONTAL FOR NOW\n # Read specified offset grid\n # Convert from QString to String to not confuse ephem,then to decimal degrees in case the string had XX:XX:XX.XX format.\n try:\n offset_left = float(ephem.degrees(str(self.offset_left.text())))*180.0/np.pi\n offset_right= float(ephem.degrees(str(self.offset_right.text())))*180.0/np.pi\n nsteps_left = max(float(self.nsteps_left.text()),1)\n nsteps_right= max(float(self.nsteps_right.text()),1)\n\t gs_left = offset_left*(nsteps_left-1)\n\t gs_right = offset_right*(nsteps_right-1)\n if gs_left ==0:\n offsets_left = np.array([0.0])\n else:\n\t offsets_left = np.linspace(-0.5*gs_left,0.5*gs_left,num=nsteps_left)\n if gs_right ==0:\n offsets_right = np.array([0.0])\n else:\n\t offsets_right = np.linspace(-0.5*gs_right,0.5*gs_right,num=nsteps_right)\n except ValueError, IndexError:\n offsets_left = np.array([0.0])\n offsets_right = np.array([0.0])\n \n # Reset values in case they are \"The Sun\"\n # since otherwise errors appear when switchin from \"The Sun\"-mode \n # to something else\n try:\n ephem.degrees(leftcoord)\n ephem.degrees(rightcoord)\n except ValueError:\n leftcoord = 0.0\n rightcoord = 0.0\n\n self.telescope.site.date = ephem.now()\n if target == 'Horizontal':\n alt = ephem.degrees(leftcoord)\n az = ephem.degrees(rightcoord)\n # Make sure azimut is given in interval 0 to 360 degrees.\n #az = (float(rightcoord) %360.0)* np.pi/180.0\n # Save as targetpos, will be minor offset because of radec_of conversion\n # Note reverse order of az, alt in this radec_of-function.\n #(ra, dec) = self.telescope.site.radec_of(az, alt)\n #pos = ephem.FixedBody()\n #pos._ra = ra\n #pos._dec = dec\n #pos._epoch = self.telescope.site.date\n #pos.compute(self.telescope.site)\n # Do not set position to tracking 
target in this case, because of radec_of discrepancy.\n # Instead set to given values manually\n alt_deg = float(alt)*180.0/np.pi\n az_deg = float(az)*180.0/np.pi\n \n elif target == 'Stow':\n # Read stow position from file\n (alt_deg,az_deg)=self.telescope.get_stow_alaz()\n\n else:\n # If given system is something else, we do not have to use radec_of and we get\n # http://stackoverflow.com/questions/11169523/how-to-compute-alt-az-for-given-galactic-coordinate-glon-glat-with-pyephem\n if target == 'The Sun':\n pos = ephem.Sun()\n pos.compute(self.telescope.site) # Needed for the sun since depending on time\n elif target == 'The Moon':\n pos = ephem.Moon()\n pos.compute(self.telescope.site) # Needed for the moon since depending on time\n elif target == 'Cas. A':\n pos = ephem.Equatorial(ephem.hours('23:23:26'), ephem.degrees('58:48:0'), epoch=ephem.J2000)\n # Coordinate from http://en.wikipedia.org/wiki/Cassiopeia_A\n elif target == 'Galactic':\n pos = ephem.Galactic(ephem.degrees(leftcoord), ephem.degrees(rightcoord))\n elif target == 'Eq. J2000':\n pos = ephem.Equatorial(ephem.hours(leftcoord), ephem.degrees(rightcoord), epoch=ephem.J2000)\n elif target == 'Eq. B1950':\n pos = ephem.Equatorial(ephem.hours(leftcoord), ephem.degrees(rightcoord), epoch=ephem.B1950)\n elif target == 'Ecliptic':\n pos = ephem.Ecliptic(ephem.degrees(leftcoord), ephem.degrees(rightcoord)) # Use some epoch?\n #pos = ephem.Ecliptic(ephem.degrees(leftcoord), ephem.degrees(rightcoord), epoch=ephem.J2000)\n # Calculate alt, az, via fixedbody since only fixed body has alt, az\n # First needs to make sure we have equatorial coordinates\n eqpos = ephem.Equatorial(pos)\n fixedbody = ephem.FixedBody()\n fixedbody._ra = eqpos.ra\n fixedbody._dec = eqpos.dec\n fixedbody._epoch = eqpos.epoch\n fixedbody.compute(self.telescope.site)\n alt = fixedbody.alt\n az = fixedbody.az\n alt_deg = float(alt)*180.0/np.pi\n az_deg = float(az)*180.0/np.pi\n\n # If horizontal offset, and if az-scale checkbox selected,\n # then scale current az offset value with cos(alt)\n if self.scale_az_offset.isChecked() and offsetsys == 'Horizontal':\n offsets_right *= (1.0/np.cos((alt_deg+offsets_left[self.leftiter])*(np.pi/180.0)))\n\n #TODO: Implement non-horizontal offset\n checklpos = alt_deg + offsets_left\n checkrpos = az_deg + offsets_right\n\n if self.allow_flip.isChecked():\n flipleft = [180-lp for lp in checklpos]\n flipright = [(rp+180)%360 for rp in checkrpos]\n \n # Check if directions are reachable\n can_reach_all = True\n can_flipreach_all = True\n for i in range(len(checklpos)):\n for j in range(len(checkrpos)):\n # Check if the desired direction is best reached via simple alt, az\n # or at 180-alt, az+180.\n reach = self.telescope.can_reach(checklpos[i], checkrpos[j]) \n flipreach = self.telescope.can_reach(flipleft[i], flipright[j])\n if not reach:\n can_reach_all = False\n if not flipreach:\n can_flipreach_all = False\n\n # If flip direction cannot be reached, return original one.\n # (even if this one may not be reached)\n if not can_flipreach_all:\n leftpos = checklpos\n rightpos = checkrpos\n\n # But, if flip direction can be reached, but not original one,\n # then we have to go to flipdirection to point to this position\n # E.g. 
in mecanically forbidden azimuth range\n elif can_flipreach_all and (not can_reach_all):\n leftpos = flipleft\n rightpos = flipright\n # If both directions are valid, which is the most common case,\n # then we find the closest one (in azimuth driving, not in angular distance)\n # to the current pointing\n elif can_flipreach_all and can_reach_all: \n (calt_deg, caz_deg) = self.telescope.get_current_alaz()\n flipd = self.telescope.get_azimuth_distance(caz_deg, flipright[self.rightiter])\n noflipd = self.telescope.get_azimuth_distance(caz_deg, checkrpos[self.rightiter])\n if flipd<noflipd:\n # Flip is closer, so do it\n leftpos = flipleft\n rightpos = flipright\n else:\n # No flip is closer, so don't flip\n leftpos = checklpos\n rightpos = checkrpos\n else:\n leftpos = checklpos\n rightpos = checkrpos\n # Update coordinates\n self.leftpos = leftpos\n self.rightpos = rightpos\n return (self.leftpos[self.leftiter], self.rightpos[self.rightiter])", "def AlbiniNetFuelLoading(self) :\n self.netFuelLoading = self.ovendryLoading * (1-self.totMineralContent)", "def update_step():\n # for all states\n for x in range(0, mapper.MAX_CELLS_X):\n for y in range(0, mapper.MAX_CELLS_Y):\n for a in range(0, mapper.MAX_CELLS_A):\n if loc.bel_bar[x, y, a]>0.0001:\n loc.bel[x, y, a] = np.prod(loc.gaussian(loc.obs_range_data, mapper.obs_views[x, y, a, :], loc.sensor_sigma)) * loc.bel_bar[x, y, a]\n loc.bel = loc.bel / np.sum(loc.bel) # normalize belief grid", "def get_olr_alb(model, region):\n if INCLUDE_SHOCK: \n ind0=0\n else:\n ind0 = 8*2 # exclude first two days\n if model.lower()==\"nicam\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for NICAM TWP:\")\n st= time.time()\n olr = xr.open_dataset(ap.TWP_NICAM_OLR)['sa_lwu_toa'][11::12,:,:,:]\n swu = xr.open_dataset(ap.TWP_NICAM_SWU)['ss_swu_toa'][11::12,:,:,:]\n swd = xr.open_dataset(ap.TWP_NICAM_SWD)['ss_swd_toa'][11::12,:,:,:]\n print(\"... calculating albedo for shape\",olr.shape,swu.shape,swd.shape)\n elif (region.lower()==\"nau\") or (region.lower()==\"nauru\"):\n print(\"Getting olr and albedo for NICAM NAURU:\")\n st= time.time()\n olr = xr.open_dataset(ap.NAU_NICAM_OLR)['sa_lwu_toa'][11::12,:,:,:]\n swu = xr.open_dataset(ap.NAU_NICAM_SWU)['ss_swu_toa'][11::12,:,:,:]\n swd = xr.open_dataset(ap.NAU_NICAM_SWD)['ss_swd_toa'][11::12,:,:,:]\n print(\"... calculating albedo for shape\",olr.shape,swu.shape,swd.shape)\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for NICAM SAHEL:\")\n st= time.time()\n olr = xr.open_dataset(ap.SHL_NICAM_OLR)['sa_lwu_toa'][11::12,:,:,:]\n swu = xr.open_dataset(ap.SHL_NICAM_SWU)['ss_swu_toa'][11::12,:,:,:]\n swd = xr.open_dataset(ap.SHL_NICAM_SWD)['ss_swd_toa'][11::12,:,:,:]\n print(\"... calculating albedo for shape\",olr.shape,swu.shape,swd.shape)\n else: print(\"Region not supported (try TWP, NAU, SHL)\")\n alb = swu/swd\n alb = alb[ind0:]\n olr = olr[ind0:]\n del swu, swd\n print(\"... 
calculated albedo and opened olr (%s seconds elapsed)...\"%str(time.time()-st))\n elif model.lower()==\"fv3\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for FV3 TWP:\")\n olr = xr.open_dataset(ap.TWP_FV3_OLR)[\"flut\"][11::12,:,:]\n swu = xr.open_dataset(ap.TWP_FV3_SWU)[\"fsut\"][11::12,:,:]\n swu = swu[ind0:]\n swd = get_swd(\"FV3\", \"TWP\")[11::12,:,:]\n alb = swu.values/swd\n print(olr.shape, alb.shape)\n elif region.lower()==\"nau\":\n print(\"Getting olr and albedo for FV3 NAU:\")\n olr = xr.open_dataset(ap.NAU_FV3_OLR)[\"flut\"][11::12,:,:]\n swu = xr.open_dataset(ap.NAU_FV3_SWU)[\"fsut\"][11::12,:,:]\n swu = swu[ind0:]\n swd = get_swd(\"FV3\", \"NAU\")[11::12,:,:]\n alb = swu.values/swd\n print(olr.shape, alb.shape)\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for FV3 SHL:\")\n olr = xr.open_dataset(ap.SHL_FV3_OLR)[\"flut\"][11::12,:,:]\n swu = xr.open_dataset(ap.SHL_FV3_SWU)[\"fsut\"][11::12,:,:]\n swu = swu[ind0:]\n swd = get_swd(\"FV3\", \"SHL\")[11::12,:,:]\n alb = swu.values/swd\n print(olr.shape, alb.shape)\n else: \n raise Exception(\"Region not supported. Try 'TWP', 'NAU', 'SHL'.\")\n alb = alb\n olr = olr[ind0:]\n elif model.lower()==\"icon\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for ICON TWP:\")\n olr = xr.open_dataset(ap.TWP_ICON_OLR)[\"ATHB_T\"]\n swu = xr.open_dataset(ap.TWP_ICON_SWU)[\"ASOU_T\"]\n swn = xr.open_dataset(ap.TWP_ICON_SWN)[\"ASOB_T\"]\n swd = swn + swu.values\n del swn\n alb = swu/swd.values\n alb = alb.where((alb.values>0)&(swd.values>0)&(alb.values<1))\n elif region.lower()==\"nau\":\n print(\"Getting olr and albedo for ICON NAU:\")\n rad = xr.open_dataset(ap.NAU_ICON_RAD)\n olr = reshape.reshape(\"ATHB_T\", rad, dim=2)\n swu = reshape.reshape(\"ASOU_T\", rad, dim=2)\n swn = reshape.reshape(\"ASOB_T\", rad, dim=2)\n del rad\n olr_un = util.undomean(olr, xy=False)\n swu_un = util.undomean(swu, xy=False)\n del swu\n swn_un = util.undomean(swn, xy=False)\n del swn\n olr = xr.DataArray(olr_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.t.values,\\\n \"cell\":olr.cell})\n swu = xr.DataArray(swu_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swn = xr.DataArray(swn_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swd = swn + swu\n del swn\n alb = swu/swd\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for ICON SHL:\")\n rad = xr.open_dataset(ap.SHL_ICON_RAD)\n olr = reshape.reshape(\"ATHB_T\", rad, dim=2)\n swu = reshape.reshape(\"ASOU_T\", rad, dim=2)\n swn = reshape.reshape(\"ASOB_T\", rad, dim=2)\n olr_un = util.undomean(olr, xy=False)\n swu_un = util.undomean(swu, xy=False)\n swn_un = util.undomean(swn, xy=False)\n olr = xr.DataArray(olr_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.t.values,\\\n \"cell\":olr.cell})\n swu = xr.DataArray(swu_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swn = xr.DataArray(swn_un, dims=[\"time\",\"cell\"], \\\n coords={\"time\":olr.time.values,\\\n \"cell\":olr.cell})\n swd = swn + swu\n del swn\n alb = swu/swd\n else: \n raise Exception(\"Region not supported. 
Try 'TWP', 'NAU', 'SHL'.\")\n alb = alb.where((alb<1)&(alb>0))\n alb = alb[11::12]\n olr = olr[11::12]\n alb = alb[ind0:]\n olr = olr[ind0:]\n print(olr.shape, alb.shape)\n elif model.lower()==\"sam\":\n if region.lower()==\"twp\":\n print(\"Getting olr and albedo for SAM TWP:\")\n olr = xr.open_dataset(ap.TWP_SAM_OLR)[\"LWNTA\"][5::6,:,:]\n swn = xr.open_dataset(ap.TWP_SAM_SWN)[\"SWNTA\"][5::6,:,:]\n olr = olr[ind0:]\n swn = swn[ind0:]\n swd = get_swd(\"SAM\", \"TWP\")[5::6,:,:]\n elif region.lower()==\"nau\":\n print(\"Getting olr and albedo for SAM NAU:\")\n olr = xr.open_dataset(ap.NAU_SAM_OLR)[\"LWNTA\"][5::6,:,:]\n swn = xr.open_dataset(ap.NAU_SAM_SWN)[\"SWNTA\"][5::6,:,:]\n olr = olr[ind0:]\n swn = swn[ind0:]\n swd = get_swd(\"SAM\", \"NAU\")[5::6,:,:]\n elif region.lower()==\"shl\":\n print(\"Getting olr and albedo for SAM SHL:\")\n olr = xr.open_dataset(ap.SHL_SAM_OLR)[\"LWNTA\"][5::6,:,:]\n swn = xr.open_dataset(ap.SHL_SAM_SWN)[\"SWNTA\"][5::6,:,:]\n olr = olr[ind0:]\n swn = swn[ind0:]\n swd = get_swd(\"SAM\", \"SHL\")[5::6,:,:]\n else: \n raise Exception(\"Region not supported. Try 'TWP', 'NAU', 'SHL'.\")\n print(swd.shape, swn.shape)\n swu = swd.values - swn.values\n print(\"... subtracted...\")\n alb = swu/swd.values\n print(\"... calculated alb...\")\n alb = xr.DataArray(alb, dims=olr.dims, coords=olr.coords, attrs={'long_name':'albedo at TOA (aver)',\n 'units':'None'})\n print(\"... made xarray...\")\n alb = alb.where((alb.values>0)&(swd.values>0))\n print(\"... made sure alb values are valid...\")\n print(\"... calculated mean\", alb.mean().values, \"...\")\n print(\"... returning olr and albedo\", olr.shape, alb.shape, \"...\")\n else: raise Exception(\"Model not supported at this time (try 'NICAM', 'FV3', 'ICON', 'SAM')\")\n return olr, alb", "def estimate_gates(self):\n comp_ex = self.all_bead_exp\n # Generate the bead gating\n g = flow.GaussianMixture2DOp(name = \"Debris_Filter\",\n xchannel = \"FSC-A\",\n xscale = \"logicle\",\n ychannel = \"SSC-A\",\n yscale = \"logicle\",\n num_components = 4,\n sigma = 3)\n g.estimate(comp_ex)\n\n bead_coords = g.default_view().plot(comp_ex,get_coords = 3)\n bead_coords = [tuple(l) for l in bead_coords]\n self.bead_gate = flow.PolygonOp(name = \"Bead\", xchannel = \"FSC-A\", ychannel = \"SSC-A\", vertices=bead_coords)\n\n # Create polygon gate out of gaussian gate\n return(self.bead_gate)", "def _calculate_durational_alr(self):\n # calculate present value of future benefits\n benefit_cols = [\"BENEFIT_COST\", \"LIVES_MD\", \"DISCOUNT_MD\"]\n pvfb = calc_pv(self.frame[benefit_cols].prod(axis=1))\n\n # calculate present value of future premium\n premium_cols = [\"GROSS_PREMIUM\", \"LIVES_BD\", \"DISCOUNT_BD\"]\n pvfp = calc_pv(self.frame[premium_cols].prod(axis=1))\n\n # calculate present value of future net benefifts\n pvfnb = calc_pvfnb(pvfb, pvfp, net_benefit_method=self.net_benefit_method)\n\n # calculate alr at end of duration\n alr_bd = (\n (pvfb - pvfnb) / self.frame[\"LIVES_BD\"] / self.frame[\"DISCOUNT_BD\"]\n ).clip(lower=0)\n\n # assign values to frame\n self.frame[\"PVFB\"] = pvfb\n self.frame[\"PVFP\"] = pvfp\n self.frame[\"PVFNB\"] = pvfnb\n self.frame[\"ALR_BD\"] = alr_bd\n self.frame[\"ALR_ED\"] = alr_bd.shift(-1, fill_value=0)", "def build_bkg(self):\n try:\n self.param_bphi.x\n print(\"Bphi already built!\")\n except:\n self.calc_field()\n\n print(\"Build bkg\")\n\n R_temp = np.linspace(self.eqdsk.rboxleft, self.eqdsk.rboxleft+self.eqdsk.rboxlength+self.extend_psi_R, self.nR)\n z_temp = 
np.linspace(-self.eqdsk.zboxlength/2., self.eqdsk.zboxlength/2., self.nz)\n #R_temp = np.linspace(float(np.around(np.min(self.R_w), decimals=2)), float(np.around(np.max(self.R_w), decimals=2)), self.nR)\n #z_temp = np.linspace(float(np.around(np.min(self.z_w), decimals=2)), float(np.around(np.max(self.z_w), decimals=2)), self.nz)\n\n psitemp = self.psi_coeff(R_temp, z_temp)\n\n bphitemp = self.param_bphi(R_temp, z_temp)\n\n self.bkg={'type':'magn_bkg', 'phi0':0, 'nsector':0, 'nphi_per_sector':1,\\\n 'ncoil':0, 'zero_at_coil':1,\\\n 'R':R_temp,'z':z_temp, \\\n 'phimap_toroidal':0, 'phimap_poloidal':0, \\\n 'psi':[],\\\n 'Bphi':bphitemp, 'BR':self.Br, 'Bz':self.Bz, \\\n 'Bphi_pert':self.Bphi_pert, 'BR_pert':self.BR_pert, 'Bz_pert':self.Bz_pert} \n\n self.bkg['psi'] = psitemp*2*np.pi #in ASCOT Bfield, the psi is divided by 2*pi and reverses sign. This prevents it from happening \n print(\"remember: I am multiplying psi times 2pi since in ascot it divides by it!\")", "def L_B(self, Lbol):\n #Coefficients from Table 1 for the UV luminosity.\n c1, k1, c2, k2 = 6.25, -0.37, 9.00, -0.012\n #Implementation of equation (5).\n x = Lbol/(1e10*L_sun)\n bc = c1*x**k1 + c2*x**k2\n return Lbol/bc", "def GET_CYL():\n nmax = 46\n gamval = np.zeros(nmax)\n lamval = np.zeros(nmax)\n bval = np.zeros(nmax)\n\n gamval[0] = 1.00001\n gamval[1] = 1.0001\n gamval[2] = 1.001\n gamval[3] = 1.005\n gamval[4] = 1.01\n gamval[5] = 1.03\n gamval[6] = 1.05\n gamval[7] = 1.07\n gamval[8] = 1.10\n gamval[9] = 1.15\n gamval[10] = 1.2\n gamval[11] = 1.3\n gamval[12] = 1.4\n gamval[13] = 1.5\n gamval[14] = 1.66667\n gamval[15] = 1.7\n gamval[16] = 1.8\n gamval[17] = 1.9\n gamval[18] = 1.92\n gamval[19] = 2.0\n gamval[20] = 2.0863\n gamval[21] = 2.0883\n gamval[22] = 2.125\n gamval[23] = 2.2\n gamval[24] = 2.3676\n gamval[25] = 2.3678\n gamval[26] = 2.4\n gamval[27] = 2.6\n gamval[28] = 2.8\n gamval[29] = 2.83920\n gamval[30] = 2.83929\n gamval[31] = 3.0\n gamval[32] = 3.4\n gamval[33] = 4.0\n gamval[34] = 5.0\n gamval[35] = 6.0\n gamval[36] = 7.0\n gamval[37] = 8.0\n gamval[38] = 10.0\n gamval[39] = 15.0\n gamval[40] = 20.0\n gamval[41] = 30.0\n gamval[42] = 50.0\n gamval[43] = 100.0\n gamval[44] = 1000.0\n gamval[45] = 9999.0\n\n lamval[0] = 1.0022073240\n lamval[1] = 1.0068195769\n lamval[2] = 1.0202846866\n lamval[3] = 1.0414733956\n lamval[4] = 1.0553973808\n lamval[5] = 1.0850737604\n lamval[6] = 1.1023892512\n lamval[7] = 1.1150692073\n lamval[8] = 1.1296268597\n lamval[9] = 1.1475773258\n lamval[10] = 1.1612203175\n lamval[11] = 1.1817213587\n lamval[12] = 1.1971414294\n lamval[13] = 1.2095591324\n lamval[14] = 1.2260537880\n lamval[15] = 1.2288931032\n lamval[16] = 1.2367055181\n lamval[17] = 1.2436278359\n lamval[18] = 1.2449208188\n lamval[19] = 1.2498244759\n lamval[20] = 1.2546830116\n lamval[21] = 1.2547907910\n lamval[22] = 1.2567323668\n lamval[23] = 1.2604989804\n lamval[24] = 1.2680643171\n lamval[25] = 1.2680727188\n lamval[26] = 1.2694076380\n lamval[27] = 1.2769816100\n lamval[28] = 1.2835139723\n lamval[29] = 1.2846912316\n lamval[30] = 1.2846938989\n lamval[31] = 1.2892136582\n lamval[32] = 1.2986950941\n lamval[33] = 1.3095267323\n lamval[34] = 1.3220499813\n lamval[35] = 1.3305627751\n lamval[36] = 1.3367301837\n lamval[37] = 1.3414054776\n lamval[38] = 1.3480251307\n lamval[39] = 1.3569909807\n lamval[40] = 1.3615356210\n lamval[41] = 1.3661223915\n lamval[42] = 1.3698225859\n lamval[43] = 1.3726158889\n lamval[44] = 1.3751432790\n lamval[45] = 1.3753967176\n\n bval[0] = 0.521740\n bval[1] = 0.554609\n 
bval[2] = 0.625514\n bval[3] = 0.697737\n bval[4] = 0.724429\n bval[5] = 0.731819\n bval[6] = 0.708880\n bval[7] = 0.682234\n bval[8] = 0.644590\n bval[9] = 0.593262\n bval[10] = 0.554542\n bval[11] = 0.502117\n bval[12] = 0.469268\n bval[13] = 0.447230\n bval[14] = 0.423698\n bval[15] = 0.420261\n bval[16] = 0.411663\n bval[17] = 0.405047\n bval[18] = 0.403911\n bval[19] = 0.399877\n bval[20] = 0.396295\n bval[21] = 0.396220\n bval[22] = 0.394904\n bval[23] = 0.392529\n bval[24] = 0.388444\n bval[25] = 0.388440\n bval[26] = 0.387812\n bval[27] = 0.384755\n bval[28] = 0.382794\n bval[29] = 0.382506\n bval[30] = 0.382505\n bval[31] = 0.381580\n bval[32] = 0.380564\n bval[33] = 0.380920\n bval[34] = 0.383355\n bval[35] = 0.386279\n bval[36] = 0.389064\n bval[37] = 0.391561\n bval[38] = 0.395687\n bval[39] = 0.402440\n bval[40] = 0.406405\n bval[41] = 0.410797\n bval[42] = 0.414640\n bval[43] = 0.417726\n bval[44] = 0.420658\n bval[45] = 0.420960\n\n return gamval, lamval, bval", "def calculate_body_measurements(self):\n self.l = self.armature.data.bones[\"UpperLeg.L\"].head_local.z\n self.l2 = self.armature.data.bones[\"Torso\"].tail_local.z\n self.pelvis_shift = self.armature.data.bones[\"Pelvis_Main\"].head_local.z - self.armature.data.bones[\"Pelvis_Main\"].tail_local.z\n self.head_height = self.armature.data.bones[\"Head\"].head_local.z\n\n bones = self.armature.data.bones\n if True and self.animal_classification == 'Unguligrade':\n self.ankle_toe = (bones[\"Foot.L\"].head_local - bones[\"Toe.L\"].tail_local).length\n self.hip_toe = (bones[\"UpperLeg.L\"].head_local - bones[\"Toe.L\"].tail_local).length\n else:\n self.ankle_toe = (bones[\"Foot.L\"].head_local - bones[\"Toe.L\"].head_local).length\n self.hip_toe = (bones[\"UpperLeg.L\"].head_local - bones[\"Toe.L\"].head_local).length\n self.hip_ankle = (bones[\"UpperLeg.L\"].head_local - bones[\"Foot.L\"].head_local).length\n\n if True and self.animal_classification == 'Unguligrade':\n self.wrist_finger = (bones[\"Hand.L\"].head_local - bones[\"Finger.L\"].tail_local).length\n self.shoulder_finger = (bones[\"UpperArm.L\"].head_local - bones[\"Finger.L\"].tail_local).length\n else:\n self.wrist_finger = (bones[\"Hand.L\"].head_local - bones[\"Finger.L\"].head_local).length\n self.shoulder_finger = (bones[\"UpperArm.L\"].head_local - bones[\"Finger.L\"].head_local).length\n self.shoulder_wrist = (bones[\"UpperArm.L\"].head_local - bones[\"Hand.L\"].head_local).length\n\n self.foot_length = bones[\"Foot.L\"].length\n if self.animal_classification == 'Unguligrade':\n self.foot_length += bones[\"Toe.L\"].length\n self.leg_length = self.foot_length + bones[\"LowerLeg.L\"].length + bones[\"UpperLeg.L\"].length\n\n self.hand_length = bones[\"Hand.L\"].length\n if self.animal_classification == 'Unguligrade':\n self.hand_length += bones[\"Finger.L\"].length\n self.arm_length = self.hand_length + bones[\"LowerArm.L\"].length + bones[\"UpperArm.L\"].length\n self.neck_y = bones[\"Torso\"].tail_local.y", "def calc_ubudget(datafiles, ndays, lon1, lon2, plev=200):\n\n # Read data\n data = xray.Dataset()\n for nm in datafiles:\n print('Reading ' + datafiles[nm])\n with xray.open_dataset(datafiles[nm]) as ds:\n if nm in ds.data_vars:\n var = ds[nm]\n else:\n var = ds[nm + '%d' % plev]\n if 'Day' in var.dims:\n var = var.rename({'Day' : 'day'})\n data[nm] = atm.squeeze(var)\n data[nm].load()\n data['PHI'] = atm.constants.g.values * data['H']\n\n # Put zeros in for any missing variables (e.g. 
du/dp)\n for nm in ['OMEGA', 'DUDP', 'DOMEGADP', 'DUDTANA']:\n if nm not in data.data_vars:\n data[nm] = 0.0 * data['U']\n\n # Eddy decomposition\n taxis = 0\n for nm in data.data_vars:\n print('Eddy decomposition for ' + nm)\n comp = eddy_decomp(data[nm], ndays, lon1, lon2, taxis)\n for compnm in comp:\n data[compnm] = comp[compnm]\n\n # Momentum budget calcs\n # du/dt = sum of terms in ubudget\n ubudget = xray.Dataset()\n readme = 'Momentum budget: ACCEL = sum of all other data variables'\n ubudget.attrs['readme'] = readme\n ubudget.attrs['ndays'] = ndays\n ubudget.attrs['lon1'] = lon1\n ubudget.attrs['lon2'] = lon2\n\n # Advective terms\n keypairs = [ ('AVG', 'AVG'), ('AVG', 'ST'), ('ST', 'AVG')]\n print('Computing advective terms')\n for pair in keypairs:\n print(pair)\n ukey, flowkey = pair\n u = data['U_' + ukey]\n dudp = data['DUDP_' + ukey]\n uflow = data['U_' + flowkey]\n vflow = data['V_' + flowkey]\n omegaflow = data['OMEGA_' + flowkey]\n adv = advection(uflow, vflow, omegaflow, u, dudp)\n for nm in adv.data_vars:\n key = 'ADV_%s_%s_%s' % (ukey, flowkey, nm)\n ubudget[key] = - adv[nm]\n long_name = 'Advection of %s momentum by %s' % (ukey, flowkey)\n ubudget[key].attrs['long_name'] = long_name\n\n # EMFD terms\n keys = ['TR', 'ST']\n print('Computing EMFD terms')\n for key in keys:\n print(key)\n u = data['U_' + key]\n v = data['V_' + key]\n omega = data['OMEGA_' + key]\n dudp = data['DUDP_' + key]\n domegadp = data['DOMEGADP_' + key]\n emfd = fluxdiv(u, v, omega, dudp, domegadp)\n for nm in emfd.data_vars:\n ubudget['EMFC_%s_%s' % (key, nm)] = - emfd[nm]\n\n # Coriolis terms\n latlon = latlon_data(data['V_ST'])\n lat = latlon['LAT']\n f = atm.coriolis(lat)\n ubudget['COR_AVG'] = data['V_AVG'] * f\n ubudget['COR_ST'] = data['V_ST'] * f\n\n # Pressure gradient terms\n a = atm.constants.radius_earth.values\n coslat = latlon['COSLAT']\n lonrad = latlon['LONRAD']\n londim = atm.get_coord(data['PHI_ST'], 'lon', 'dim')\n ubudget['PGF_ST'] = - atm.gradient(data['PHI_ST'], lonrad, londim) / (a*coslat)\n\n # Analysis increment for dU/dt\n ubudget['ANA'] = data['DUDTANA']\n\n # Time mean\n print('Computing rolling time mean')\n for nm in ubudget.data_vars:\n ubudget[nm] = atm.rolling_mean(ubudget[nm], ndays, axis=taxis, center=True)\n\n # Acceleration\n nseconds = 60 * 60 * 24 * ndays\n delta_u = np.nan * data['U']\n u = data['U'].values\n delta_u.values[ndays//2:-ndays//2] = (u[ndays:] - u[:-ndays]) / nseconds\n ubudget['ACCEL'] = delta_u\n\n return ubudget, data", "def run(self):\n\n mu_low=self.ip['mb_at_mb']\n\n\n #-------------#\n # The running #\n #-------------#\n\n MZ = self.ip['Mz']\n alpha_at_mb = 1/self.ip['aMZinv']\n as_MZ = self.ip['asMZ']\n as_mb = self.ip['as_at_mb']\n\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n adm_eff = self.ADM_eff\n else:\n pass\n\n evolve1 = rge.RGE(self.gamma_QCD, 5)\n evolve2 = rge.RGE(self.gamma_QCD2, 5)\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n evolve8 = rge.RGE(adm_eff, 5)\n else:\n pass\n\n # Mixing in the dim.6 DM-SM sector\n #\n # Strictly speaking, MZ and mb should be defined at the same scale\n # (however, this is a higher-order difference)\n C_at_mb_QCD = np.dot(evolve2.U0_as2(as_MZ, as_mb),\\\n np.dot(evolve1.U0(as_MZ, as_mb),\\\n self.coeff_list_dm_dim5_dim6_dim7))\n C_at_mb_QED = np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED)\\\n * np.log(mu_low/MZ) * alpha_at_mb/(4*np.pi)\\\n + np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED2)\\\n * 
np.log(mu_low/MZ) * (alpha_at_mb/(4*np.pi))**2\n\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n # Mixing in the dim.6 SM-SM and dim.8 DM-SM sector\n\n DIM6_DIM8_init = np.hstack((self.coeff_list_sm_dim6, self.coeff_list_dm_dim8))\n\n DIM6_DIM8_at_mb = np.dot(evolve8.U0(as_MZ, as_mb), DIM6_DIM8_init)\n\n\n # Revert back to dictionary\n\n dict_coeff_mb = list_to_dict(C_at_mb_QCD + C_at_mb_QED, self.wc_name_list)\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n dict_dm_dim8 = list_to_dict(np.delete(DIM6_DIM8_at_mb, np.s_[0:100]), self.wc8_name_list)\n dict_sm_dim6 = list_to_dict(np.delete(DIM6_DIM8_at_mb, np.s_[100:112]), self.sm_name_list)\n dict_sm_lepton_dim6 = list_to_dict(self.coeff_list_sm_lepton_dim6, self.sm_lepton_name_list)\n\n dict_coeff_mb.update(dict_dm_dim8)\n dict_coeff_mb.update(dict_sm_dim6)\n dict_coeff_mb.update(dict_sm_lepton_dim6)\n\n return dict_coeff_mb", "def magnetic_energy(self, dbe):\n #pylint: disable=C0103, R0914\n phase = dbe.phases[self.phase_name]\n param_search = dbe.search\n self.TC = self.curie_temperature = S.Zero\n self.BMAG = self.beta = S.Zero\n if 'ihj_magnetic_structure_factor' not in phase.model_hints:\n return S.Zero\n if 'ihj_magnetic_afm_factor' not in phase.model_hints:\n return S.Zero\n\n site_ratio_normalization = self._site_ratio_normalization\n # define basic variables\n afm_factor = phase.model_hints['ihj_magnetic_afm_factor']\n\n if afm_factor == 0:\n # Apply improved magnetic model which does not use AFM / Weiss factor\n return self.xiong_magnetic_energy(dbe)\n\n bm_param_query = (\n (where('phase_name') == phase.name) & \\\n (where('parameter_type') == 'BMAGN') & \\\n (where('constituent_array').test(self._array_validity))\n )\n tc_param_query = (\n (where('phase_name') == phase.name) & \\\n (where('parameter_type') == 'TC') & \\\n (where('constituent_array').test(self._array_validity))\n )\n\n mean_magnetic_moment = \\\n self.redlich_kister_sum(phase, param_search, bm_param_query)\n beta = mean_magnetic_moment / Piecewise(\n (afm_factor, mean_magnetic_moment <= 0),\n (1., True)\n )\n self.BMAG = self.beta = self.symbol_replace(beta, self._symbols)\n\n curie_temp = \\\n self.redlich_kister_sum(phase, param_search, tc_param_query)\n tc = curie_temp / Piecewise(\n (afm_factor, curie_temp <= 0),\n (1., True)\n )\n self.TC = self.curie_temperature = self.symbol_replace(tc, self._symbols)\n\n # Used to prevent singularity\n tau_positive_tc = v.T / (curie_temp + 1e-9)\n tau_negative_tc = v.T / ((curie_temp/afm_factor) + 1e-9)\n\n # define model parameters\n p = phase.model_hints['ihj_magnetic_structure_factor']\n A = 518/1125 + (11692/15975)*(1/p - 1)\n # factor when tau < 1 and tc < 0\n sub_tau_neg_tc = 1 - (1/A) * ((79/(140*p))*(tau_negative_tc**(-1)) + (474/497)*(1/p - 1) \\\n * ((tau_negative_tc**3)/6 + (tau_negative_tc**9)/135 + (tau_negative_tc**15)/600)\n )\n # factor when tau < 1 and tc > 0\n sub_tau_pos_tc = 1 - (1/A) * ((79/(140*p))*(tau_positive_tc**(-1)) + (474/497)*(1/p - 1) \\\n * ((tau_positive_tc**3)/6 + (tau_positive_tc**9)/135 + (tau_positive_tc**15)/600)\n )\n # factor when tau >= 1 and tc > 0\n super_tau_pos_tc = -(1/A) * ((tau_positive_tc**-5)/10 + (tau_positive_tc**-15)/315 + (tau_positive_tc**-25)/1500)\n # factor when tau >= 1 and tc < 0\n super_tau_neg_tc = -(1/A) * ((tau_negative_tc**-5)/10 + (tau_negative_tc**-15)/315 + (tau_negative_tc**-25)/1500)\n\n # This is an optimization to reduce the complexity of the compile-time expression\n expr_cond_pairs = 
[(sub_tau_neg_tc, curie_temp/afm_factor > v.T),\n (sub_tau_pos_tc, curie_temp > v.T),\n (super_tau_pos_tc, And(curie_temp < v.T, curie_temp > 0)),\n (super_tau_neg_tc, And(curie_temp/afm_factor < v.T, curie_temp < 0)),\n (0, True)\n ]\n g_term = Piecewise(*expr_cond_pairs)\n\n return v.R * v.T * log(beta+1) * \\\n g_term / site_ratio_normalization", "def ode_rhs(self):\n\n #: Bandpass l_ce\n #b, a = signal.butter(2, 50, 'low', analog=True)\n #l_ce_filt = signal.lfilter(b, a, self._l_ce.sym)\n\n l_ce_tol = cas.fmax(self._l_ce.sym, 0.0)\n _stim = cas.fmax(0.01, cas.fmin(self._stim.sym, 1.))\n\n #: Algrebaic Equation\n l_mtc = self._l_slack.val + self._l_opt.val + self._delta_length.sym\n l_se = l_mtc - l_ce_tol\n\n #: Muscle Acitvation Dynamics\n self._dA.sym = (\n _stim - self._activation.sym)/GeyerMuscle.tau_act\n\n #: Muscle Dynamics\n #: Series Force\n _f_se = (self._f_max.val * (\n (l_se - self._l_slack.val) / (\n self._l_slack.val * self.e_ref))**2) * (\n l_se > self._l_slack.val)\n\n #: Muscle Belly Force\n _f_be_cond = self._l_opt.val * (1.0 - self.w)\n\n _f_be = (\n (self._f_max.val * (\n (l_ce_tol - self._l_opt.val * (1.0 - self.w)) / (\n self._l_opt.val * self.w / 2.0))**2)) * (\n l_ce_tol <= _f_be_cond)\n\n #: Force-Length Relationship\n val = cas.fabs(\n (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))\n exposant = GeyerMuscle.c * val**3\n _f_l = cas.exp(exposant)\n\n #: Force Parallel Element\n _f_pe_star = (self._f_max.val * (\n (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))**2)*(\n l_ce_tol > self._l_opt.val)\n\n #: Force Velocity Inverse Relation\n _f_v_eq = ((\n self._f_max.val * self._activation.sym * _f_l) + _f_pe_star)\n\n f_v_cond = cas.logic_and(\n _f_v_eq < self.tol, _f_v_eq > -self.tol)\n\n _f_v = cas.if_else(f_v_cond, 0.0, (_f_se + _f_be) / ((\n self._f_max.val * self._activation.sym * _f_l) + _f_pe_star))\n\n f_v = cas.fmax(0.0, cas.fmin(_f_v, 1.5))\n\n self._v_ce.sym = cas.if_else(\n f_v < 1.0, self._v_max.sym * self._l_opt.val * (\n 1.0 - f_v) / (1.0 + f_v * GeyerMuscle.K),\n self._v_max.sym*self._l_opt.val * (f_v - 1.0) / (\n 7.56 * GeyerMuscle.K *\n (f_v - GeyerMuscle.N) + 1.0 - GeyerMuscle.N\n ))\n\n #: Active, Passive, Tendon Force Computation\n _f_v_ce = cas.if_else(\n self._v_ce.sym < 0.,\n (self._v_max.sym*self._l_opt.val - self._v_ce.sym) /\n (self._v_max.sym*self._l_opt.val + GeyerMuscle.K * self._v_ce.sym),\n GeyerMuscle.N + (GeyerMuscle.N - 1) * (\n self._v_max.sym*self._l_opt.val + self._v_ce.sym\n ) / (\n 7.56 * GeyerMuscle.K * self._v_ce.sym - self._v_max.sym*self._l_opt.val\n ))\n\n self._a_force = self._activation.sym * _f_v_ce * _f_l * self._f_max.val\n self._p_force = _f_pe_star*_f_v - _f_be\n self._t_force = _f_se\n\n self._alg_tendon_force.sym = self._z_tendon_force.sym - self._t_force\n self._alg_active_force.sym = self._z_active_force.sym - self._a_force\n self._alg_passive_force.sym = self._z_passive_force.sym - self._p_force\n self._alg_v_ce.sym = self._z_v_ce.sym - self._v_ce.sym\n self._alg_l_mtc.sym = self._z_l_mtc.sym - l_mtc\n self._alg_dact.sym = self._z_dact.sym - self._dA.sym\n\n return True", "def __extract_data(self, FET_instance, is_ambi): # Added type to indicate n/p/a-type\n print('starting extraction')\n\n # check the type of FET inserted\n if is_ambi:\n if isinstance(FET_instance, NFET):\n type = \"_n\"\n elif isinstance(FET_instance, PFET):\n type = \"_p\"\n else:\n type = \"\"\n\n # now compute the vt and gm using the max Vd\n vd = self.idvg.get_secondary_indep_values()\n # adjust the shape to be 
[num_set, 1]\n vd = Value.array_like(np.expand_dims(np.array(vd), axis=-1), unit=ureg.volt)\n max_vd = max(vd)\n\n vg = self.idvg.get_column('vg')\n max_vg = FET_instance.max_value(vg)\n\n # get the max and min Ion\n ion = self.idvg.get_column('id')\n max_ion, max_ion_i = FET_instance.max_value(ion, return_index=True)\n max_ion_vd = vd[max_ion_i[0], 0]\n max_ion_vg = vg[max_ion_i]\n FET_instance.max_Ion.set(value=max_ion, input_values={'Vg': max_ion_vg, 'Vd': max_ion_vd})\n\n # compute the gm\n gm_fwd, max_gm_fwd, max_gm_fwd_i = self._extract_gm(FET_instance, fwd=True, return_max=True)\n gm_bwd, max_gm_bwd, max_gm_bwd_i = self._extract_gm(FET_instance, bwd=True, return_max=True)\n\n gm = np.concatenate((gm_fwd, gm_bwd), axis=-1)\n\n self.idvg.add_column(column_name='gm'+type, column_data=gm)\n max_gm, max_gm_i = FET_instance.max_slope_value(self.idvg.get_column(column_name='gm'+type), return_index=True)\n\n max_gm_vd = vd[max_gm_i[0], 0]\n max_gm_input_values = {'Vg': self.idvg.get_column_set('vg', max_gm_vd)[max_gm_i[-1]], 'Vd': max_gm_vd}\n\n FET_instance.max_gm.set(max_gm, max_gm_input_values)\n # print('set gm')\n # Now extract the Vt values\n vt_fwd = self._extract_vt(index=max_gm_fwd_i, max_gm=max_gm_fwd, fwd=True)\n vt_bwd = self._extract_vt(index=max_gm_bwd_i, max_gm=max_gm_bwd, bwd=True)\n\n FET_instance.Vt_fwd.set(vt_fwd)\n FET_instance.Vt_bwd.set(vt_bwd)\n\n # compute the ss\n ss = self._slope(y_data=self.idvg.get_column('vg'),\n x_data=np.log10(self.idvg.get_column('id')),\n keep_dims=True, remove_zeroes=True)\n self.idvg.add_column(column_name='ss'+type, column_data=ss)\n FET_instance.min_ss = FET_instance.min_value(self.idvg.get_column(column_name='ss'+type), return_index=False)\n\n FET_instance.compute_properties()\n\n # now compute the carrier density\n n = FET_instance.vg_to_n(self.idvg.get_column('vg'))\n self.idvg.add_column('n'+type, n)\n\n # print('adding r')\n # now compute the resistance\n r = vd / self.idvg.get_column('id')\n self.idvg.add_column('resistance'+type, r)", "def balance(self):\n # mass balance equation\n self.oPort[0].fdot = self.iPort[0].fdot\n # energy\n self.workExtracted = self.iPort[0].fdot *(self.iPort[0].h - self.oPort[0].h)", "def set(self, g_imp, mu):\n if not(self.filling is None):\n assert False, 'todo, filling feature not implented yet'\n #mu = self.find_and_set_mu(self.filling, se_lat, mu, self.dmu_max)\n if self.transf is not None:\n g_imp = self.transf.backtransform_g(g_imp)\n self.g_lat_initdict['g_r'] = self.calc_g_r(g_imp)\n self.g_lat = LatticeGreensfunction(**self.g_lat_initdict)\n self.g_lat.periodize()\n self.g_cluster = self.set_g_cluster(self.g_cluster, self.g_lat)\n for ri, rj in itt.product(*[self.r_cavity]*2):\n self.g_cavity[ri, rj] = self.g_lat[ri, rj].copy()\n for ra, rb in itt.product(*[self.r_cluster]*2):\n for s, b in self.g_cavity[ri, rj]:\n b -= self.g_lat[ri, ra][s] * self.g_lat.inverse_real_space_at(ra, rb)[\n s] * self.g_lat[rb, rj][s]\n self.lambd.zero()\n for ila, icl in self.lat_r_to_cluster.items():\n ra, rb = ila[0], ila[1]\n bo, bi, bj = ila[2], ila[3], ila[4]\n for ri, rj in itt.product(*[self.r_cavity]*2):\n self.lambd[icl[0]][icl[1], icl[2]] += self.hopping_lat[ra, ri][bo][bi, bj] * \\\n self.g_cavity[ri, rj][bo][bi, bj] * \\\n self.hopping_lat[rj, rb][bo][bi, bj]\n\n if self.transf is not None:\n self.lambda_imp_basis = self.transf.transform_g(self.lambd)\n self << self.transf.transform_g(self.g_cluster)\n else:\n self.lambda_imp_basis << self.lambd\n self << self.g_cluster\n return mu" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the footprint of a raster
def get_footprint(raster, footprint):
    # set the environment variable and workspace
    arcpy.env.overwriteOutput = True
    input_folder = os.path.dirname(raster)
    arcpy.env.workspace = input_folder

    raster_extent = arcpy.Describe(raster).extent
    get_extent_mask(raster_extent, footprint)

    # add the original spatial reference to the footprint
    desc = arcpy.Describe(raster)
    arcpy.DefineProjection_management(footprint, desc.spatialReference)
[ "def test_raster_shape(dem_rast):\n width = dem_rast.width\n height = dem_rast.height\n\n assert width == 1147 and height == 974", "def get_raster_pixels(raster_path):\n\n raster_pixels = arcpy.SearchCursor(raster_path).next().count\n\n return raster_pixels", "def find_footprint(self, member='total'):\n\n if self.total_mask is None:\n print(\"Please add exposures before computing footprint...\")\n if member == 'total':\n mask = self.total_mask\n else:\n if member not in self.exp_masks:\n raise ValueError(\"Member {} not added to footprint\".format(member))\n # Recompute mask specifically for this member\n # mask = self.exp_masks[member]['mask']\n exp_mask = self.exp_masks[member]['mask']\n mask = np.zeros(self.meta_wcs.array_shape, dtype=np.int16)\n\n for sci in exp_mask.values():\n img = Image.new('L', sci['img_shape'], 0)\n ImageDraw.Draw(img).polygon(sci['polygon'], outline=1, fill=1)\n blank = np.array(img).astype(np.int16)\n mask[sci['scell_slice']] += blank\n\n self.footprint = np.clip(mask, 0, 1)\n self.footprint_member = member", "def find_base_size(self):\n\n# Find longitudinal locations of first two points\n first_UTM = self.shapes[0].points[0][0]\n second_UTM = self.shapes[1].points[0][0]\n\n# Find the difference. This difference in meters is the size of the grid\n grid_size = second_UTM - first_UTM\n\n return grid_size", "def Get_binsize(d_nm):\n BC03_z2_res = 9\n A2pix = d_nm*10\n bs = int(BC03_z2_res/A2pix)\n print 'Binsize = BC03_res(z~2)/spec_res = %s A/(%s A/pix) = %s pix' % (BC03_z2_res,A2pix,bs)\n return bs", "def test_calc_footprint_2():\n fits = get_pkg_data_filename(\"data/sip.fits\")\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(fits)\n\n axes = (1000, 1051)\n ref = np.array(\n [\n [202.39265216, 47.17756518],\n [202.7469062, 46.91483312],\n [203.11487481, 47.14359319],\n [202.76092671, 47.40745948],\n ]\n )\n footprint = w.calc_footprint(axes=axes, undistort=False)\n assert_allclose(footprint, ref)", "def RasterXCellSize(self):\n return self._ImageShape__pix_sz[0]", "def getPatchSize(self) -> retval:\n ...", "def get_band_count(raster_path):\n with rasterio.open(raster_path) as src:\n return src.count", "def RasterCellSize(self):\n assert self._ImageShape__pix_sz[0] == abs(self._ImageShape__pix_sz[1]), \"Cannot use method ImageShape.CellSize when X and Y cell sizes are different.\"\n return self._ImageShape__pix_sz[0]", "def get_pixel_count(self): # pragma: no cover\n pass", "def watermark_len(wmname):\n wm = Image.open(wmname)\n x, y = wm.size\n\n return x * y * 8", "def get_image_pixel_size(): \n function = LegacyFunctionSpecification() \n function.addParameter('nx', dtype='i', direction=function.OUT)\n function.addParameter('ny', dtype='i', direction=function.OUT)\n function.result_type = 'i'\n return function", "def getFreeTileCount(self):\n return self.freeTiles", "def getSuperpixelSize(self) -> retval:\n ...", "def get_size():\n l = self.linfeats.get_size()\n return 2*l + (l *(l-1)) / 2", "def searchDimension(self):\n\t\t\t\treturn pow(2, self._ppmResolution-1)", "def footprint(self, i):\n return np.array(footprint(self.lon[i], self.lat[i], self.rsat[i]))", "def get_full_uncompressed_image_size(self) -> int:\n\n total_blocks = self.NBPR*self.NBPC\n if self.IMODE == 'S':\n total_blocks *= len(self.Bands)\n return total_blocks*self.get_uncompressed_block_size()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the effective area of interest based on input vegetation map, DEM and extent.
def find_aoi(extent, dem, veg):
    # set the environment variable and workspace
    arcpy.env.overwriteOutput = True
    input_dem_folder = os.path.dirname(dem)
    input_veg_folder = os.path.dirname(veg)
    arcpy.env.workspace = input_dem_folder

    # derive the effective mask based on the input data
    arcpy.AddMessage('Get the area of interest from the input extent ...')
    mask = pjoin(input_dem_folder, 'mask.shp')

    if str(extent) in ['DEFAULT', 'MAXOF', 'MINOF']:
        # get the extent of inputs
        dem_poly = pjoin(input_dem_folder, "dem_poly.shp")
        veg_poly = pjoin(input_veg_folder, "veg_poly.shp")
        get_footprint(dem, dem_poly)
        get_footprint(veg, veg_poly)

        arcpy.Intersect_analysis([dem_poly, veg_poly], mask)

        # delete intermediate files
        if arcpy.Exists(dem_poly):
            arcpy.Delete_management(dem_poly)
        if arcpy.Exists(veg_poly):
            arcpy.Delete_management(veg_poly)
    else:
        get_extent_mask(extent, mask)

    # add dem's spatial reference to the mask
    desc = arcpy.Describe(dem)
    arcpy.DefineProjection_management(mask, desc.spatialReference)

    return mask
[ "def exposed_area(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.exposure\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)", "def get_avg_elev(dem_image_file):\n from osgeo import gdal\n \n ds = gdal.Open(dem_image_file, gdal.GA_ReadOnly)\n dem_image = ds.GetRasterBand(1).ReadAsArray()\n avg_elev = dem_image[dem_image>0.0].mean() # Negative values are ignored.\n ds = None\n del dem_image\n return avg_elev", "def compute_area(self) -> None:\n for ann in tqdm(\n self.anns.values(), desc=\"process images\", disable=not self.verbose\n ):\n ann[\"area\"] = ann[\"bbox\"][2] * ann[\"bbox\"][3]", "def area(self) -> float:\n if self._moments is None:\n self._moments = cv.moments(self.points)\n return self._moments[\"m00\"]", "def _get_area(self) -> \"double\" :\n return _core.SurfaceEvaluator__get_area(self)", "def compute_elevation(displacement):\n return displacement - np.nanmean(displacement)", "def get_field_of_view_extent_mm(self) -> np.ndarray:\n pass", "def get_average_elevation(elevation_map: List[List[int]]) -> float:\n total = 0\n for total_elements in elevation_map:\n for element in total_elements:\n total += element\n return total/(len(elevation_map)**2)", "def find_common_area(veg_class, slope, aspect):\n\n output_folder = os.path.dirname(veg_class)\n arcpy.env.overwriteOutput = True\n\n # set directory\n work_folder = output_folder\n os.chdir(work_folder)\n arcpy.env.workspace = work_folder\n\n # get the common area of veg and dem\n # get the extent of inputs\n slope_poly = \"slope_poly.shp\"\n veg_class_poly = \"veg_class_poly.shp\"\n get_footprint(slope, slope_poly)\n get_footprint(veg_class, veg_class_poly)\n\n mask_com = 'mask_com.shp'\n\n arcpy.Intersect_analysis([slope_poly, veg_class_poly], mask_com)\n\n veg_class_com = pjoin(output_folder, 'veg_c')\n slope_com = pjoin(output_folder, 'slope_c')\n aspect_com = pjoin(output_folder, 'aspect_c')\n\n extract_by_mask(veg_class, mask_com, veg_class_com)\n extract_by_mask(slope, mask_com, slope_com)\n extract_by_mask(aspect, mask_com, aspect_com)\n\n if arcpy.Exists(slope_poly):\n arcpy.Delete_management(slope_poly)\n if arcpy.Exists(veg_class_poly):\n arcpy.Delete_management(veg_class_poly)\n if arcpy.Exists(mask_com):\n arcpy.Delete_management(mask_com)\n if arcpy.Exists(veg_class):\n arcpy.Delete_management(veg_class)\n if arcpy.Exists(slope):\n arcpy.Delete_management(slope)\n if arcpy.Exists(aspect):\n arcpy.Delete_management(aspect)\n\n return veg_class_com, slope_com, aspect_com", "def get_average_density(self):\n if self.fine_map_average is None:\n self.import_fine_map_array()\n self.fine_map_average = self.deme * np.mean(self.fine_map_array)\n return self.fine_map_average", "def viz_elevation(self) -> (hv.DynamicMap, hv.Layout):\n\n OA_da = self.parallel_request_OA()\n\n if OA_da is None:\n print(\"No data\")\n return (None,) * 2\n\n else:\n\n cols = (\n [\"lat\", \"lon\", \"elevation\", \"canopy\", \"rgt\", \"cycle\"]\n if self.product == \"ATL08\"\n else [\"lat\", \"lon\", \"elevation\", \"rgt\", \"cycle\"]\n )\n ddf = dd.io.from_dask_array(OA_da, columns=cols).astype(\n {\n \"lat\": \"float\",\n \"lon\": \"float\",\n \"elevation\": \"float\",\n \"rgt\": \"int\",\n \"cycle\": \"int\",\n }\n )\n\n print(\"Plot elevation, please wait...\")\n\n x, y = ds.utils.lnglat_to_meters(ddf.lon, ddf.lat)\n ddf_new = ddf.assign(x=x, y=y).persist()\n dset = hv.Dataset(ddf_new)\n\n raster_cycle = dset.to(\n hv.Points,\n [\"x\", 
\"y\"],\n [\"elevation\"],\n groupby=[\"cycle\"],\n dynamic=True,\n )\n raster_rgt = dset.to(\n hv.Points, [\"x\", \"y\"], [\"elevation\"], groupby=[\"rgt\"], dynamic=True\n )\n curve_rgt = dset.to(\n hv.Scatter, [\"lat\"], [\"elevation\"], groupby=[\"rgt\"], dynamic=True\n )\n\n tiles = hv.element.tiles.EsriImagery().opts(\n xaxis=None, yaxis=None, width=450, height=450\n )\n map_cycle = tiles * rasterize(\n raster_cycle, aggregator=ds.mean(\"elevation\")\n ).opts(colorbar=True, tools=[\"hover\"])\n map_rgt = tiles * rasterize(\n raster_rgt, aggregator=ds.mean(\"elevation\")\n ).opts(colorbar=True, tools=[\"hover\"])\n lineplot_rgt = rasterize(curve_rgt, aggregator=ds.mean(\"elevation\")).opts(\n width=450, height=450, cmap=[\"blue\"]\n )\n\n return map_cycle, map_rgt + lineplot_rgt", "def interpolate_area(self, energy):\n earea = np.interp(np.asarray(energy), self.emid, self.eff_area, \n left=0.0, right=0.0)\n return u.Quantity(earea, \"cm**2\")", "def get_area(shape: gpd.geodataframe.GeoDataFrame) -> float:\n return round(sum(shape.area) / 10 ** 6, 4)", "def evaluate(self, energy=None):\n energy = self.ebounds.log_centers if energy is None else Energy(energy)\n\n i = self.ebounds.find_energy_bin(energy)\n\n # TODO: Use some kind of interpolation here\n return self.effective_area[i]", "def bounding_box_area(self):\n return (self.bounding_box[1][0] - self.bounding_box[0][0]) * (self.bounding_box[1][1] - self.bounding_box[0][1])", "def getArea(self,i):\n a,b = self.lim\n area = self.c*self.f(a + i*self.c)\n return area", "def total_area_exposed_to_zone(self):\n return self._total_area_exposed_to_zone", "def calculate_area(self):\n self.area = len(self.points)\n return self.area", "def signed_area(self):\n flat = self.flatten()\n area = 0\n for s in flat.asSegments():\n area = area + (s.start.x * s.end.y) - (s.start.y * s.end.x)\n area = area / 2.0\n return area" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the BAL calculations.
def run():
    # add subfolders into path
    cmd_folder = os.path.realpath(
        os.path.abspath(
            os.path.split(
                inspect.getfile(
                    inspect.currentframe()))[0]))
    if cmd_folder not in sys.path:
        sys.path.insert(0, cmd_folder)

    cmd_subfolder = pjoin(cmd_folder, "utilities")
    if cmd_subfolder not in sys.path:
        sys.path.insert(0, cmd_subfolder)

    # get input parameters from toolbox interface
    dem = arcpy.GetParameterAsText(0)
    veg = arcpy.GetParameterAsText(1)
    remap = arcpy.GetParameterAsText(2)
    output_folder = arcpy.GetParameterAsText(3)
    fdi = arcpy.GetParameter(4)
    extent = arcpy.GetParameter(5)

    dem_sr = arcpy.Describe(dem).spatialReference
    arcpy.AddMessage("DEM's spatial reference type is {0}".format(dem_sr.type))

    if dem_sr.type == "Projected":
        # find effective AOI based on the input parameters
        mask = find_aoi(extent, dem, veg)
        try:
            # calculate the BAL for the effective AOI
            bal_calc(veg, dem, fdi, output_folder, remap, mask)
            arcpy.AddMessage("Successfully completed BAL calculation!")
        except Exception as err:
            # Report any exceptions back
            arcpy.AddError(err)
    else:
        arcpy.AddError("To go ahead, the DEM needs to be projected first")
[ "def run(self):\n try:\n logger.info(\"start calculation cycle\")\n self.calculate()\n self.process()\n except:\n import traceback\n logger.error(\"failure during state cycle\")\n logger.error(traceback.format_exc())", "def run():\n # create a Parameters object containing current-law policy (clp) parameters\n clp = Parameters()\n\n # create a Records object (puf) containing puf.csv input records\n tax_dta = pd.read_csv('puf.csv')\n blowup_factors = './taxcalc/StageIFactors.csv'\n weights = './taxcalc/WEIGHTS.csv'\n puf = Records(tax_dta, blowup_factors, weights)\n\n # create a Calculator object using clp params and puf records\n calc = Calculator(params=clp, records=puf)\n\n # save calculated test results in output dataframe (odf)\n odf = calc.calc_all_test()\n odf = odf.T.groupby(level=0).first().T\n\n # write test output to csv file named 'results_puf.csv'\n odf.to_csv('results_puf.csv', float_format='%1.3f',\n sep=',', header=True, index=False)", "def run_algo(self):\n\n # dots\n self.update_alphadots()\n\n # canvas\n self.update_canvas()\n\n # run the program\n #self.wait_proc(self.run_proc(['do_everything.sh']))\n self.run_comparison()\n return", "def main(args):\n config_dict = read_configfile(args.configfile, args.simulation,\n args.verbose)\n for i, s in enumerate(config_dict['simulation']):\n simulation_config_dict = config_dict['simulation'][s]\n user_config_dict = config_dict['user_input']\n catalog_name = os.path.join(\n user_config_dict['data_dir'],\n simulation_config_dict['catalog'])\n # Set parameter values in param\n param = get_config_class(simulation_config_dict,\n catalog_name, args.verbose)\n # Set seed\n np.random.seed(int(param.seed))\n # Generate images of blends in all the observing bands\n draw_blend_generator = make_draw_generator(\n param, user_config_dict, simulation_config_dict)\n # Create generator for measurement algorithm outputs\n measure_generator = make_measure_generator(param, user_config_dict,\n draw_blend_generator)\n # get metrics class that can generate metrics\n metrics_class = get_metrics_class(user_config_dict,\n param.verbose)\n test_size = int(simulation_config_dict['test_size'])\n metrics_param = metrics_class(measure_generator, param)\n ouput_path = get_ouput_path(user_config_dict, param.verbose)\n output_name = os.path.join(ouput_path, s + '_metrics_results.dill')\n results = btk.compute_metrics.run(metrics_param, test_size=test_size)\n with open(output_name, 'wb') as handle:\n dill.dump(results, handle)\n print(\"BTK outputs saved at \", output_name)\n save_config_file(param, user_config_dict, simulation_config_dict,\n s, ouput_path)", "def run_all(self):\n\n self.fill_table()\n\n self.traceback()\n\n self.alignment()\n\n self.total_score()", "def blast_analysis_main():\n conn = sqlite3.connect(ProjectDefinitions.data_directory + ProjectDefinitions.drugbank_db_name)\n curs = conn.cursor()", "def run(self):\n\t\tusers = models.User.objects.filter(is_active=True).all()\n\t\tfor user in users:\n\t\t\tself.log.debug(\"Resolving balances for %s\" % user)\n\t\t\tbalances = db.get_balances(user, credited=False)\n\t\t\tself.resolve_balances(balances)", "def bal_calc(vegetation, dem, fdi, output_folder, remap, mask):\n arcpy.env.overwriteOutput = True\n\n arcpy.AddMessage('Reclassify the vegetation map ... ')\n veg_class = reclass_veg(vegetation, dem, output_folder, remap, mask)\n\n arcpy.AddMessage('Reclassify slope and aspect ... 
')\n slope, aspect = get_slope_aspect(dem, output_folder, mask)\n\n if arcpy.Exists(mask):\n arcpy.Delete_management(mask)\n\n # extract the common area between vegtation, slope and aspect\n arcpy.AddMessage('Get common area of input data ... ')\n veg_class_com, slope_com, aspect_com = find_common_area(veg_class,\n slope, aspect)\n\n arcpy.AddMessage('Calculate the BAL ... ')\n bal_cal(veg_class_com, slope_com, aspect_com, fdi)", "def _run_evaluation(self) -> None:", "def run (self, scalers = {'capital costs':1.0}):\n tag = self.cd['file id'].split('+')\n\n self.was_run = True\n self.reason = \"OK\"\n\n if len(tag) > 1 and tag[1] != 'water-wastewater':\n self.was_run = False\n self.reason = \"Not a water/wastewater project.\"\n return\n #~ print self.comp_specs['data']['assumption type used']\n if self.comp_specs['data']['assumption type used'] == 'UNKNOWN':\n self.was_run = False\n self.reason = \"Water/wastewater system type unknown.\"\n #~ print self.reason\n return\n\n if self.cd[\"model electricity\"]:\n self.calc_baseline_kWh_consumption()\n self.calc_proposed_kWh_consumption()\n self.calc_savings_kWh_consumption()\n\n if self.cd[\"model heating fuel\"]:\n self.calc_baseline_HF_consumption()\n self.calc_proposed_HF_consumption()\n self.calc_savings_HF_consumption()\n\n #~ years = range(self.start_year,self.end_year)\n #~ self.forecast.add_heating_fuel_column(\\\n #~ \"heating_fuel_water-wastewater_consumed [gallons/year]\",\n #~ years,\n #~ self.baseline_HF_consumption*constants.mmbtu_to_gal_HF)\n #~ self.forecast.add_heating_fuel_column(\\\n #~ \"heating_fuel_water-wastewater_consumed [mmbtu/year]\", years,\n #~ self.baseline_HF_consumption)\n\n #~ self.forecast.add_heat_demand_column(\\\n #~ \"heat_energy_demand_water-wastewater [mmbtu/year]\",\n #~ years, self.baseline_HF_consumption)\n\n if self.cd[\"model financial\"]:\n self.calc_capital_costs()\n\n self.get_diesel_prices()\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n self.calc_annual_total_savings()\n\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n self.calc_levelized_costs(0)\n #~ self.levelized_cost_of_energy['MMBtu'] *= .5\n #~ self.levelized_cost_of_energy['kWh'] *= .5", "def main(self, run_step=0):\n\n self.copy_files()\n\n if run_step < 1:\n print(\"Creating a dta file with bundles and measure for the legacy stata process. Also an rei/acause map to bundle_id\")\n self.create_bundle_file()\n\n write_acause_rei_to_bundle_map(self.run_id)\n\n if run_step < 2:\n print(\"Sending out the master script to process MS data to the individual level\")\n run_marketscan(self.run_id)\n self.job_holder(job_name=\"ms_group\")\n\n if run_step < 3:\n print(\"Qsubbing the legacy Stata code to aggregate before noise reduction\")\n self.pre_nr_agg()\n self.job_holder(job_name=\"pre_nr_\")\n\n if run_step < 4:\n print(\"Qsubbing the legacy Stata code to run Noise Reduction on inpatient data\")\n self.ms_nr(\"inp\")\n self.job_holder(job_name=\"stata_submit\")\n self.job_holder(job_name=\"ms_nr\")\n\n if run_step < 5:\n print(\"Qsubbing the legacy Stata code to run Noise Reduction on U(inpatient, outpatient) data\")\n\n\n self.ms_nr(\"all\")\n self.job_holder(job_name=\"stata_submit\")\n self.job_holder(job_name=\"ms_nr\")\n\n print(\"The claims process has finished running. 
Inputs for the CFs should be ready and NR\"\\\n \" estimates are available in FILEPATH\"\\\n format(self.run_id))\n print(\"Sending out the job to format the final bundle level estimates for upload.\")\n warnings.warn(\"we're applying the asfr adjustment for maternal data. Maternal Bundles are hard coded and decomp step is hardcoded\")\n self.decomp_step = 'step1'\n self.write_bundle_csv()\n return", "def main():\n file_name = os.path.dirname(os.path.realpath(__file__)) + \"/addition.lisp\"\n print(file_name)\n actr_to_mdf(file_name)\n mdf_graph = load_mdf(file_name[:-5] + \".json\").graphs[0]\n eg = EvaluableGraph(graph=mdf_graph, verbose=True)\n term = False\n goal = {}\n retrieval = {}\n while not term:\n eg.evaluate(initializer={\"goal_input\": goal, \"dm_input\": retrieval})\n term = eg.enodes[\"check_termination\"].evaluable_outputs[\"check_output\"].curr_value\n goal = eg.enodes[\"fire_production\"].evaluable_outputs[\"fire_prod_output_to_goal\"].curr_value\n retrieval = eg.enodes[\"fire_production\"].evaluable_outputs[\"fire_prod_output_to_retrieval\"].curr_value\n print(\"Final Goal:\")\n print(eg.enodes[\"goal_buffer\"].evaluable_outputs[\"goal_output\"].curr_value)", "def run(self):\n tstart = time.time()\n if self.logEN:\n msg = \"Info : Pboost is running for configuration number : \"\n msg = msg + str(self.conf_num)\n print datetime.now(),msg\n\n if self.isLeader:\n self.clean()\n\n \"\"\"Extract features\"\"\"\n extractor = Extractor(pb=self)\n if self.logEN:\n msg = \"Info : Extractor is created.\"\n print datetime.now(),msg\n\n extractor.extract()\n if self.logEN:\n msg = \"Info : Extractor finished running.\"\n print datetime.now(),msg\n\n if self.logEN:\n msg = \"Info : Partition of feature space is \"+str(self.partition)\n print datetime.now(),msg\n\n if self.xvalEN:\n jobs = np.arange(self.xval_no+1)\n else:\n jobs = (0,)\n\n \"\"\"Run boosting for each xval index\"\"\"\n for xval_ind in jobs:\n boost_process = Process(pb = self,\n xval_ind = xval_ind\n )\n if self.logEN:\n msg = \"Info : Boosting process is created for xval index \"\n msg = msg + str(xval_ind)\n print datetime.now(),msg\n\n boost_process.run()\n if self.logEN:\n msg = \"Info : Boosting computation is finished xval index \"\n msg = msg + str(xval_ind)\n print datetime.now(),msg\n boost_process = None\n\n \"\"\"Report the results\"\"\"\n if self.isLeader:\n \"\"\"Call post processing step\"\"\"\n reporter = Reporter(pb = self)\n if self.logEN:\n msg = \"Info : Reporter is created.\"\n print datetime.now(),msg\n\n reporter.run()\n if not self.debugEN:\n self.clean()\n\n if self.logEN:\n msg = \"Info : Reporter finished running.\"\n print datetime.now(),msg\n tfinish = time.time()\n delta = tfinish - tstart\n msg = \"Info : Total runtime : %0.2f seconds\" % (delta,)\n print datetime.now(),msg", "def run(self):\n for mod in self.models:\n pYields = mod.getYields(self.validation_freq) #estimated percent yields\n for a in self.alphas:\n n_days = self.stock.n_days_test #number of days we're investing\n dailyCap = self.principal/n_days \n principal, acctStock, cash = self.principal, 0, 0\n acctValue = principal\n snapshots, investments, cashStock = [], [], []\n for i in range(n_days):\n cash = dailyCap #get dailyCap in cash to spend for the day\n principal -= dailyCap \n stockPrice = self.stock.getDayPriceOpen(i)\n stockPriceClose = self.stock.getDayPriceClose(i)\n cash, acctStock, moneySpent = self.buyOrSell(pYields[i], cash, acctStock, stockPrice, alpha=a, beta=a)\n principal += (dailyCap-moneySpent) #return 
money not spent to principal\n acctValue = principal+acctStock*stockPriceClose #compute account value\n snapshots.append(acctValue)\n investments.append(moneySpent)\n cashStock.append((cash, acctStock*stockPriceClose))\n if self.debug:\n print(\"[debug] percent yield: %f\\n[debug] spent %f on %s at stock price %f\\n[debug] account value is now %f\" %(py, moneySpent, str(stock.testData.index[i]), stockPriceClose, acctValue))\n print(\"[debug] principal: %f dailyCap: %f cash available: %f stock owned: %f\" % (principal, dailyCap, cash, acctStock)) \n mod.addPerformance(a, snapshots)\n mod.addInvestments(a, investments)\n mod.addYield(a, 100*(acctValue-self.principal)/self.principal)\n mod.addCashStock(a, cashStock)\n print(\"[info] Investing $\" + str(self.principal) + \" in \" + self.stock.name + \" using \" + mod.name + ' with alpha=' +str(a)+ ' from ' + str(self.stock.startDate) + ' to ' + str(self.stock.endDate) + ' yielded %' + str(100*(acctValue-self.principal)/self.principal))\n print(\"[info] Total mean error: \" + str(mod.meanError))\n print(\"[info] Stock yield over timeframe: \" + str(self.stockYield))", "def run_gollum(self):\n\n self.report('Running Gollum calculation')\n\n ginputs = dict(self.ctx.gollum_inputs)\n\n # Get the remote folders of previous calculations\n remote_folder_le = self.ctx.workchain_leads.get_outputs_dict()['remote_folder']\n lepath = remote_folder_le.get_remote_path()\n lep = \"\\n 1 \" + lepath + \"/aiida.out\" + \"\\n 2 \" + lepath + \"/aiida.out\"\n\n remote_folder_em = self.ctx.workchain_extmol.get_outputs_dict()['remote_folder']\n empath = remote_folder_em.get_remote_path()\n emp = empath + \"/aiida.out\"\n\n ginputs['parameters'].update({\n 'SBlock Path_Leads': lep,\n 'Path_EM': emp,\n })\n\n gollum_inputs = {}\n gollum_inputs['code'] = ginputs['gollum_code']\n #gollum_inputs['parent_folder'] = remote_folder\n gollum_inputs['settings'] = ParameterData(dict=ginputs['settings'])\n gollum_inputs['parameters'] = ParameterData(dict=ginputs['parameters'])\n gollum_inputs['_options'] = ginputs['options']\n\n process = GollumCalculation.process()\n running = submit(process, **gollum_inputs)\n \n self.report('launching GollumCalculation<{}>'.format(running.pid))\n \n return ToContext(gollum_calc=running)", "def main():\n run_db, cal_db = \"runDB.json\", \"calDB.json\"\n\n par = argparse.ArgumentParser(description=\"pygama calibration suite\")\n arg, st, sf = par.add_argument, \"store_true\", \"store_false\"\n arg(\"-ds\", nargs='*', action=\"store\", help=\"load runs for a DS\")\n arg(\"-r\", \"--run\", nargs=1, help=\"load a single run\")\n arg(\"-s\", \"--spec\", action=st, help=\"print simple spectrum\")\n arg(\"-p1\", \"--pass1\", action=st, help=\"run pass-1 (linear) calibration\")\n arg(\"-p2\", \"--pass2\", action=st, help=\"run pass-2 (peakfit) calibration\")\n arg(\"-m\", \"--mode\", nargs=1, help=\"set pass-2 calibration mode\")\n arg(\"-e\", \"--etype\", nargs=1, help=\"custom energy param (default is e_ftp)\")\n arg(\"-t\", \"--test\", action=st, help=\"set verbose (testing) output\")\n arg(\"-db\", \"--writeDB\", action=st, help=\"store results in DB\")\n arg(\"-pr\", \"--printDB\", action=st, help=\"print calibration results in DB\")\n args = vars(par.parse_args())\n\n # -- standard method to declare the DataSet from cmd line --\n ds = pu.get_dataset_from_cmdline(args, \"runDB.json\", \"calDB.json\")\n \n # -- start calibration routines --\n etype = args[\"etype\"][0] if args[\"etype\"] else \"e_ftp\"\n\n if args[\"printDB\"]:\n 
show_calDB(cal_db) # print current DB status\n\n if args[\"spec\"]:\n show_spectrum(ds, etype) \n\n if args[\"pass1\"]:\n calibrate_pass1(ds, etype, args[\"writeDB\"], args[\"test\"])\n\n if args[\"pass2\"]:\n cal_mode = int(args[\"mode\"][0]) if args[\"mode\"] else 0\n calibrate_pass2(ds, cal_mode, args[\"writeDB\"])", "def algorithm(self):\n # init\n self.transfer = Transfer()\n # Read info from files written by PostJobs and bookkeeping from previous run.\n self.transfer.readInfo()\n self.rucioClient = self._initRucioClient(self.transfer.username, self.transfer.restProxyFile)\n # Get info what's already in Rucio containers\n self.transfer.readInfoFromRucio(self.rucioClient)\n self.crabRESTClient = self._initCrabRESTClient(\n self.transfer.restHost,\n self.transfer.restDBInstance,\n self.transfer.restProxyFile,\n )\n # build dataset\n BuildDBSDataset(self.transfer, self.rucioClient, self.crabRESTClient).execute()\n # do 1\n RegisterReplicas(self.transfer, self.rucioClient, self.crabRESTClient).execute()\n # do 2\n MonitorLockStatus(self.transfer, self.rucioClient, self.crabRESTClient).execute()", "def run(self):\n t_starting_run = time.time()\n if self.verbose:\n print(\"Started BayesOpt.run()\")\n\n self._initialise_bo_df()\n\n if self.starting_jobs is not None:\n for job in self.starting_jobs:\n self.interface.add_job_to_queue(job)\n\n for self.curr_bo_step in range(0, self.n_bo_steps):\n new_sample_x, new_sample_y = None, None\n # try:\n if True:\n t_beginning_of_bo_step = time.time()\n if self.verbose:\n print(\"**--** Starting BayesOpt iteration {}/{} **--**\"\n .format(self.curr_bo_step + 1, self.n_bo_steps))\n\n # Move time ahead until we have the correct number of free\n # workers\n self.interface.run_until_n_free(self.batch_size)\n n_free_workers = self.interface.status['n_free_workers']\n\n completed_jobs = self.interface.get_completed_jobs()\n if len(completed_jobs) > 0:\n new_sample_x, new_sample_y = \\\n self._add_completed_jobs_to_surrogate(completed_jobs)\n assert n_free_workers >= self.batch_size\n\n t_before_opt_surrogate = time.time()\n # if self.verbose:\n # print(f\"Surrogate n_data = {len(self.surrogate.X)}\")\n # if self.optimise_surrogate_model_flag:\n # if self.verbose > 1:\n # print(\"Optimising surrogate model...\")\n # self.surrogate.optimize()\n # self.param_array_hist.append(self.surrogate.param_array)\n # if self.verbose > 1:\n # print(\n # f\"Surrogate model optimisation complete. 
\"\n # f\"New param_array = {self.surrogate.param_array}\")\n self.optimize_surrogate_if_needed()\n\n t_after_opt_surrogate = time.time()\n\n t_before_find_y_min = time.time()\n self.x_min, self.y_min, self.var_at_y_min = self._get_y_min()\n t_after_find_y_min = t_before_get_next = time.time()\n if self.verbose:\n print(\"Selecting next point(s)...\")\n x_batch, acq_at_x_batch = self.get_next()\n t_after_get_next = t_end_of_bo_step = time.time()\n\n time_taken_opt_surrogate = \\\n (t_after_opt_surrogate - t_before_opt_surrogate)\n time_taken_find_y_min = \\\n (t_after_find_y_min - t_before_find_y_min)\n time_taken_get_next = \\\n (t_after_get_next - t_before_get_next)\n time_taken_bo_step = \\\n (t_end_of_bo_step - t_beginning_of_bo_step)\n\n time_taken_dict = {\n 'time_taken_opt_surrogate': time_taken_opt_surrogate,\n 'time_taken_find_y_min': time_taken_find_y_min,\n 'time_taken_get_next': time_taken_get_next,\n 'time_taken_bo_step': time_taken_bo_step, }\n\n if self.create_plots:\n self.plot_step(x_batch=x_batch)\n\n # queue the jobs\n jobs = []\n for ii in range(len(x_batch)):\n job = {'x': x_batch[ii], 'f': self.sampler}\n jobs.append(job)\n\n self.interface.add_job_to_queue(jobs)\n\n self.save_history(None)\n\n if self.curr_bo_step == self.n_bo_steps - 1: # last step\n if self.verbose > 1:\n print(\"Used up budget.\")\n print(\"Minimum at\",\n self.surrogate.X[np.argmin(self.surrogate.Y)])\n\n self._update_bo_df(x_batch, acq_at_x_batch, new_sample_x,\n new_sample_y, time_taken_dict)\n\n # Attempting to force SLURM to update the output file\n sys.stdout.flush()\n\n # except np.linalg.linalg.LinAlgError:\n # print(\"WARNING: BayesOpt crashed at iteration {}!\".format(\n # self.curr_bo_step))\n # break\n if self.verbose:\n print(\n f\"Completed BO exp in;\"\n f\" {round(time.time() - t_starting_run, 2)}s\")", "def run():\n # Initialize db\n connection = init_db()\n sql_helper.nuke_tables(connection)\n\n logger.info(\"Populating probabilistic database...\")\n number_of_elements = 1000000\n generator.run(connection, size=number_of_elements)\n logger.info(\"Populating complete!\")\n\n # Let the benchmark test the database\n benchmark_results = benchmark.runBenchmark(connection, logger)\n\n # Clear the database\n logger.info(\"Clearing the database...\")\n sql_helper.nuke_tables(connection)\n logger.info(\"Clear complete\")\n\n # Close the db connection.\n connection.close()\n logger.info(\"Database connection ended.\")\n\n # Save the results to a file\n date_time = datetime.now().strftime(\"%Y%m%d-%H%M\")\n export_results(results=benchmark_results, filename=\"{}_{}-elements_maybms-benchmark-result.csv\".format(date_time, number_of_elements))\n\n logger.info(\"Bye!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setup is used for creating a new instance of epg
def setUp(self):
    self.epg = epg()
[ "def setUp(self):\n models.Dog.objects.create(\n name='George',\n image_filename='13.jpg',\n age=12,\n gender='m',\n size='xl',\n )", "def setUp(self):\n self.game = game.Game()", "def setup_method(self):\n self.g = Game()", "def setUp(self):\n self.exercise = Exercise.objects.create(exercise_name=\"soccer\")\n self.exercise2 = Exercise.objects.create(exercise_name=\"yoga\")", "def setUp(self):\n self.isa = ISA()", "def create_setup(self):\n raise NotImplementedError", "def setUp(self):\n self.test_checked_port = False\n fog_client.pt_setup_logger()\n self.old_environ = os.environ.copy()\n self.fog_instance = None", "def setUp(self):\n self.game = Game(\"Test Space Wars\")\n self.game.main_loop(testmode = True)", "def setUp(self):\n\t\tself.fisier, self.nume_fisier = mkstemp()\n\n\t\t#instanta clasei Anagrame careia ii testam din metode\n\t\tself.anagrame = Anagrame(self.nume_fisier)", "def setUp(self):\n\n self.ks = KeyStone(environ=None, default_role=\"user\", create_default_role=True, target_domain_name='elixir',\n cloud_admin=True)", "def setUp(self):\n self.dummy_device = \"Garmin00XT\"\n self.xdg_object = utilities.XDG(self.dummy_device)", "def setUp(self):\n SelTestBase.setUp(self)\n self.addDevice()", "def setUp(self):\r\n\r\n self.DUT = Allocation()", "def setUp(self):\n\n self.orm = GraphORM()", "def setup_environment():", "def setUp(self):\n \n # generate a test family\n sex = \"F\"\n mom_aff = \"1\"\n dad_aff = \"1\"\n \n self.trio = self.create_family(sex, mom_aff, dad_aff)\n \n # generate a test variant\n child = create_snv(sex, \"0/1\")\n mom = create_snv(\"F\", \"0/0\")\n dad = create_snv(\"M\", \"0/0\")\n \n var = TrioGenotypes(child.get_chrom(), child.get_position(),child, mom, dad)\n self.variants = [var]\n \n # make sure we've got known genes data\n self.known_gene = {\"inh\": [\"Monoallelic\"], \"confirmed_status\": [\"confirmed dd gene\"]}\n \n self.inh = Autosomal(self.variants, self.trio, self.known_gene, \"1001\")\n self.inh.is_lof = var.child.is_lof()", "def setup(self, abstraction) :\n pass", "def mainSetup():\n setupGlobals()\n setupCallbacks()", "def setUpClass(cls):\n tc_name = \"Create Instance\"\n inst_pre = 'qe_usage_cls_cafe'\n super(UsageTest, cls).setUpClass()\n cls.client = cls.dbaas_provider.client.reddwarfclient\n cls.mgmt_client = cls.dbaas_provider.mgmt_client.reddwarfclient\n cls.mgmt_client.authenticate()\n for i in range(cls.NUM_INSTS):\n inst_name = inst_pre + \"_\" + str(i + 1).zfill(2)\n cls.starttime_list.append(datetime.utcnow())\n testInstance = cls.client.instances.create(\n name=inst_name,\n flavor_id=2,\n volume={\"size\": 2},\n databases=[{\"name\": \"db_name\"}])\n httpCode = testutil.get_last_response_code(cls.client)\n if httpCode != '200':\n raise Exception(\"Create instance failed with code %s\" % httpCode)\n #print(\"Test Create active inst: %s\" % testInstance)\n cls.instance_id_list.append(testInstance.id)\n #print(\"inst id create: [%r] - %r\" % (i, cls.instance_id_list[i]))\n testutil.wait_for_all_active(cls.client, cls.instance_id_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if we are able to retrieve channels
def testRetrieveChannel(self):
    self.assert_(self.epg.channels())
[ "def get_channels():", "def is_channel_available():\n try:\n client = _get_slack_client()\n channel_name = _get_slack_channel_name()\n response = client.conversations_list(limit=sys.maxsize)\n if response.status_code == 200:\n for channel in response.data['channels']:\n if channel['name'] == channel_name:\n return True\n except (SlackConfigurationError, SlackApiError) as e:\n # if the environment variables are missing or the slack api failed to identify the channel\n logging.error(e)\n return False", "def test_get_channel(self):\n channel = api.get_channel(self.channel[\"id\"])\n self.assertEqual(channel.id, self.channel[\"id\"])\n self.assertEqual(channel.name, self.channel[\"name\"])", "def testRetrieveMovieChannels(self):\n self.assert_(self.epg.movieChannels())", "def test_no_channels(self):\n self.client.force_login(create_user())\n\n response = self.client.get(reverse('chat:channel-list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'No messages yet.')\n self.assertQuerysetEqual(response.context['object_list'], [])", "def check_channel(self, remote):\n remote_id = remote.id()\n for c in self.rpc.channels():\n channel = self.rpc.channel(c)\n if channel['nodeid'] == remote_id:\n return channel['state'] == 'NORMAL'\n return False", "def get_channels(self):\n if self._channels is None:\n log.warn(\"get_channels called before check_for_update succeeded!\")\n return self._channels", "def testRetrieveChannelIds(self):\n self.assert_(self.epg.channelIds())", "def channels():\n if not get_secrets():\n click.secho(\n \"・You need to be logged in to view all channels\",\n err=True,\n fg=\"red\",\n bold=True,\n )\n else:\n jwt, _ = get_secrets()\n headers = {\"Authorization\": f\"Bearer {jwt}\"}\n r = requests.get(f\"{URL}/channels\", headers=headers)\n if r.status_code > 200:\n click.secho(\n f\"・{r.json()['msg']}\",\n err=True,\n fg=\"red\",\n bold=True,\n )\n return\n\n # get all public channels available\n data = r.json()[\"data\"]\n if len(data) < 1:\n click.secho(\"・No channels available at the moment!\", fg=\"blue\", bold=True)\n else:\n click.secho(\"---------- AVAILABLE PUBLIC CHANNELS ----------\", bold=True)\n for _channel in data:\n click.secho(\n f'{_channel[\"name\"]}: {len(_channel[\"participants\"])} participant(s)',\n fg=\"blue\",\n bold=True,\n )", "def test_channel_without_membership(self):\n create_channel(create_user())\n\n self.client.force_login(create_user())\n\n response = self.client.get(reverse('chat:channel-list'))\n self.assertContains(response, 'No messages yet.')\n self.assertQuerysetEqual(response.context['object_list'], [])", "def hasAlphaChannels(*args, **kwargs):\n \n pass", "def test_get_channels_error(self):\n resp = {'ok': False, 'error': 'bad bad bad'}\n self.mock_sc.conversations_list.return_value = resp\n with self.assertRaises(SlackAPIError):\n self.bot.get_channels()", "def test_list_user_communication_channels(self):\r\n user_id = None # Change me!!\r\n\r\n r = self.client.list_user_communication_channels(user_id)", "def channelList(self):\n\n url = \"/channels.json\"\n parms = {'api_key': self.pluginPrefs.get('apiKey', '')}\n\n response, response_dict = self.sendToThingspeak('get', url, parms)\n\n if response == 200:\n write_key = \"\"\n indigo.server.log(u\"{0:<8}{1:<25}{2:^9}{3:<21}{4:^10}{5:<18}\".format('ID',\n 'Name',\n 'Public',\n 'Created At',\n 'Ranking',\n 'Write Key'\n )\n )\n indigo.server.log(u\"{0:{1}^100}\".format(\"\", \"=\"))\n for thing in response_dict:\n for key in thing['api_keys']:\n if 
key['write_flag']:\n write_key = key['api_key']\n indigo.server.log(u\"{0:<8}{1:<25}{2:^9}{3:<21}{4:^10}{5:<18}\".format(thing['id'],\n thing['name'],\n thing['public_flag'],\n thing['created_at'],\n thing['ranking'],\n write_key\n )\n )\n\n return True\n\n else:\n return False", "def _verify_scan_channels(self):\n if 'scan' not in self.properties:\n self.logger.error(\"'scan' not found in properties\")\n return\n if 'ao_channel' not in self.properties['scan'] or self.properties['scan']['ao_channel'] not in [1,2]:\n self.logger.error(\"'ao_channel' not found in properties or invalid value (should be 1 or 2)\")\n return\n if 'ai_channel' not in self.properties['scan'] or self.properties['scan']['ai_channel'] not in [1,2]:\n self.logger.error(\"'ai_channel' not found in properties or invalid value (should be 1 or 2)\")\n return\n return self.properties['scan']['ao_channel'], self.properties['scan']['ai_channel']", "async def check_controlled_channels(message):\n channel = message.channel\n id = message.server.id\n found = False\n for private_channel_pair in servers[id][\"created_channels\"]:\n user_channel = private_channel_pair.user_channel\n admin_channel = private_channel_pair.admin_channel\n if channel.id == user_channel.id:\n author = message.author.mention\n mirror_message = f\"{author}: {message.content}\"\n await bot.send_message(admin_channel, mirror_message)\n found = True\n if channel.id == admin_channel.id:\n if message.content.startswith(prefix):\n message_no_prefix = message.content.lstrip(prefix)\n if message_no_prefix.startswith(\"resolve\"):\n await bot.delete_channel(user_channel)\n await bot.delete_channel(admin_channel)\n else:\n admin_message = f\"Admins: {message_no_prefix}\"\n await bot.send_message(user_channel, admin_message)\n found = True\n return found", "def checkChannels(self):\n\t\t\n\t\tb=True\n\t\t\n\t\tfor i,exp in enumerate(self.exps):\n\t\t\tnames=exp.getChannelNames()\n\t\t\tnames.sort()\n\t\t\t\n\t\t\tif i>0:\n\t\t\t\tif names!=lastNames:\n\t\t\t\t\tprintWarning(exp.name+ \" has not the same channels than \"+ self.exps[i-1])\n\t\t\t\t\tb=False\t\n\t\t\t\t\t\n\t\t\tlastNames=list(names)\n\t\t\t\n\t\treturn b", "def test_channel_created(self):\n self.assertTrue(Channel.objects.get(name=self.channel))", "def is_open(channel):\n return channel.topic is None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if we are able to retrieve genre
def testRetrieveGenre(self):
    self.assert_(self.epg.genres())
[ "def populateGenre(self):\r\n \r\n data = showInformation.getJson(self.infourl)\r\n if \"genres\" in data:\r\n return data[\"genres\"]\r\n else:\r\n return False", "def test_user_genre(self, test_client, headers):\n response = test_client.get(\"/api/user/genre\", headers=headers)\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True", "def test_genre_creation(self):\n\t\tgenre = self.create_genre()\n\t\tself.assertTrue(isinstance(genre, Genre))", "def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n if genres:\r\n for genre in genres:\r\n try:\r\n genre = genre.contents[0].strip()\r\n if len(genre) > 0:\r\n self.genres.append(genre)\r\n except KeyError:\r\n pass\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve genre(%s)(%s)' %\r\n (self.imdb_id, e))", "def get_movies_by_genre(self, genre):\r\n raise NotImplementedError", "def Get(genre):\r\n fields = 'name, genre, type, episodes, rating, members'\r\n query = f'SELECT {fields} FROM Anime WHERE genre LIKE \"%{genre}%\" '\r\n result = RunQuery(query)\r\n if result is None:\r\n raise Exception(f\"Genre not found\")\r\n return random.choice(result)", "def testRetrieveChannelsByGenre(self):\n self.assert_(self.epg.channelsByGenre())", "def test_mlgenre():\r\n # Set the tests up\r\n uri = 'x-rincon-playlist:RINCON_000E5884455C01400#A:GENRE/Acid'\r\n genre = data_structures.MLGenre(uri, TITLE, 'dummy.class')\r\n\r\n # Run tests on inherited methods and attributes\r\n content = {'uri': uri, 'title': TITLE, 'item_class': 'dummy.class'}\r\n common_tests('A:GENRE', 'A:GENRE/Acid', genre, content,\r\n GENRE_XML, GENRE_DICT)", "def GenreMenu(title):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n html = html_from_url(clean_url('/movies/genre.php?showC=27'))\n for m in media_list(html, '/movies', genre=True):\n oc.add(DirectoryObject(\n key=Callback(ShowCategory, title=m['title'], category='/movies', href=m['url']),\n title=m['title'],\n thumb=Callback(get_thumb, url=m['thumb'])\n ))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Genre(s) Found')", "def get_genre(id_genre) -> dict:\n sql_request = sql_request_genre(id_genre)\n sql_data = get_data_from_db(sql_request)\n genre = create_genre(sql_data)\n return genre", "def test_subgenre_created(self):\n\t\tsubgenre = Genre.objects.get(name='test')\n\t\ttopgenre = Genre.objects.get(name='topgenre')\n\n\t\tself.assertIn(subgenre, topgenre.subgenre)", "def get_genres():\n response = requests.get(\"https://api.themoviedb.org/3/genre/movie/list?api_key=\" + tmdb_api_key + \"&language=en-US\")\n \n if response.status_code == 200:\n data = response.json()\n genres = []\n for genre in data['genres']:\n genres += [[str(genre.get(\"id\")), genre.get('name')]]\n return genres\n else:\n raise Exception('tmdb API gave status code {}'.format(response.status_code))", "def tmdb_movie_genres(lang):\n genres = None\n try:\n result = tmdbsimple.Genres().list(language=lang)\n genres = dict([(i['id'], i['name']) for i in result['genres']\n if i['name'] is not None])\n except:\n pass\n if genres:\n return genres\n else:\n mock = [{\n \"id\": 28,\n \"name\": \"Action\"\n }, {\n \"id\": 12,\n \"name\": \"Adventure\"\n }, {\n \"id\": 16,\n \"name\": \"Animation\"\n }, {\n \"id\": 35,\n \"name\": \"Comedy\"\n }, {\n \"id\": 80,\n \"name\": \"Crime\"\n }, {\n \"id\": 99,\n \"name\": \"Documentary\"\n }, 
{\n \"id\": 18,\n \"name\": \"Drama\"\n }, {\n \"id\": 10751,\n \"name\": \"Family\"\n }, {\n \"id\": 14,\n \"name\": \"Fantasy\"\n }, {\n \"id\": 10769,\n \"name\": \"Foreign\"\n }, {\n \"id\": 36,\n \"name\": \"History\"\n }, {\n \"id\": 27,\n \"name\": \"Horror\"\n }, {\n \"id\": 10402,\n \"name\": \"Music\"\n }, {\n \"id\": 9648,\n \"name\": \"Mystery\"\n }, {\n \"id\": 10749,\n \"name\": \"Romance\"\n }, {\n \"id\": 878,\n \"name\": \"Science Fiction\"\n }, {\n \"id\": 10770,\n \"name\": \"TV Movie\"\n }, {\n \"id\": 53,\n \"name\": \"Thriller\"\n }, {\n \"id\": 10752,\n \"name\": \"War\"\n }, {\n \"id\": 37,\n \"name\": \"Western\"\n }]\n return dict([(i['id'], i['name'], i['properties']) for i in mock])", "def test_genre_str(self):\n genre = models.Genre.objects.create(\n user=sample_user(),\n name='Action'\n )\n\n self.assertEqual(str(genre), genre.name)", "def find_by_genre():\n os.system('clear')\n music_list = music()\n genre = input(\"Enter the genre of the music: \")\n print(\"%s: \" % genre)\n occurrence = 0\n for item in music_list:\n if item[1][1] == genre:\n print(\"%s - %s\" % (item[0][0], item[0][1]))\n occurrence = 1\n if occurrence == 0:\n print(\"there is no album from this genre on this music list.\")\n print(\"\\nPress enter to continue\")\n input()\n os.system('clear')", "def set_genre(self, genre=UNKNOWN_GENRE):\n self.genre = genre", "def test_user_genre_id(self, test_client, headers, genre_test1):\n\n response = test_client.put(\n \"/api/user/genre/\"+str(genre_test1.genre_id), headers=headers)\n res = json.loads(response.data)\n\n assert response.status_code == 201\n assert res['status'] == True", "def get_genres():\n \n return Genre.query.order_by('genre_name').all()", "def test_user_genre_fake_jwt(self, test_client, headers_fake):\n response = test_client.get(\"/api/user/genre\", headers=headers_fake)\n res = json.loads(response.data)\n\n assert response.status_code == 404\n assert res['status'] == False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if we are able to retrieve channels by genre
def testRetrieveChannelsByGenre(self):
    self.assert_(self.epg.channelsByGenre())
[ "def testRetrieveGenre(self):\n self.assert_(self.epg.genres())", "def testRetrieveMovieChannels(self):\n self.assert_(self.epg.movieChannels())", "def testRetrieveChannel(self):\n self.assert_(self.epg.channels())", "def get_movies_by_genre(self, genre):\r\n raise NotImplementedError", "def populateGenre(self):\r\n \r\n data = showInformation.getJson(self.infourl)\r\n if \"genres\" in data:\r\n return data[\"genres\"]\r\n else:\r\n return False", "def Get(genre):\r\n fields = 'name, genre, type, episodes, rating, members'\r\n query = f'SELECT {fields} FROM Anime WHERE genre LIKE \"%{genre}%\" '\r\n result = RunQuery(query)\r\n if result is None:\r\n raise Exception(f\"Genre not found\")\r\n return random.choice(result)", "def test_user_genre(self, test_client, headers):\n response = test_client.get(\"/api/user/genre\", headers=headers)\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True", "def testRetrieveChannelIds(self):\n self.assert_(self.epg.channelIds())", "def test_get_channel(self):\n channel = api.get_channel(self.channel[\"id\"])\n self.assertEqual(channel.id, self.channel[\"id\"])\n self.assertEqual(channel.name, self.channel[\"name\"])", "def find_by_genre():\n os.system('clear')\n music_list = music()\n genre = input(\"Enter the genre of the music: \")\n print(\"%s: \" % genre)\n occurrence = 0\n for item in music_list:\n if item[1][1] == genre:\n print(\"%s - %s\" % (item[0][0], item[0][1]))\n occurrence = 1\n if occurrence == 0:\n print(\"there is no album from this genre on this music list.\")\n print(\"\\nPress enter to continue\")\n input()\n os.system('clear')", "def GenreMenu(title):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n html = html_from_url(clean_url('/movies/genre.php?showC=27'))\n for m in media_list(html, '/movies', genre=True):\n oc.add(DirectoryObject(\n key=Callback(ShowCategory, title=m['title'], category='/movies', href=m['url']),\n title=m['title'],\n thumb=Callback(get_thumb, url=m['thumb'])\n ))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Genre(s) Found')", "def get_movies_from_genre(movie_data, genre):\r\n return np.where(movie_data[genre].values==1)[0]", "def test_mlgenre():\r\n # Set the tests up\r\n uri = 'x-rincon-playlist:RINCON_000E5884455C01400#A:GENRE/Acid'\r\n genre = data_structures.MLGenre(uri, TITLE, 'dummy.class')\r\n\r\n # Run tests on inherited methods and attributes\r\n content = {'uri': uri, 'title': TITLE, 'item_class': 'dummy.class'}\r\n common_tests('A:GENRE', 'A:GENRE/Acid', genre, content,\r\n GENRE_XML, GENRE_DICT)", "def filter_by_genre(self, genre: str) -> 'MusicFileSet':\n subset = MusicFileSet()\n subset.collection = [item for item in self.collection if item.genre == genre]\n return subset", "def test_subgenre_created(self):\n\t\tsubgenre = Genre.objects.get(name='test')\n\t\ttopgenre = Genre.objects.get(name='topgenre')\n\n\t\tself.assertIn(subgenre, topgenre.subgenre)", "def random_by_genre():\n os.system('clear')\n music_list = music()\n genre = input(\"Enter the genre of the music: \")\n print(\"%s album:\" % genre)\n genre_list = []\n for item in music_list:\n if item[1][1].lower() == genre.lower():\n genre_list.append(item)\n if len(genre_list) > 0:\n album = random.choice(genre_list)\n print(\"%s - %s\" % (album[0][0], album[0][1]))\n else:\n print(\"there is no %s album on this music list.\" % genre)\n print(\"\\nPress enter to continue\")\n input()\n os.system('clear')", "def 
get_movies_by_genre(self, genre) -> List[Movie]:\n raise NotImplementedError", "def moviesInThisGenre(genre):\n data = movies.find({\"genres\": {\"$in\": [genre] } })\n for movie in data:\n for key, value in movie.items():\n if key == \"title\":\n print(\"{title: %s}\" % value)", "def test_should_get_a_channel_by_id(self):\n\n response = self.client.get(\n '/api/v3/channel/1/',\n content_type='application/json',\n HTTP_AUTHORIZATION=self.auth)\n\n self.assertEqual(200, response.status_code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if we are able to retrieve movie channels (by genre name)
def testRetrieveMovieChannels(self):
    self.assert_(self.epg.movieChannels())
[ "def testRetrieveChannelsByGenre(self):\n self.assert_(self.epg.channelsByGenre())", "def testRetrieveGenre(self):\n self.assert_(self.epg.genres())", "def testRetrieveChannel(self):\n self.assert_(self.epg.channels())", "def get_movies_by_genre(self, genre):\r\n raise NotImplementedError", "def test_get_channel(self):\n channel = api.get_channel(self.channel[\"id\"])\n self.assertEqual(channel.id, self.channel[\"id\"])\n self.assertEqual(channel.name, self.channel[\"name\"])", "def GenreMenu(title):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n html = html_from_url(clean_url('/movies/genre.php?showC=27'))\n for m in media_list(html, '/movies', genre=True):\n oc.add(DirectoryObject(\n key=Callback(ShowCategory, title=m['title'], category='/movies', href=m['url']),\n title=m['title'],\n thumb=Callback(get_thumb, url=m['thumb'])\n ))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Genre(s) Found')", "def test_user_genre(self, test_client, headers):\n response = test_client.get(\"/api/user/genre\", headers=headers)\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True", "def testRetrieveChannelIds(self):\n self.assert_(self.epg.channelIds())", "def populateGenre(self):\r\n \r\n data = showInformation.getJson(self.infourl)\r\n if \"genres\" in data:\r\n return data[\"genres\"]\r\n else:\r\n return False", "def Get(genre):\r\n fields = 'name, genre, type, episodes, rating, members'\r\n query = f'SELECT {fields} FROM Anime WHERE genre LIKE \"%{genre}%\" '\r\n result = RunQuery(query)\r\n if result is None:\r\n raise Exception(f\"Genre not found\")\r\n return random.choice(result)", "def find_by_genre():\n os.system('clear')\n music_list = music()\n genre = input(\"Enter the genre of the music: \")\n print(\"%s: \" % genre)\n occurrence = 0\n for item in music_list:\n if item[1][1] == genre:\n print(\"%s - %s\" % (item[0][0], item[0][1]))\n occurrence = 1\n if occurrence == 0:\n print(\"there is no album from this genre on this music list.\")\n print(\"\\nPress enter to continue\")\n input()\n os.system('clear')", "def get_channels_from_title(video: Video, cache_only=False) -> set:\n\n def find_channel_by_title(results, titles):\n # Separate method to allow return from nested loop\n for result in results:\n try:\n guest = Channel.from_id(result.id)\n for title_fragment in titles:\n if guest.title == title_fragment:\n return guest\n except IntegrityError:\n current_session.rollback()\n\n channels = set()\n\n for title in video.get_collaborators_from_title():\n guest = None\n possible_titles = []\n\n # Build the list of titles in reverse size order\n # eg: \"Halocene ft.\" becomes [\"Halocene ft.\", \"Halocene\"]\n title_words = title.split()\n for idx, word in enumerate(title_words):\n possible_titles.append(' '.join(title_words[:idx + 1]))\n possible_titles.reverse()\n\n for possible_title in possible_titles:\n if guest := Channel.from_title(possible_title):\n break\n\n if not guest:\n try:\n if search_results := SearchResult.from_term(\"|\".join(possible_titles), cache_only=cache_only):\n guest = find_channel_by_title(search_results, possible_titles)\n except HTTPError as err:\n logger.error(f\"Processing search term '{possible_titles}' for video '{video}' - '{err}'\")\n\n if guest:\n channels.update([guest])\n elif not cache_only:\n logger.error(f\"Processing channel name '{title}' from title of '{video}' failed\")\n\n return channels", "def get_channels():", "def 
get_movie_genre(monthb,monthl,genrenum):\n data = requests.get('https://api.themoviedb.org/3/discover/movie?api_key='+ TMDB_KEY +\n '&primary_release_date.gte='+ monthb + '&primary_release_date.lte=' \n + monthl +'&with_genres='+ str(genrenum)).json()['total_results']\n return data", "def get_genres():\n response = requests.get(\"https://api.themoviedb.org/3/genre/movie/list?api_key=\" + tmdb_api_key + \"&language=en-US\")\n \n if response.status_code == 200:\n data = response.json()\n genres = []\n for genre in data['genres']:\n genres += [[str(genre.get(\"id\")), genre.get('name')]]\n return genres\n else:\n raise Exception('tmdb API gave status code {}'.format(response.status_code))", "def moviesInThisGenre(genre):\n data = movies.find({\"genres\": {\"$in\": [genre] } })\n for movie in data:\n for key, value in movie.items():\n if key == \"title\":\n print(\"{title: %s}\" % value)", "def get_movies_from_genre(movie_data, genre):\r\n return np.where(movie_data[genre].values==1)[0]", "def get_movies_by_genre(self, genre) -> List[Movie]:\n raise NotImplementedError", "def test_mlgenre():\r\n # Set the tests up\r\n uri = 'x-rincon-playlist:RINCON_000E5884455C01400#A:GENRE/Acid'\r\n genre = data_structures.MLGenre(uri, TITLE, 'dummy.class')\r\n\r\n # Run tests on inherited methods and attributes\r\n content = {'uri': uri, 'title': TITLE, 'item_class': 'dummy.class'}\r\n common_tests('A:GENRE', 'A:GENRE/Acid', genre, content,\r\n GENRE_XML, GENRE_DICT)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if we are able to retrieve channel ids
def testRetrieveChannelIds(self): self.assert_(self.epg.channelIds())
[ "def testRetrieveChannel(self):\n self.assert_(self.epg.channels())", "def test_should_not_get_a_channel_by_id(self):\n\n response = self.client.get(\n '/api/v3/channel/0/',\n content_type='application/json',\n HTTP_AUTHORIZATION=self.auth)\n\n self.assertEqual(500, response.status_code)", "def test_should_get_a_channel_by_id(self):\n\n response = self.client.get(\n '/api/v3/channel/1/',\n content_type='application/json',\n HTTP_AUTHORIZATION=self.auth)\n\n self.assertEqual(200, response.status_code)", "def test_get_channel_users(self):\n ids = [\"U12314\", \"U42839\", \"U31055\"]\n self.mock_sc.conversations_members.return_value = {'ok': True,\n 'members': [\n \"U12314\",\n \"U42839\",\n \"U31055\"\n ]}\n assert self.bot.get_channel_users(\"C1234441\") == ids\n self.mock_sc.conversations_members.assert_called_with(\n channel=\"C1234441\"\n )", "def test_get_channel(self):\n channel = api.get_channel(self.channel[\"id\"])\n self.assertEqual(channel.id, self.channel[\"id\"])\n self.assertEqual(channel.name, self.channel[\"name\"])", "def test_list_user_communication_channels(self):\r\n user_id = None # Change me!!\r\n\r\n r = self.client.list_user_communication_channels(user_id)", "def test_no_channels(self):\n self.client.force_login(create_user())\n\n response = self.client.get(reverse('chat:channel-list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'No messages yet.')\n self.assertQuerysetEqual(response.context['object_list'], [])", "def check_channel(self, remote):\n remote_id = remote.id()\n for c in self.rpc.channels():\n channel = self.rpc.channel(c)\n if channel['nodeid'] == remote_id:\n return channel['state'] == 'NORMAL'\n return False", "def is_channel_available():\n try:\n client = _get_slack_client()\n channel_name = _get_slack_channel_name()\n response = client.conversations_list(limit=sys.maxsize)\n if response.status_code == 200:\n for channel in response.data['channels']:\n if channel['name'] == channel_name:\n return True\n except (SlackConfigurationError, SlackApiError) as e:\n # if the environment variables are missing or the slack api failed to identify the channel\n logging.error(e)\n return False", "async def check_controlled_channels(message):\n channel = message.channel\n id = message.server.id\n found = False\n for private_channel_pair in servers[id][\"created_channels\"]:\n user_channel = private_channel_pair.user_channel\n admin_channel = private_channel_pair.admin_channel\n if channel.id == user_channel.id:\n author = message.author.mention\n mirror_message = f\"{author}: {message.content}\"\n await bot.send_message(admin_channel, mirror_message)\n found = True\n if channel.id == admin_channel.id:\n if message.content.startswith(prefix):\n message_no_prefix = message.content.lstrip(prefix)\n if message_no_prefix.startswith(\"resolve\"):\n await bot.delete_channel(user_channel)\n await bot.delete_channel(admin_channel)\n else:\n admin_message = f\"Admins: {message_no_prefix}\"\n await bot.send_message(user_channel, admin_message)\n found = True\n return found", "def testRetrieveMovieChannels(self):\n self.assert_(self.epg.movieChannels())", "def test_getChannelMessages(channel_id: str = CHANNEL_ID) -> json:\r\n # Action\r\n # status, result = u.getChannelMessages(\"B4EF14CFE2782C1E94E82631F9B782E2\")\r\n status, result = u.getChannelMessages(channel_id)\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)", "def test_channel_without_membership(self):\n create_channel(create_user())\n\n 
self.client.force_login(create_user())\n\n response = self.client.get(reverse('chat:channel-list'))\n self.assertContains(response, 'No messages yet.')\n self.assertQuerysetEqual(response.context['object_list'], [])", "def get_channels():", "def test_channel_created(self):\n self.assertTrue(Channel.objects.get(name=self.channel))", "def testRetrieveChannelsByGenre(self):\n self.assert_(self.epg.channelsByGenre())", "def test__Channel__iter_channels():\n channel_id_0 = 202304130070\n channel_id_1 = 202304130071\n channel_id_2 = 202304130072\n guild_id = 202304130073\n \n guild = Guild.precreate(guild_id)\n channel = Channel.precreate(channel_id_0, channel_type = ChannelType.guild_category, guild_id = guild_id)\n channel_0 = Channel.precreate(\n channel_id_1, channel_type = ChannelType.guild_text, parent_id = channel_id_0, guild_id = guild_id\n )\n channel_1 = Channel.precreate(\n channel_id_2, channel_type = ChannelType.guild_text, parent_id = channel_id_0, guild_id = guild_id\n )\n \n guild.channels[channel_id_0] = channel\n guild.channels[channel_id_1] = channel_0\n guild.channels[channel_id_2] = channel_1\n \n vampytest.assert_eq({*channel.iter_channels()}, {channel_0, channel_1})", "async def checkChannels(self): # pylint: disable=too-many-branches,too-many-statements\n while self == self.bot.get_cog(\"TempChannels\"):\n await asyncio.sleep(SLEEP_TIME)\n # Create/maintain the channel during a valid time and duration, else\n # delete it.\n for guild in self.bot.guilds:\n async with self.config.guild(guild).all() as guildData:\n try:\n if not guildData[KEY_ENABLED]:\n continue\n\n if (\n int(time.strftime(\"%H\")) == guildData[KEY_START_HOUR]\n and int(time.strftime(\"%M\")) == guildData[KEY_START_MIN]\n and not guildData[KEY_CH_CREATED]\n and not guildData[KEY_CH_ID]\n ):\n # See if ALL of the following is satisfied.\n # - It is the starting time.\n # - The channel creation flag is not set.\n # - The channel ID doesn't exist.\n #\n # If it is satisfied, let's create a channel, and then\n # store the following in the settings:\n # - Channel ID.\n # - Time to delete channel.\n # Start with permissions\n\n # Always allow the bot to read.\n permsDict = {self.bot.user: PERMS_READ_Y}\n\n if guildData[KEY_ROLE_ALLOW]:\n # If we have allow roles, automatically deny @everyone the \"Read\n # Messages\" permission.\n permsDict[guild.default_role] = PERMS_READ_N\n for roleId in guildData[KEY_ROLE_ALLOW]:\n role = discord.utils.get(guild.roles, id=roleId)\n self.logger.debug(\"Allowed role %s\", role)\n if role:\n permsDict[role] = deepcopy(PERMS_READ_Y)\n\n # Check for deny permissions.\n if guildData[KEY_ROLE_DENY]:\n for roleId in guildData[KEY_ROLE_DENY]:\n role = discord.utils.get(guild.roles, id=roleId)\n self.logger.debug(\"Denied role %s\", role)\n if role and role not in permsDict.keys():\n self.logger.debug(\"Role not in dict, adding\")\n permsDict[role] = deepcopy(PERMS_SEND_N)\n elif role:\n self.logger.debug(\"Updating role\")\n permsDict[role].update(send_messages=False)\n\n self.logger.debug(\"Current permission overrides: \\n%s\", permsDict)\n\n # Grab parent category. 
If not set, this will return None anyways.\n category = None\n if guildData[KEY_CH_CATEGORY]:\n category = discord.utils.get(\n guild.channels, id=guildData[KEY_CH_CATEGORY]\n )\n\n chanObj = await guild.create_text_channel(\n guildData[KEY_CH_NAME],\n overwrites=permsDict,\n category=category,\n position=guildData[KEY_CH_POS],\n topic=guildData[KEY_CH_TOPIC],\n nsfw=guildData[KEY_NSFW],\n )\n self.logger.info(\n \"Channel #%s (%s) in %s (%s) was created.\",\n chanObj.name,\n chanObj.id,\n guild.name,\n guild.id,\n )\n guildData[KEY_CH_ID] = chanObj.id\n\n # Set delete times, and save settings.\n duration = (\n guildData[KEY_DURATION_HOURS] * 60 * 60\n + guildData[KEY_DURATION_MINS] * 60\n )\n guildData[KEY_STOP_TIME] = time.time() + duration\n guildData[KEY_CH_CREATED] = True\n\n elif guildData[KEY_CH_CREATED]:\n # Channel created, see when we should delete it.\n if time.time() >= guildData[KEY_STOP_TIME]:\n self.logger.debug(\n \"Past channel stop time, clearing ID \" \"and created keys.\"\n )\n chanObj = guild.get_channel(guildData[KEY_CH_ID])\n guildData[KEY_CH_ID] = None\n guildData[KEY_CH_CREATED] = False\n\n if chanObj and guildData[KEY_ARCHIVE]:\n await chanObj.set_permissions(\n guild.default_role, overwrite=PERMS_READ_N\n )\n for role in guild.roles:\n if role == guild.default_role:\n continue\n await chanObj.set_permissions(\n role, overwrite=None, reason=\"Archiving tempchannel\"\n )\n currentDate = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n await chanObj.edit(name=f\"tc-{currentDate}\")\n self.logger.info(\n \"Channel #%s (%s) in %s (%s) was archived.\",\n chanObj.name,\n chanObj.id,\n guild.name,\n guild.id,\n )\n elif chanObj and not guildData[KEY_ARCHIVE]:\n await chanObj.delete()\n\n self.logger.info(\n \"Channel #%s (%s) in %s (%s) was deleted.\",\n chanObj.name,\n chanObj.id,\n guild.name,\n guild.id,\n )\n except Exception: # pylint: disable=broad-except\n self.logger.error(\n \"Something went terribly wrong for server %s (%s)!\",\n guild.name,\n guild.id,\n exc_info=True,\n )", "def test_getChannelModerators(channel_id: str = CHANNEL_ID) -> json:\r\n\r\n # Action\r\n status, result = u.getChannelModerators(channel_id)\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace tokens with formatted data from the appointment or contact. Returns message dicts with keys sms, mail, chat and mail_subject containing the text with the variables replaced
def _replace_tags_in_text(self, notification, contact, appointment): messages = { "sms": notification.sms_message, "mail": notification.mail_message, "chat": notification.chat_message, "mail_subject": notification.mail_subject, } if appointment.slot is not None: public_description = appointment.slot.public_description else: public_description = None if appointment.advisor is not None: advisor = appointment.advisor.full_name else: advisor = "" tokens = { "{contact}": contact.full_name if contact else "", # NOQA: FS003 "{date_rdv}": format_datetime_interval( # NOQA: FS003 appointment.start_date, appointment.end_date ), "{conseiller}": advisor, # NOQA: FS003 "{structure}": appointment.group.name or "", # NOQA: FS003 "{lieu}": f"{self._format_place(appointment.place)}" # NOQA: FS003 if appointment.place else "", "{tel_structure}": f"{appointment.group.phone}" # NOQA: FS003 if appointment.group else "", "{tel_lieu}": f"{appointment.place.phone}" # NOQA: FS003 if appointment.place else "", # NOQA: FS003 "{texte_lieu}": appointment.place.presentation or "" # NOQA: FS003 if appointment.place else "", "{texte_motif}": appointment.reason.description or "" # NOQA: FS003 if appointment.reason else "", "{texte_creneau}": public_description or "", # NOQA: FS003 "{jitsi}": f"<a href=\"https://meet.jit.si/{contact.email.replace('@', '-')}\">https://meet.jit.si/{contact.email.replace('@', '-')}</a>" # NOQA: FS003,E501 if contact else "", # NOQA: FS003, E501 } for media in messages: for token in tokens: try: messages[media] = ( messages[media].replace(token, tokens[token]).strip() ) except AttributeError: pass # we may want to strip for sms : messages["sms"] = messages["sms"][:160] if messages["mail"] is not None: messages["mail_ascii"] = self._strip_html_tags(messages["mail"]) else: messages["mail_ascii"] = "" return messages
[ "def replace_ids(dialogue):\n movie_titles = dialogue[\"movieMentions\"]\n for message in dialogue[\"messages\"]:\n text = message[\"text\"]\n replaced = []\n for word in text.split():\n if word[0] == \"@\" and re.sub('\\\\D', '', word) in movie_titles:\n movie_id = re.sub('\\\\D', '', word)\n replaced.append(\"@ \" + movie_titles[movie_id] + \" @\")\n else:\n replaced.append(word)\n message[\"text\"] = \" \".join(replaced)", "def regex_string(self):\r\n import re\r\n import datetime\r\n\r\n print(\"Read the following message: \")\r\n message_string = \"Hello <<name>>, We have your full name as <<full name>> in our system. \\n\" \\\r\n \"Your contact number is 91-xxxxxxxxxx. \\n\" \\\r\n \"Please,let us know in case of any clarification Thank you BridgeLabz 01/01/2016.\"\r\n print(message_string)\r\n\r\n first_name = (input(\"Enter first name: \")).capitalize()\r\n full_name = (input(\"Enter full name: \")).title()\r\n mobile_number = input(\"Enter mobile number: \")\r\n\r\n year = int(input('Enter a year: '))\r\n month = int(input('Enter a month: '))\r\n day = int(input('Enter a day: '))\r\n # Converting the year, month, day into a date format\r\n date_format = datetime.date(year, month, day)\r\n date = str(date_format)\r\n # Replacing the necessary values using sub function from the re module\r\n mod_message = re.sub(\"<<name>>\", first_name, message_string)\r\n mod_message1 = re.sub(\"<<full name>>\", full_name, mod_message)\r\n mod_message2 = re.sub(\"xxxxxxxxxx\", mobile_number, mod_message1)\r\n mod_message3 = re.sub(\"01/01/2016\", date, mod_message2)\r\n\r\n print('\\n')\r\n print(mod_message3)", "def _parse(self, msg_dict):\n error_present = False\n # log.debug('Matching the message:')\n # log.debug(msg_dict)\n for message in self.compiled_messages:\n # log.debug('Matching using:')\n # log.debug(message)\n match_on = message[\"match_on\"]\n if match_on not in msg_dict:\n # log.debug('%s is not a valid key in the partially parsed dict', match_on)\n continue\n if message[\"tag\"] != msg_dict[match_on]:\n continue\n if \"__python_fun__\" in message:\n return {\n \"model\": message[\"model\"],\n \"error\": message[\"error\"],\n \"__python_fun__\": message[\"__python_fun__\"],\n }\n error_present = True\n match = message[\"line\"].search(msg_dict[\"message\"])\n if not match:\n continue\n positions = message.get(\"positions\", {})\n values = message.get(\"values\")\n ret = {\n \"model\": message[\"model\"],\n \"mapping\": message[\"mapping\"],\n \"replace\": message[\"replace\"],\n \"error\": message[\"error\"],\n \"_state\": message[\"state\"],\n \"_state_tag\": message[\"state_tag\"],\n }\n for key in values.keys():\n # Check if the value needs to be replaced\n if key in message[\"replace\"]:\n result = napalm_logs.utils.cast(\n match.group(positions.get(key)), message[\"replace\"][key]\n )\n else:\n result = match.group(positions.get(key))\n ret[key] = result\n return ret\n if error_present is True:\n log.info(\n \"Configured regex did not match for os: %s tag %s\",\n self._name,\n msg_dict.get(\"tag\", \"\"),\n )\n else:\n log.info(\n \"Syslog message not configured for os: %s tag %s\",\n self._name,\n msg_dict.get(\"tag\", \"\"),\n )", "def clean_content(self):\n\n transformations = {\n re.escape('<#{0.id}>'.format(channel)): '#' + channel.name\n for channel in self.channel_mentions\n }\n\n mention_transforms = {\n re.escape('<@{0.id}>'.format(member)): '@' + member.display_name\n for member in self.mentions\n }\n\n # add the <@!user_id> cases as well..\n 
second_mention_transforms = {\n re.escape('<@!{0.id}>'.format(member)): '@' + member.display_name\n for member in self.mentions\n }\n\n transformations.update(mention_transforms)\n transformations.update(second_mention_transforms)\n\n if self.server is not None:\n role_transforms = {\n re.escape('<@&{0.id}>'.format(role)): '@' + role.name\n for role in self.role_mentions\n }\n transformations.update(role_transforms)\n\n def repl(obj):\n return transformations.get(re.escape(obj.group(0)), '')\n\n pattern = re.compile('|'.join(transformations.keys()))\n result = pattern.sub(repl, self.content)\n\n transformations = {\n '@everyone': '@\\u200beveryone',\n '@here': '@\\u200bhere'\n }\n\n def repl2(obj):\n return transformations.get(obj.group(0), '')\n\n pattern = re.compile('|'.join(transformations.keys()))\n return pattern.sub(repl2, result)", "def pt_txt_replace (firstname, surname, pt_txt,\n redact_message_fname = 'XXXfirstnameXXX',\n redact_message_sname = 'XXXsurnameXXX'):\n\n pt_txt_fnamefilter = pt_txt.replace(firstname, redact_message_fname)\n pt_txt_sfnamefilter = pt_txt_fnamefilter.replace(surname, redact_message_sname)\n \n names = [firstname, surname]\n \n return pt_txt_sfnamefilter, names", "def replace_words(message, words):\n for item in words:\n message = message.replace(item['key'], item['value'])\n\n return message", "def create_message_content(\n html_template_ref,\n txt_template_ref,\n tables,\n receivers,\n template_base_data,\n subject_data,\n attachment=None,\n cc=None\n):\n return {\n 'html_template_ref': html_template_ref,\n 'txt_template_ref': txt_template_ref,\n 'tables': tables,\n 'receivers': receivers,\n 'template_base_data': template_base_data,\n 'subject_data': subject_data,\n 'attachment': attachment,\n 'cc': cc,\n }", "def get_formatted_messages(self, formats: Sequence[str], label: str, context: dict) -> Dict[str, str]:\n format_templates = {}\n for fmt in formats:\n format_templates[fmt] = render_to_string((\n \"campaigns/notifications/{0}/{1}\".format(label, fmt),\n \"campaigns/notifications/{0}\".format(fmt)), context)\n return format_templates", "def replace_emails(text, replace_with=\"<EMAIL>\"):\n result = re.sub(EMAIL_REGEX, replace_with, text)\n return result", "def process(string_in):\n email_contents = string_in.lower()\n\n email_contents = remove_header(email_contents)\n\n email_contents = strip_html(email_contents)\n\n # newline fix\n email_contents = email_contents.replace(\"=\\n\", \"\")\n\n # Http addresses corrected\n # Replace occurrence of \"http\" or \"https\" + \"://\" + zero or all following (*) non-white-space characters [^\\s]\n # to \"httpaddr\"\n email_contents = re.sub('(http|https)://[^\\\\s]*', ' httpaddr ', email_contents)\n\n # Email addresses corrected\n # Replace occurrence of at least one (+) non-white-space character [^\\s] + \"@\" +\n # at least one (+) non-white-space character [^\\s], into \"emailaddr\"\n email_contents = re.sub('[^\\\\s]+@[^\\\\s]+', ' emailaddr ', email_contents)\n\n # Dollar sign correction\n email_contents = re.sub('[$]+', ' dollar ', email_contents)\n\n # Number correction\n email_contents = re.sub('[0-9]+', ' number ', email_contents)\n\n # Replace symbols which are not a-z or non-white-space character \\s with \" \"\n email_contents = re.sub('[^a-z\\\\s]', ' ', email_contents)\n\n # Create list of stemmed words using Porter Stemmer\n email_words = []\n ps = PorterStemmer()\n for word in re.split('[\\\\s]', email_contents):\n if len(word):\n email_words.append(ps.stem(word))\n return 
\"\\n\".join(email_words)", "def _parse_body(self, msg):\n # get the msg\n parts = msg.get_payload()\n msg = parts[-1]\n raw = msg.get_payload().strip()\n\n if self.regex:\n if isinstance(self.regex, re._pattern_type):\n m = self.regex.search(raw)\n if not m:\n raise ValueError('Bad regex!')\n body = m.groupdict()\n self._check_required(body)\n return body\n\n else:\n raise ValueError('Regex failed to compile!')\n\n else:\n return {'raw':raw}", "def _prepare_msg(\n subject, txt_template, html_template, context, to_emails,\n from_email=settings.NOTIFY_FROM_EMAIL):\n\n context = Context(context)\n txt = get_template(txt_template).render(context)\n html = get_template(html_template).render(context)\n\n msg = EmailMultiAlternatives(\n subject, txt, from_email, to_emails)\n msg.attach_alternative(html, \"text/html\")\n return msg", "def teams_msg(context):\n facts = []\n if context.get('WHAT', None) == \"SERVICE\":\n state = context[\"SERVICESTATE\"]\n color = COLORS.get(state)\n subtitle = \"Service Notification\"\n facts.append({\"name\": \"Service:\", \"value\": context[\"SERVICEDESC\"]})\n output = context[\"SERVICEOUTPUT\"] if context[\"SERVICEOUTPUT\"] else \"\"\n else:\n state = context[\"HOSTSTATE\"]\n color = COLORS.get(state)\n subtitle = \"Host Notification\"\n output = context[\"HOSTOUTPUT\"] if context[\"HOSTOUTPUT\"] else \"\"\n\n facts.extend([\n {\n \"name\": \"Host:\",\n \"value\": context[\"HOSTNAME\"]\n },\n {\n \"name\": \"State:\",\n \"value\": state\n }\n ])\n\n return {\n \"@type\": \"MessageCard\",\n \"@context\": \"https://schema.org/extensions\",\n \"summary\": subtitle,\n \"themeColor\": color,\n \"sections\": [\n {\n \"activityTitle\": \"CheckMK\",\n \"activitySubtitle\": subtitle,\n \"activityImage\": \"https://checkmk.com/favicon-16x16.png\",\n \"facts\": facts,\n \"text\": output\n }\n ]\n }", "def transform(self, payload):\n post = Post(payload)\n logging.info('Got a message for application %s' % (post['application'],))\n logging.info(self.config['binoas']['applications'][post['application']])\n # The result should be a valid post also ...\n result = {\n 'application': post['application'],\n 'payload': {}\n }\n for fld, expr in self.config['binoas']['applications'][post['application']]['rules'].items():\n # we have several fields and also a data field. The non-data fields\n # are used in the email (Ie. to provide title and description and\n # so on.)\n if fld != 'data':\n # if it is not a data field we have a simple transformation\n jsonpath_expr = parse(expr)\n try:\n res = jsonpath_expr.find(post['payload'])[0].value\n except IndexError:\n res = None\n result['payload'][fld] = res\n else:\n # the data fields have a more elaborate transformation as it\n # can contain a lot of information\n result['payload']['data'] = []\n for expr_info in expr:\n # the data fields can be specified as a list of strings or\n # as a list of dicts (or in between). This allows you to\n # override the keys in the transformed result. (Otherwise\n # the keys would have taken the json path expressions which\n # can be quite complex)\n if not isinstance(expr_info, dict):\n expr_info = {\n 'path': expr_info,\n 'name': expr_info\n }\n jsonpath_expr = parse(expr_info['path'])\n for res in jsonpath_expr.find(post['payload']):\n # if the values of the requested path are a list we\n # should transform accordingly (Ie. 
returns multiple\n # data objects)\n if type(res.value) is list:\n values = res.value\n else:\n values = [res.value]\n for value in values:\n result['payload']['data'].append({\n 'key': expr_info['name'],\n 'value': value\n })\n\n return result", "def preprocess(in_sentence, language):\r\n # TODO: Implement Function\r\n # if language is english\r\n start = \"SENTSTART \"\r\n end = \" SENTEND\"\r\n out_sentence = in_sentence.strip().lower()\r\n \r\n if language == \"e\":\r\n out_sentence = re.sub(r'([,:;()\\-+<>=.?!*/\"])',r' \\1 ',out_sentence)\r\n\r\n \r\n if language == \"f\":\r\n out_sentence = re.sub(r'([,:;()\\-+<>=.?!*/\"])',r' \\1 ',out_sentence)\r\n\r\n #for l', I think this we do not have to do this step since next step covers this\r\n out_sentence = re.sub(r'(\\b)(l\\')(\\w+)',r'\\1\\2 \\3',out_sentence)\r\n #for consonant assume y is not a consonant\r\n out_sentence = re.sub(r'(\\b)([aeiouqwrtypsdfghjklzxcvbnm]\\')(\\w+)',r'\\1\\2 \\3',out_sentence)\r\n #for que\r\n out_sentence = re.sub(r'(\\b)(qu\\')(\\w+)',r'\\1\\2 \\3',out_sentence)\r\n #for on and il\r\n out_sentence = re.sub(r'(\\w+)(\\')(on|il)(\\b)',r'\\1\\2 \\3\\4',out_sentence)\r\n #for d’abord, d’accord, d’ailleurs, d’habitude special cases\r\n out_sentence = re.sub(r'(d\\') (abord|accord|ailleurs|habitude)(\\b)',r'\\1\\2\\3',out_sentence)\r\n \r\n out_sentence = start + out_sentence + end\r\n out_sentence = re.sub(r' {2,}',r' ',out_sentence) \r\n return out_sentence", "def transform_text(messages, word_dictionary):\n # *** START CODE HERE ***\n def create_message_entry(message):\n words = get_words(message)\n entry = [0] * len(word_dictionary)\n for word in words:\n if word in word_dictionary.keys():\n entry[word_dictionary[word]] += 1\n return entry\n\n # returns np array of shape (n_messages, dict_size)\n return np.asarray([create_message_entry(message) for message in messages])\n\n # *** END CODE HERE ***", "def process_files(files, dir):\n messages = {}\n for f in files:\n with open(join(dir, f)) as config_file:\n content = json.load(config_file)\n if content:\n from_add, from_name = '', ''\n if len(content.get('messages')):\n for message in content.get('messages'):\n if not message.get('sent') and message.get('inbox'):\n from_add = message.get('from').get('e')\n from_name = message.get('from').get('n')\n # if senders are from gmail, yahoo, hotmail - may be personal emails\n # and others would be apps and services - so split to get the service name\n if not ('gmail' in from_add or 'yahoo' in from_add or 'hotmail' in from_add):\n from_add = from_add.rsplit('@')[1]\n break\n if from_add:\n if messages.get(from_add):\n mails = messages[from_add]['mails']\n if mails and len(mails):\n mails.append((content.get('subject'), content.get('internalDate')))\n else:\n mails = [(content.get('subject'), content.get('internalDate'))]\n messages[from_add]['mails'] = mails\n else:\n messages.update({from_add: {}})\n messages[from_add]['mails'] = [(content.get('subject'), content.get('internalDate'))]\n messages[from_add]['from_name'] = from_name\n\n return messages", "def parse_data(data):\n\n dict_list = []\n\n for msg in data:\n formatted_dict = {}\n # add number to dictionary\n number = incident_number_custom_function(msg)\n formatted_dict[\"number\"] = number\n # add call_type to dictionary\n call_type = call_type_custom_function(msg)\n formatted_dict[\"call_type\"] = call_type\n # add location and coordinates to dictionary\n location_tuple = location_custom_function(msg)\n if location_tuple is not None:\n # add location\n 
location = location_tuple[0]\n formatted_dict[\"location\"] = location\n # add coordinates\n coordinates = location_tuple[1]\n formatted_dict[\"coordinates\"] = coordinates\n else:\n location = None\n # add equipment(units) to dictionary\n equipment = equipment_custom_function(msg)\n formatted_dict[\"equipment\"] = equipment\n # add remarks, if any, to dictionary\n remarks = remarks_custom_function(msg)\n if remarks is None:\n remarks = \"None\"\n formatted_dict[\"remarks\"] = remarks\n # add time stamp to dictionary\n time = time_custom_function(msg)\n formatted_dict[\"time\"] = time\n # add this dictionary to list of dictionaries\n if number and location and call_type and equipment is not None:\n dict_list.append(formatted_dict)\n \n return dict_list", "def format_messages(messages: list):\n for message in messages:\n to_addresses = message.get('toAddresses')\n if isinstance(to_addresses, str):\n message['toAddresses'] = argToList(to_addresses)\n return messages" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for nested lists passing a dictionary.
def test_remove_nested_loop_dict(self): expected = {} result = nested_lists.remove_nested_loop(expected) self.assertFalse(result)
[ "def p_1():\n nested_list = [[], [], [], [], []]\n # Tests\n print(nested_list)\n\n return 0", "def test31(self):\n self.check('aDict.nestedDict')", "def test_second_level_retrieval(nested_dict):\n\n l = ['first', 'second']\n\n val = get_nested_value(d=nested_dict, keys=l)\n\n assert val == {'third': {'fourth': 'leaf', 'another': 'label'} }", "def test33(self):\n self.check('aDict.nestedDict.one')", "def p_2():\n nested_list = [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]\n # Tests\n print(nested_list)\n return 0", "def is_nested(input):\n return is_sequence(input) or isinstance(input, dict)", "def test_nsx_struct_get_list_dict2list(self):\n # convert dict to list\n nsx_object = {\n 'a': {\n 'b': {\n 'c': 'd'\n }\n }\n }\n self.assertEqual(\n common.nsx_struct_get_list(nsx_object, 'a/b'),\n [{\n 'c': 'd'\n }]\n )\n self.assertEqual(\n nsx_object,\n {\n 'a': {\n 'b': [{\n 'c': 'd'\n }]\n }\n }\n )", "def test35(self):\n self.check('aDict.nestedFunc')", "def test_pydata_list_nested(self):\n\n intype = [1, ['two', 'three'], [1, 2, 3, 4.33]]\n graph = read_pydata(intype)\n\n self.assertListEqual(intype, write_pydata(graph))", "def test_remove_nested_loop_nested_list(self):\n expected = [1, 2, [3, 4], 5]\n result = nested_lists.remove_nested_loop(expected)\n expected = [1, 2, 3, 4, 5]\n self.assertEqual(result, expected)", "def check_type_nested_structure(\n typechecked_object,\n build_arg_nested_type: Union[list, tuple, dict, type],\n call_arg_nested_obj: any,\n path: str,\n ) -> None:\n\n iterable_supported_list = (list, tuple, dict)\n\n if type(call_arg_nested_obj) not in iterable_supported_list:\n if not isinstance(call_arg_nested_obj, build_arg_nested_type):\n NestedTypeWrapper.raise_typecheck_err(\n typechecked_object,\n build_arg_nested_type.__name__,\n type(call_arg_nested_obj).__name__,\n path,\n )\n return\n\n if type(build_arg_nested_type) != type(call_arg_nested_obj):\n NestedTypeWrapper.raise_typecheck_err(\n typechecked_object,\n type(build_arg_nested_type).__name__,\n type(call_arg_nested_obj).__name__,\n path,\n )\n return\n\n if isinstance(build_arg_nested_type, (list, tuple)):\n if len(build_arg_nested_type) != len(call_arg_nested_obj):\n NestedTypeWrapper.raise_missmatch_err(\n typechecked_object,\n len(build_arg_nested_type),\n len(call_arg_nested_obj),\n path,\n )\n\n for idx in range(len(build_arg_nested_type)):\n check_type_nested_structure(\n typechecked_object,\n build_arg_nested_type[idx],\n call_arg_nested_obj[idx],\n f\"element {idx} of \" + path,\n )\n\n if isinstance(build_arg_nested_type, dict):\n if len(build_arg_nested_type) != len(call_arg_nested_obj):\n NestedTypeWrapper.raise_missmatch_err(\n typechecked_object,\n len(build_arg_nested_type),\n len(call_arg_nested_obj),\n path,\n )\n\n for key in build_arg_nested_type.keys():\n if key in call_arg_nested_obj:\n check_type_nested_structure(\n typechecked_object,\n build_arg_nested_type[key],\n call_arg_nested_obj[key],\n f\"key {key} of \" + path,\n )\n else:\n NestedTypeWrapper.raise_key_missing_err(typechecked_object, key, path)", "def validate_dict(object: dict, validate_keys: Callable[[dict], None]):\n validate_keys(object)\n\n for _, value in object.items():\n if isinstance(value, dict):\n validate_dict(value, validate_keys)\n\n elif isinstance(value, list):\n for list_item in value:\n if isinstance(list_item, dict):\n validate_dict(list_item, validate_keys)", "def explore_list(l):\n if isinstance(l , list):\n for e in l:\n if isinstance(e, list) or isinstance(e, dict):\n for ee in explore_list(e):\n yield 
ee\n else:\n yield e\n else:\n for e in l.itervalues():\n if isinstance(e, list) or isinstance(e, dict):\n for ee in explore_list(e):\n yield ee\n else:\n yield e", "def access(dictionary, nested_keys):\r\n\r\n for index, key in enumerate(nested_keys):\r\n\r\n print index, key\r\n\r\n try:\r\n if dictionary.has_key(key):\r\n if nested_keys[index + 1:] != []:\r\n return access(dictionary[key], nested_keys[index + 1:])\r\n else:\r\n return dictionary[key]\r\n else:\r\n return False\r\n except AttributeError: # at this point, dictionary is a list, perhaps containing dictionaries\r\n if key < len(dictionary):\r\n if nested_keys[index + 1:] != []:\r\n return access(dictionary[key], nested_keys[index + 1:])\r\n else:\r\n return dictionary[key]\r\n else:\r\n return False", "def test_sanitize_params_only_jsonifies_dicts_lists(self):\n payload = {'message': 'abc', 'privacy': self.privacy_dict, 'xyz': ['a','b']}\n retval = self.c._sanitize_params(payload)\n self.assertEquals(retval, \n {'access_token': self.access_token,\n 'message': 'abc', \n 'privacy': json.dumps(self.privacy_dict),\n 'xyz': json.dumps(['a','b'])})", "def test_nsx_struct_get_list_list(self):\n\n # list by path\n nsx_object = {\n 'a': {\n 'b': [{\n 'c': 'd'\n }, {\n 'e': 'f'\n }]\n }\n }\n self.assertEqual(\n common.nsx_struct_get_list(nsx_object, 'a/b'),\n [{\n 'c': 'd'\n }, {\n 'e': 'f'\n }]\n )\n self.assertEqual(\n nsx_object,\n {\n 'a': {\n 'b': [{\n 'c': 'd'\n }, {\n 'e': 'f'\n }]\n }\n }\n )", "def _check_lists(self, exp_name, exp_list, actual_list):\n if len(exp_list) != len(actual_list):\n self.fail(\n \"In examining %s, expected list:\\n%s\\n\\nFound list:\\n%s\" %\n (exp_name,\n pprint.pformat(exp_list),\n pprint.pformat(actual_list)))\n for exp_val, actual_val in zip(exp_list, actual_list):\n if isinstance(exp_val, types.ListType):\n self._check_lists(exp_name + \" >>\", exp_val, actual_val)\n elif isinstance(exp_val, types.DictType):\n for k, v in exp_val.items():\n self._check_contains(exp_name, k, v, actual_val)\n else:\n eq_(exp_val,\n actual_val,\n \"In examining %s, within a list, expected %s, found %s\" %\n (exp_name, exp_val, actual_val))", "def test_multidict():\n d = Multidict(a='foo', b=['bar', 'baz'])\n assert isinstance(d, dict)\n assert d['a'] == 'foo'\n assert d['b'] == 'bar'\n assert d.getlist('a') == ['foo']\n assert d.getlist('b') == ['bar', 'baz']", "def p_6():\n list_dicts =[{},{},{},{},{},{}]\n # Tests\n print(list_dicts)\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for nested lists passing a nested list.
def test_remove_nested_loop_nested_list(self): expected = [1, 2, [3, 4], 5] result = nested_lists.remove_nested_loop(expected) expected = [1, 2, 3, 4, 5] self.assertEqual(result, expected)
[ "def p_1():\n nested_list = [[], [], [], [], []]\n # Tests\n print(nested_list)\n\n return 0", "def p_2():\n nested_list = [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]\n # Tests\n print(nested_list)\n return 0", "def test_pydata_list_nested(self):\n\n intype = [1, ['two', 'three'], [1, 2, 3, 4.33]]\n graph = read_pydata(intype)\n\n self.assertListEqual(intype, write_pydata(graph))", "def _check_lists(self, exp_name, exp_list, actual_list):\n if len(exp_list) != len(actual_list):\n self.fail(\n \"In examining %s, expected list:\\n%s\\n\\nFound list:\\n%s\" %\n (exp_name,\n pprint.pformat(exp_list),\n pprint.pformat(actual_list)))\n for exp_val, actual_val in zip(exp_list, actual_list):\n if isinstance(exp_val, types.ListType):\n self._check_lists(exp_name + \" >>\", exp_val, actual_val)\n elif isinstance(exp_val, types.DictType):\n for k, v in exp_val.items():\n self._check_contains(exp_name, k, v, actual_val)\n else:\n eq_(exp_val,\n actual_val,\n \"In examining %s, within a list, expected %s, found %s\" %\n (exp_name, exp_val, actual_val))", "def is_list(node):\r\n return (isinstance(node, Node)\r\n and len(node.children) > 1\r\n and isinstance(node.children[0], Leaf)\r\n and isinstance(node.children[-1], Leaf)\r\n and node.children[0].value == \"[\"\r\n and node.children[-1].value == \"]\")", "def isListLike(value):\r\n\r\n return isinstance(value, (list, tuple, set))", "def p_3():\n zero_list = [0 for dummy_idx in range(3)]\n nested_list = [[0 for dummy_idx in range(3)] for dummy_idx2 in range(5) ]\n # Tests\n print(zero_list)\n print(nested_list)\n\n return 0", "def test_validate_list_of_tuples(input, expectation):\n with expectation:\n problems.helpers.validate_list_of_tuples(input)", "def list_should_contain_sub_list(self, list1, list2, msg=None):\n\n self.built_in.run_keyword_and_continue_on_failure(\n 'Collections.List Should Contain Sub List', list1, list2, msg\n )", "def doubler_recursive(lst):\n\n for n in lst:\n if isinstance(n, list):\n doubler_recursive(n)\n else:\n print n * 2,", "def _check_is_list(obj):\n return isinstance(obj, (list, List))", "def test_nsx_struct_get_list_list(self):\n\n # list by path\n nsx_object = {\n 'a': {\n 'b': [{\n 'c': 'd'\n }, {\n 'e': 'f'\n }]\n }\n }\n self.assertEqual(\n common.nsx_struct_get_list(nsx_object, 'a/b'),\n [{\n 'c': 'd'\n }, {\n 'e': 'f'\n }]\n )\n self.assertEqual(\n nsx_object,\n {\n 'a': {\n 'b': [{\n 'c': 'd'\n }, {\n 'e': 'f'\n }]\n }\n }\n )", "def test_flatten():\n\n print(\"Testing flatten function with deeply nested lists\")\n test_deep_list = [\"one\", \"two\", [\"three\"], \"four\", [\"five\", \"six\", [\"seven\"]]]\n expected_result = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\n assert flatten(test_deep_list) == expected_result\n\n print(\"Testing flatten function with list with no nested lists\")\n test_shallow_list = [\"one\", \"two\", \"three\"]\n assert flatten(test_shallow_list) == test_shallow_list\n\n print(\"Testing flatten function with empty list\")\n test_empty_list = []\n assert flatten(test_empty_list) == test_empty_list", "def nested_list_equal(obj1: Union[int, List], obj2: Union[int, List]) -> bool:\n if isinstance(obj1, int) and isinstance(obj2, int):\n if obj1 != obj2:\n return False\n return True\n elif isinstance(obj1, list) and isinstance(obj2, list) and len(obj1) == len(\n obj2):\n res = []\n for i in range(len(obj1)):\n res.append(nested_list_equal(obj1[i], obj2[i]))\n return all(e is True for e in res)\n return False", "def check_type_nested_structure(\n 
typechecked_object,\n build_arg_nested_type: Union[list, tuple, dict, type],\n call_arg_nested_obj: any,\n path: str,\n ) -> None:\n\n iterable_supported_list = (list, tuple, dict)\n\n if type(call_arg_nested_obj) not in iterable_supported_list:\n if not isinstance(call_arg_nested_obj, build_arg_nested_type):\n NestedTypeWrapper.raise_typecheck_err(\n typechecked_object,\n build_arg_nested_type.__name__,\n type(call_arg_nested_obj).__name__,\n path,\n )\n return\n\n if type(build_arg_nested_type) != type(call_arg_nested_obj):\n NestedTypeWrapper.raise_typecheck_err(\n typechecked_object,\n type(build_arg_nested_type).__name__,\n type(call_arg_nested_obj).__name__,\n path,\n )\n return\n\n if isinstance(build_arg_nested_type, (list, tuple)):\n if len(build_arg_nested_type) != len(call_arg_nested_obj):\n NestedTypeWrapper.raise_missmatch_err(\n typechecked_object,\n len(build_arg_nested_type),\n len(call_arg_nested_obj),\n path,\n )\n\n for idx in range(len(build_arg_nested_type)):\n check_type_nested_structure(\n typechecked_object,\n build_arg_nested_type[idx],\n call_arg_nested_obj[idx],\n f\"element {idx} of \" + path,\n )\n\n if isinstance(build_arg_nested_type, dict):\n if len(build_arg_nested_type) != len(call_arg_nested_obj):\n NestedTypeWrapper.raise_missmatch_err(\n typechecked_object,\n len(build_arg_nested_type),\n len(call_arg_nested_obj),\n path,\n )\n\n for key in build_arg_nested_type.keys():\n if key in call_arg_nested_obj:\n check_type_nested_structure(\n typechecked_object,\n build_arg_nested_type[key],\n call_arg_nested_obj[key],\n f\"key {key} of \" + path,\n )\n else:\n NestedTypeWrapper.raise_key_missing_err(typechecked_object, key, path)", "def _is_list(e):\n return isinstance(e, LIST_TYPE)", "def test_list_validation():\n with pytest.raises(ValueError):\n # labels must be a list of string, but contains an int\n IngredientSpec(labels=[\"Label 1\", 17], name=\"foo\")\n\n ingredient = IngredientSpec(labels=[\"Label 1\", \"label 2\"], name=\"foo\")\n with pytest.raises(TypeError):\n # cannot append an int to a list of strings\n ingredient.labels.append(17)\n\n with pytest.raises(ValueError):\n # list of conditions cannot contain a property\n MeasurementRun(\"A measurement\", conditions=[Property(\"not a condition\")])", "def sublist_added_to_list(*args, **kwargs):\n if (not loaded_from_fixture(kwargs)):\n update_unit_test_infos(kwargs[\"instance\"].parent)", "def is_list(x):\n return type(x) == list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether or not tests are currently running on one of our public CIs.
def running_on_public_ci() -> bool: return os.getenv("GITHUB_ACTIONS") == "true"
[ "def in_ci():\n for key in ('CI', 'TRAVIS'):\n if os.environ.get(key, '') not in [False, '', '0', 'false']:\n return True\n return False", "def inside_test():\n return 'inside_test' in testing.environment and testing.environment.inside_test", "def is_interactive(self):\n return not bool(self.env_vars.get('CI'))", "def internal(self):\n return self.remote == site_config.params.INTERNAL_REMOTE", "def local_run():\n server_software = os.environ.get('SERVER_SOFTWARE')\n if server_software is None:\n return True\n if 'remote_api' in server_software:\n return False\n if server_software.startswith(('Development', 'testutil')):\n return True\n return False", "def check_connected(self):\n return\\\n (self.setup is not None) and\\\n (self.design is not None) and\\\n (self.project is not None) and\\\n (self.desktop is not None) and\\\n (self.app is not None)", "def _openface_running(self):\n try:\n return self.DOCKER_NAME in subprocess.check_output(['docker', 'ps'])\n except Exception as e:\n return False", "def IsPublic(self) -> bool:", "def HasCTI(self):\n return self.__has('CTI')", "def is_test(self):\n # Note: test numbers cannot be used in the PP Pro sandbox.\n # Instead, use the credit card number associated with a\n # sandbox account (Test Accounts -> View Details).\n return self.number in TEST_NUMBERS", "def has_active_truss() -> bool:\n\n return 'active_truss' in globals().keys()", "def can_run():\n # Is it GCE VM?\n try:\n get_metadata('id')\n except: # pylint: disable=bare-except\n log.error('Please run from a GCE VM.')\n return False\n\n # Running as sudo?\n if os.geteuid() != 0:\n log.error('Requires sudo access.')\n return False\n\n return True", "def _is_runnging_on_k8s():\n return os.getenv('IS_K8S_ENV') == 'true'", "def publicly_accessible(self) -> bool:\n return pulumi.get(self, \"publicly_accessible\")", "def is_running(self):\n return (self.configsvr is not None and self.configsvr.is_running() and\n all(shard.is_running() for shard in self.shards) and\n self.mongos is not None and self.mongos.is_running())", "def isCGROUPSSite():\n\n status = False\n\n # Make experiment specific?\n if os.environ.has_key('ATLAS_CGROUPS_BASE'):\n cgroups = os.environ['ATLAS_CGROUPS_BASE']\n if cgroups != \"\":\n pUtil.tolog(\"ATLAS_CGROUPS_BASE = %s\" % (cgroups))\n # if cgroups.lower() == \"true\":\n status = True\n\n return status", "def supported(self) -> bool:\n return self.api_id.value in self.vapix.api_discovery", "def test_environment(self):\n return os.path.exists(self.get_ejbca_home()) and self.jboss.test_environment()", "def running_in_ipython() -> bool:\n try:\n return __IPYTHON__\n except NameError:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the sleep time based on the number of past attempts. The sleep time grows exponentially with the attempts up to a maximum of 10 seconds.
def calc_sleep(self, attempt): return min(10, pow(2, attempt))
[ "def _calc_sleep_time(self, retry_index):\n # If we have already passed the retry index that would return the max timeout\n # then there is no reason to calculate the timeout.\n if self._max_retry_index is not None and self._max_retry_index <= self._retry_count:\n return self._max_retry_time\n\n # Calculate the timeout\n else:\n timeout = self._retry_coeff * math.factorial(retry_index)\n if timeout >= self._max_retry_time:\n self._max_retry_index = retry_index + 1\n return self._max_retry_time\n else:\n return timeout", "def sleeptime(self) -> float:\n return self._cur_delay + (random.randint(0, self.max_jitter) / 100.0)", "def seconds_to_sleep(self):\n if self.next_request_timestamp is None:\n return\n sleep_seconds = self.next_request_timestamp - time.time()\n if sleep_seconds <= 0:\n return\n return sleep_seconds", "def sleep_time(self):\n return self._sleep", "def test_backoff(self):\n r = retrying.retry(wait_exponential_multiplier=1000)(fail_n(9))\n\n fake_time = FakeTime()\n with fake_time:\n r()\n self.assertGreaterEqual(fake_time.mock_sleep.total, 2**9 - 1)", "def remainingTimeToWait(self) -> int:\n ...", "def _compute_timeout(count: int, delay: float) -> int:\n max_tasks_fn_timeout = 15 # 15 seconds\n return int((count * max_tasks_fn_timeout) + (delay * count))", "def _next_sleep_interval(previous_sleep_interval):\n min_interval = previous_sleep_interval or MIN_RETRY_INTERVAL\n max_interval = previous_sleep_interval * 3 or MIN_RETRY_INTERVAL\n return min(MAX_RETRY_INTERVAL, random.randint(min_interval, max_interval))", "def worker_sleep_time(self) -> ConfigNodePropertyInteger:\n return self._worker_sleep_time", "def _backoff_status(cls):\n now = time.time()\n backed_off_time = now - cls._backoff_timestamp\n backoff_time_left = cls._backoff_period - backed_off_time\n return round(backed_off_time, 2), round(backoff_time_left, 2)", "def _recompute_reconnect_backoff(self):\r\n self._reconnect_attempt = self._reconnect_attempt + 1\r\n self._reconnect_timeout = 2**self._reconnect_attempt\r\n # pylint: disable=consider-using-f-string\r\n self.logger.debug(\r\n \"Reconnect timeout computed to {:.2f}\".format(self._reconnect_timeout)\r\n )\r\n\r\n if self._reconnect_timeout > self._reconnect_maximum_backoff:\r\n self.logger.debug(\r\n f\"Truncating reconnect timeout to {self._reconnect_maximum_backoff} seconds\"\r\n )\r\n self._reconnect_timeout = float(self._reconnect_maximum_backoff)\r\n\r\n # Add a sub-second jitter.\r\n # Even truncated timeout should have jitter added to it. 
This is why it is added here.\r\n jitter = randint(0, 1000) / 1000\r\n # pylint: disable=consider-using-f-string\r\n self.logger.debug(\r\n \"adding jitter {:.2f} to {:.2f} seconds\".format(\r\n jitter, self._reconnect_timeout\r\n )\r\n )\r\n self._reconnect_timeout += jitter", "def getSleeperCount(self):\r\n return len(self.sleepers)", "def _bump_backoff(self):\n self.backoff_time = min(self.max_backoff, 2*(self.backoff_time or 1))", "def compute_backoff_delay(request, fixed_delay_ms):\n timeout_ms = request.get_timeout()\n start_time_ms = request.get_start_time_ms()\n delay_ms = fixed_delay_ms\n if delay_ms == 0:\n # Add 200ms plus a small random amount.\n m_sec_to_add = 200 + int(random() * 50)\n delay_ms = request.get_retry_delay_ms()\n delay_ms += m_sec_to_add\n # If the delay would put us over the timeout, reduce it to just before\n # the timeout would occur.\n now_ms = int(round(time() * 1000))\n ms_left = start_time_ms + timeout_ms - now_ms\n if ms_left < delay_ms:\n delay_ms = ms_left\n if delay_ms < 1:\n return 0\n return delay_ms", "def _nextAttempt(self):\n return Time() + timedelta(minutes=2)", "def _rate_limit_sleep(self, last_body_download_time):\n current_time = round(time.time(), 2)\n diff = round(current_time - last_body_download_time, 2)\n\n if diff >= self._api_hit_rate:\n return\n\n sleep_diff = round(self._api_hit_rate - diff, 3)\n self.logger.debug(\"Sleep time is: {0}\".format(sleep_diff))\n time.sleep(sleep_diff)", "def rand_sleep():\n time.sleep(random.uniform(0.75, 1.5))", "def logSleep(self, timeSlept):\n self.slept = timeSlept/3600", "def compute_sleep_interval(current_timestamp, last_timestamp, sleep_interval,\n max_interval=2.0, alpha=0.5):\n timestamp_interval = current_timestamp - last_timestamp\n timestamp_interval /= 1000.0\n if timestamp_interval < max_interval:\n sleep_interval += alpha * (timestamp_interval - sleep_interval)\n sleep_interval = bound_sleep_interval(sleep_interval)\n return sleep_interval" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the unproxied object
def unproxied_object(self): return self._unproxied_object
[ "def get_proxy (self):\n return self.proxy", "def WrappedObject(self) -> object:", "def GetRealObj(obj: Any) -> Any:\n if IsWeakRef(obj):\n return obj()\n if isinstance(obj, LambdaType):\n return obj()\n return obj", "def deref_safe ( self ):\n obj = self.deref_unsafe()\n if obj is not None:\n return obj\n else:\n raise ObjectDisappeared()", "def unproxify_device_objects(obj: Any, skip_explicit_proxies: bool = False):\n if isinstance(obj, dict):\n return {\n k: unproxify_device_objects(v, skip_explicit_proxies)\n for k, v in obj.items()\n }\n if isinstance(obj, (list, tuple, set, frozenset)):\n return type(obj)(\n unproxify_device_objects(i, skip_explicit_proxies) for i in obj\n )\n\n if hasattr(obj, \"_obj_pxy\"):\n if not skip_explicit_proxies or not obj._obj_pxy[\"explicit_proxy\"]:\n obj._obj_pxy[\"explicit_proxy\"] = False\n obj = obj._obj_pxy_deserialize(maybe_evict=False)\n return obj", "def extract_lazy_object(lo):\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped", "def get_wrapper( self, remote_id, remote_name=None ):\n try:\n return self.objects[ remote_id ]\n except KeyError:\n wrapper = ProxyObject( self, remote_id, remote_name )\n self.objects[ remote_id ] = wrapper\n return wrapper", "def as_proxied_type(cls) -> T:\n return cast(T, cls())", "def _decode_proxy(self, data):\n return self.ProxyObject(self, data[1])", "def to_unreliable(self):\r\n return self.__class__(self.unreliable_packets)", "def obtain(proxy):\n return pickle.loads(pickle.dumps(proxy))", "def address_obj(self):\n if not self._address_obj:\n self.address()\n return self._address_obj", "def test_get_reverse_on_unsaved_object(self):\n p = Place()\n\n # When there's no instance of the origin of the one-to-one\n with self.assertNumQueries(0):\n with self.assertRaises(UndergroundBar.DoesNotExist):\n p.undergroundbar\n\n UndergroundBar.objects.create()\n\n # When there's one instance of the origin\n # (p.undergroundbar used to return that instance)\n with self.assertNumQueries(0):\n with self.assertRaises(UndergroundBar.DoesNotExist):\n p.undergroundbar\n\n # Several instances of the origin are only possible if database allows\n # inserting multiple NULL rows for a unique constraint\n if connection.features.supports_nullable_unique_constraints:\n UndergroundBar.objects.create()\n\n # When there are several instances of the origin\n with self.assertNumQueries(0):\n with self.assertRaises(UndergroundBar.DoesNotExist):\n p.undergroundbar", "def Wrapper(self) -> object:", "def prop_proxy(self, prop):\n return self", "def get_object(self):\n return self.the_object", "def get_network_object(self):\n return self.g", "def props_proxy(self):\n return self", "def getProxy(self, rank):\n return self.proxies[rank]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the member's group.
def opt_changer_groupe(self, arguments): membre = self.objet squelette = membre.parent nom_groupe = arguments.strip() squelette.changer_groupe_membre(membre.nom, nom_groupe) self.actualiser()
[ "def agrupar(self, grupo):\n self.grupos[grupo.tipo] = grupo", "def change_group_name(self, user_id: int, group_name: str):\n self.cursor.execute(f\"UPDATE public.users SET group_name = '{group_name}' WHERE user_id = {user_id}\")\n self.conn.commit()", "def update_groups(self, uid, groups, character=None):\n pass", "def changegroup(self, func):\r\n return self._subscribe(\"changegroup\", func)", "def set_group(group,path):\n path = Location(path)\n if not path.is_remote:\n # Set the group for local files\n gid = bcftbx_utils.get_gid_from_group(group)\n if gid is None:\n raise Exception(\"Failed to get gid for group '%s'\" % group)\n for f in bcftbx_utils.walk(path.path,include_dirs=True):\n logger.debug(\"Updating group for %s\" % f)\n os.lchown(f,-1,gid)\n else:\n try:\n # Set group for remote files\n chmod_cmd = applications.general.ssh_command(\n path.user,\n path.server,\n ('chgrp','-R',group,path.path))\n print \"Running %s\" % chmod_cmd\n chmod_cmd.run_subprocess()\n except Exception as ex:\n raise Exception(\n \"Exception changing group to '%s' on remote \"\n \"destination %s: %s\" %\n (group,path,ex))", "def changegroup(self, func):\n return self._subscribe(\"changegroup\", func)", "def change_user_group(self, user_group_id, user_group_data):\r\n return self.put(\r\n 'usergroups/{}'.format(user_group_id),\r\n payload=user_group_data).json()", "def setGroup(self, new_group):\n if new_group.screen == self:\n return\n elif new_group.screen:\n # g1 <-> s1 (self)\n # g2 (new_group)<-> s2 to\n # g1 <-> s2\n # g2 <-> s1\n g1 = self.group\n s1 = self\n g2 = new_group\n s2 = new_group.screen\n\n s2.group = g1\n g1._setScreen(s2)\n s1.group = g2\n g2._setScreen(s1)\n else:\n if self.group is not None:\n self.group._setScreen(None)\n self.group = new_group\n new_group._setScreen(self)\n hook.fire(\"setgroup\")\n hook.fire(\"focus_change\")\n hook.fire(\"layout_change\",\n self.group.layouts[self.group.currentLayout])", "def dupli_group_swap(self, obj, new_group):\n obj.dupli_group = new_group \n obj.name = new_group.name", "def set_group(self, group: t.Optional[jank.graphics.Group]):", "def save(self, *args, **kwargs):\n\t\tbase_group = Group.objects.get_or_create(name='usuario_base')[0]\n\t\tself.user.groups.add(base_group)\n\t\treturn super(BaseUser, self).save(*args, **kwargs)", "def setfsgroup(self, groupname, memberlist=None):", "def update_group_for_admin(self):\n ResGroups = self.env['res.groups']\n # get all technical groups\n technical_groups = ResGroups.search([('category_id.name', '=',\n 'Technical Settings')])\n # get all groups\n all_groups = ResGroups.search([])\n todo_groups = all_groups - technical_groups\n if todo_groups:\n superuser = self.env['res.users'].browse(SUPERUSER_ID)\n superuser.write({'groups_id': [(6, 0, todo_groups.ids)]})\n return True", "def save(self):\n bbs.dbproxy.DBProxy('groupbase')[self.name] = self", "def change_process_gid(gid: int):\n try:\n os.setgid(gid)\n except OSError as ex:\n raise DaemonError(\n 'Unable to change the owning GID to {!r}'.format(gid)) from ex", "def set(self, group, insert, remove):\n # If the inital value is unset use the removed value is the initial value\n if self.initial[group] is Unset:\n self.initial[group] = remove\n self.current[group] = insert", "def GroupChange(self, context):\n for agent in bpy.context.scene.iai_agents.coll:\n if str(agent.group) == self.name:\n agent.type = self.type", "def modificarcategoria(self, categoria):\n self.categoria = categoria\n pass", "def changeGun(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get lyrics for an artist/song title
def get_lyrics_for_song(soap_url, artist, songtitle): soap = lyricwiki.LyricWikiBindingSOAP(soap_url) song = lyricwiki.getSongRequest() song.Artist = artist song.Song = songtitle result = soap.getSong(song) lyrics = result.Return.Lyrics if not lyrics or "not found" in lyrics.splitlines()[0].lower(): return return result.Return.Lyrics
[ "def scrape(title, artist):\r\n # Format artist and title for building url\r\n title = format(title)\r\n artist = format(artist)\r\n\r\n # Build url\r\n url = \"http://www.metrolyrics.com/{}-lyrics-{}.html\".format(title, artist)\r\n\r\n # Request url\r\n try:\r\n log.debug(\"Requesting %s\", url)\r\n resp = requests.get(url)\r\n except requests.ConnectionError as e:\r\n log.debug(e)\r\n raise ConnectionError(\"Couldn't connect to www.metrolyrics.com\")\r\n\r\n if resp.status_code != 200:\r\n log.debug(\"Request failed with %d\", resp.status_code)\r\n return None\r\n\r\n # Parse page\r\n soup = BeautifulSoup(resp.text, \"html.parser\")\r\n verses = [ v.get_text() for v in soup.find_all(\"p\", \"verse\") ]\r\n if not verses:\r\n log.debug(\"No verses found\")\r\n return None\r\n \r\n return (\"\\n\\n\".join(verses))", "def get_artist_song(r):\n h = html.fromstring(r.text)\n song = h.find_class('header_with_cover_art-primary_info-title')[0].text.title()\n artist = h.find_class('header_with_cover_art-primary_info-primary_artist')[0].text.title()\n return artist, song", "def get_lyrics(query):\n result = []\n search_results = get_search_result(query)\n for song in search_results:\n title = song['title'] + TITLE_ARTIST_SEPARATOR + song['artist']\n result.append(InlineQueryResultArticle(\n id=song['url'].replace('https://www.azlyrics.com/lyrics/', ''),\n title=title,\n description=\"\",\n input_message_content=InputTextMessageContent(\n message_text=\"Won't load too many lyrics at once\", # This method is deprecated\n parse_mode=\"Markdown\"\n )))\n if len(result) == 0:\n result.append(InlineQueryResultArticle(\n id=0, title=NO_RESULTS_TEXT, input_message_content=InputTextMessageContent(\n message_text=NO_RESULTS_ALERT,\n parse_mode=\"Markdown\")\n ))\n return result", "def get_lyrics(self, name: str, artists: List[str], **_) -> Optional[str]:\n\n lyrics = syncedlyrics.search(f\"{name} - {artists[0]}\", allow_plain_format=True)\n\n return lyrics", "def lyrics_from_song_api_path(song_api_path):\r\n song_url = base_url + song_api_path\r\n response = requests.get(song_url, headers=headers)\r\n json = response.json()\r\n path = json[\"response\"][\"song\"][\"path\"]\r\n # html scrapping\r\n page_url = \"http://genius.com\" + path\r\n page = requests.get(page_url)\r\n html = BeautifulSoup(page.text, \"html.parser\")\r\n #remove script tags that they put in the middle of the lyrics\r\n [h.extract() for h in html('script')]\r\n lyrics = html.find(\"div\", class_=\"lyrics\").get_text() #updated css where the lyrics are based in HTML\r\n return lyrics", "def get_songs_with_lyrics(self):\n try:\n for row in self.db.execute('SELECT track_id from lyrics'):\n yield row[0]\n except:\n pass", "def get_song(Song):\r\n song_name = Song[0]\r\n artist = Song[1]\r\n # get song info\r\n song_info = get_song_info(song_name, artist)\r\n if song_info:\r\n return song_info\r\n\r\n # search by song + artist\r\n song_info = get_song_info(song_name + ' ' + artist, artist)\r\n if song_info:\r\n return song_info\r\n\r\n # delete words between bracket\r\n if '(' in song_name:\r\n song_name = re.sub(r'\\([^)]*\\)', '', song_name)\r\n song_info = get_song_info(song_name + ' ' + artist, artist)\r\n if song_info:\r\n return song_info\r\n\r\n # shorten song_name by ('and', '&', 'with')\r\n song_name = song_name.lower()\r\n if 'and' in artist:\r\n SongName = song_name.split('And', 1)[0]\r\n song_info = get_song_info(SongName + ' ' + artist, artist)\r\n if song_info:\r\n return song_info\r\n\r\n if '&' in artist:\r\n SongName = 
song_name.split('&', 1)[0]\r\n song_info = get_song_info(SongName + ' ' + artist, artist)\r\n if song_info:\r\n return song_info\r\n\r\n if 'with' in artist:\r\n SongName = song_name.split('with', 1)[0]\r\n song_info = get_song_info(SongName + ' ' + artist, artist)\r\n if song_info:\r\n return song_info\r\n\r\n # shorten artist name by ('and', '&', 'with')\r\n artist = artist.lower()\r\n if 'and' in artist:\r\n Artist = artist.split('And', 1)[0]\r\n song_info = get_song_info(song_name + ' ' + Artist, Artist)\r\n if song_info:\r\n return song_info\r\n\r\n if '&' in artist:\r\n Artist = artist.split('&', 1)[0]\r\n song_info = get_song_info(song_name + ' ' + Artist, Artist)\r\n if song_info:\r\n return song_info\r\n\r\n if 'with' in artist:\r\n Artist = artist.split('with', 1)[0]\r\n song_info = get_song_info(song_name + ' ' + Artist, Artist)\r\n if song_info:\r\n return song_info\r\n print(f'Unable to scrap {song_name}')\r\n return song_info", "def get_lyric_body(url):\n print(\"Getting lyrics body: \" + url)\n page = urllib.request.urlopen(url)\n body = BeautifulSoup(page.read(), 'lxml').html.body.find_all(\n \"div\", class_=\"col-xs-12 col-lg-8 text-center\")[0].find_all(\"div\")\n lyrics = body[6].get_text()\n artist = body[4].text.replace(\" Lyrics\", \"\").replace(\"\\n\", \"\")\n title = body[3].text.replace(\" lyrics\", \"\").replace('\"', '')\n print(\"Done: \" + url)\n return \"*\" + title + TITLE_ARTIST_SEPARATOR + artist + \"*\" + lyrics", "def get_track_lyrics(track_api_path):\n \n request_url = 'http://www.genius.com' + track_api_path\n \n try:\n r = requests.get(request_url)\n except:\n logging.error(\" Bad request to retrieve lyrics for \" + track_api_path)\n return \"?\"\n \n soup = bs4.BeautifulSoup(r.content, 'lxml')\n\n try:\n lyrics = soup.find('div', class_='lyrics').get_text()\n except:\n logging.error(\" Unable to get lyrics for \" + track_api_path)\n return \"?\"\n \n lyrics_stripped = lyrics.replace(',', '').replace('\\n', '//')\n\n return lyrics_stripped", "def extract_lyrics(cls, url: str, song: str, artist: str) -> Lyrics:\n log.info(\"extracting lyrics from url \\\"{}\\\"\".format(url))\n url_data = UrlData(url)\n for extractor in cls.extractors:\n\n if not extractor.can_handle(url_data):\n continue\n\n log.debug(\"using {} for {}\".format(extractor, url_data))\n\n try:\n lyrics = extractor.extract_lyrics(url_data, song, artist)\n except exceptions.NoLyrics:\n log.warning(f\"{extractor} didn't find any lyrics at {url}\")\n continue\n except exceptions.NotAllowedError:\n log.warning(f\"{extractor} couldn't access lyrics at {url}\")\n continue\n except Exception:\n log.exception(f\"Something went wrong when {extractor} \"\n f\"handled {url}\")\n continue\n else:\n lyrics.origin = LyricsOrigin(url, extractor.name,\n extractor.url)\n log.debug(f\"extracted lyrics {lyrics}\")\n return lyrics\n raise exceptions.NoExtractorError(url)", "def print_lyrics(artist, track, album):\n # Genius object and properties\n genius = lyricsgenius.Genius(GENIUS_TOKEN)\n genius.skip_non_songs = True\n genius.excluded_terms = [\"(Remix)\", \"(Live)\", \"(Instrumental)\"]\n # Request genius object\n song = genius.search_song(title=track, artist=artist)\n # Check if genius object corresponds with spotify object parameters\n if (song != None and str(song.artist).lower() == artist.lower()):\n # Print lyrics and song parameters\n print(f\"\\nArtist: {artist}\\nAlbum: {album}\\nTrack name: {track}\\n{song.lyrics}\\n\")\n else:\n print(f\"There are no lyrics available for:\\n{artist} - 
{track}\")\n return", "def getlyrics(musixmatch_lyrics_page):\n doc = html.fromstring(musixmatch_lyrics_page.read())\n ret = [e.text_content() for e in doc.find_class('mxm-lyrics__content')]\n ret = \"\\n\".join(ret)\n return ret", "def get_rap_lyrics(song_link_meta_data):\n\tout = \"\"\n\tdriver = webdriver.PhantomJS()\n\t\t\n\tsong_link = song_link_meta_data['link']\n\t\n\t# use BS to get the song name and artist\n\tsource = hit_page(song_link)\n\tsong_name = raw_text(source.find(attrs={'class':'text_title'}).text)\n\tartist_name = raw_text(source.find(attrs={'class':'text_artist'}).text)\n\n\t# make sure we get the lyrics of the right song\n\t# if (artist_name not in song_link_meta_data['artist']) and \\\n\t# \t(song_name not in song_link_meta_data['song']):\n\t\t\n\t# \tprint \"Could not match %s to %s\"%(song_name, song_link_meta_data['song'])\n\t# \treturn None\n\n\t#get the lyrics for each song\n\t# we need to use selenium so the page actually populates\n\tdriver.get(song_link)\n\tdriver.implicitly_wait(5)\n\tsleep(2*random()) # fake a human\n\n\tlyrics = driver.find_elements_by_class_name('lyrics')\n\ttext = \"\"\n\tfor lyric in lyrics:\n\t\tsource = BeautifulSoup(lyric.get_attribute('innerHTML'))\n\t\ttext += source.text\n\ttext = raw_text(text)\n\tout += text\n\n\tdriver.close()\n\treturn out", "def get_albumart(): # album_artist, album_title\n pass", "def ask_spotify(title):\n song = sp.search(q=title, limit=1, type='track')\n try:\n info = song['tracks']['items'][0]\n if info:\n artist = info['artists'][0]['name']\n song_name = info['name']\n song_url = info['external_urls']['spotify']\n data = {'name': song_name, 'artist': artist, 'URL': song_url, 'clean_title': title}\n return data\n except IndexError:\n pass", "async def count_lyrics(session, artistname, title):\n\n title = urllib.parse.quote(title, safe='')\n\n async with session.get('https://api.lyrics.ovh/v1/{}/{}'.format(artistname, title)) as response:\n try:\n result = await response.read()\n result = json.loads(result)\n lyrics = result.get('lyrics')\n if lyrics:\n return len(lyrics.replace('\\n', ' ').split())\n except Exception:\n logging.warning('Error reading lyrics: {} - {}'.format(artistname, title))\n return", "def get_guessing_game_info(self):\n lyrics_tries = 0\n song_lyrics = \"\"\n song = \"\"\n artist = None \n genius = lyricsgenius.Genius(timeout=120,retries=3) \n \n genius.verbose = False\n genius.skip_non_songs = True\n \n while artist == None:\n artist_name = self.get_artist_name() \n genius.excluded_terms = [artist_name,\"(Remix)\", \"(Live)\",\"(Demo)\", \"(Version)\",\"(Edit)\",\"(Bonus)\", \"(Intro)\", \"(Cover)\", \n \"(Cut)\", \"tour\",\"(Black Magic*)\",\"(Extended)\",\"(Clean)\",\"Import\",\"(Booklet)\", \"(Broadway)\"\n ,\"liner notes\", \"credits\", \"interview\", \"skit\", \"instrumental\", \"setlist\"] \n\n artist = genius.search_artist(artist_name, max_songs=28, sort=\"title\")\n \n \n while song_lyrics == \"\":\n if(lyrics_tries == len(artist) or self.error):\n break\n try:\n self.song = genius.search_song(artist.songs[lyrics_tries].title, artist_name)\n except(HTTPError, Timeout): \n self.error = True\n continue\n \n\n while self.song.title in self.game.songs_done:\n if self.error:\n break\n try:\n self.song = genius.search_song(artist.songs[lyrics_tries].title, artist_name)\n lyrics_tries += 1\n \n except(HTTPError, Timeout,IndexError):\n self.error = True\n continue\n \n left_lyrics = self.song.lyrics.find(\"]\",self.song.lyrics.find(\"[Chorus:\")) + 1\n song_lyrics = 
profanity.censor(self.song.lyrics[left_lyrics: self.song.lyrics.find(\"[\",left_lyrics)],\"-\")\n\n if (\"Lyrics from Snippet:\" in song_lyrics):\n song_lyrics = \"\"\n\n lyrics_tries += 1\n\n if(song_lyrics == \"\" or self.error == True):\n self.lyrics = None # \"no lyrics available\"\n else:\n self.game.songs_done.append(self.song.title)\n self.lyrics = song_lyrics\n self.artist = artist_name\n self.img = self.song.song_art_image_url\n self.song_title = self.song.title.encode('ascii', 'ignore').decode(\"utf-8\") #removes any unicode characters in song title", "def get_songs(artist):\r\n\tsongs = []\r\n\tresponse = requests.get(base_url + f'/2.0/?method=artist.gettoptracks&artist={artist}&api_key={last_fm_api_key}&format=json')\r\n\tjson_text = json.loads(response.text)\r\n\ttracks = json_text['toptracks']\r\n\ttrack = tracks['track']\r\n\tfor json_obj in track:\r\n\t\tsong_name = json_obj['name']\r\n\t\tsongs.append(song_name)\r\n\treturn songs", "async def search_song(song_name):\n response = await _fetch('track.search', track=song_name)\n\n if 'error' in response:\n if response['error'] == 10:\n raise NotFound(\"Your last.fm API key is not valid.\") # https://www.last.fm/api/account/create\n raise NotFound(f\"Unknown error code {response['error']}\")\n\n results = response.get('results', {}).get('trackmatches', {}).get('track', [])\n\n if not results:\n raise NotFound\n\n results = results[0] # Gets first result\n\n # Fetches album/cover from the extra song info End-point.\n response = await _fetch('track.getinfo', track=results.get('name', ''), artist=results.get('artist', 'N/A'))\n results['cover_url'] = response.get('track', {}).get('album', {}).get('image', [])\n if results['cover_url']:\n results['cover_url'] = results['cover_url'][-1].get('#text', 'https://github.com/exofeel/Trackrr/blob/master/assets/UnknownCoverArt.png?raw=true')\n else:\n results['cover_url'] = 'https://github.com/exofeel/Trackrr/blob/master/assets/UnknownCoverArt.png?raw=true'\n results['track_album'] = response.get('track', {}).get('album', {}).get('title', 'N/A')\n\n return LFMSong(results)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the name of a given player
def name(player): return player['name']
[ "def get_player_name():\n\n return player.get(\"player_name\")", "def get_name(self):\r\n return self._player_name", "def get_player_name(self):\r\n return self.__name", "def get_other_player_name(self) :\n return self.players[1]", "def get_player_name(self, player_number):\n p, q = self.players\n return p if self.__piece_type__(p) == player_number else q", "def get_player(self, name):\n\n try:\n name = name.name\n except AttributeError: pass\n\n for i in self.players:\n if i.name == name:\n return i", "async def get_name_from_user(discord_id, *, return_player):\r\n user_doc = await get_user_document(discord_id)\r\n if not user_doc[\"osu_id\"]:\r\n return None\r\n else:\r\n if return_player:\r\n return user_doc[\"osu_id\"]\r\n else:\r\n return user_doc[\"team_name\"]", "def getplayer(title, logs=[]):\n match = consts.player_re.search(title)\n if not match:\n logs.append(\"Player: No regex match\")\n return None\n name = strip_annots(match.group(1))\n\n players = safe_call(consts.osu_api.get_user, name)\n if players:\n return players[0]\n logs.append(\"Player: '%s' not found\" % name)\n return None", "def id_to_name(player_id):\n query = \"SELECT name FROM players WHERE id=%s\"\n parameter = (player_id,)\n\n with connect_to_db() as database:\n database['cursor'].execute(query, parameter)\n player_name = database['cursor'].fetchone()[0]\n\n return player_name", "def team_player(self):\n\n return self.name.split('.')[0].split('_')[3:]", "def findPlayerByName(self, name): \r\n\t\treturn self.__players_by_name.get(name.lower(), None)", "def get_player_abbr(player_name):\n flag = 0\n name_string = \"\"\n if player_name != \"\":\n name_string += player_name[0]\n while True:\n if player_name[1] == \".\":\n name_string += \".\"\n name_string += player_name[2]\n break\n else:\n break\n\n name_string += \".\"\n for i in player_name:\n if i == \" \":\n flag = 1\n if flag == 1:\n name_string += i\n name_string = name_string.strip()\n if \"Jr.\" not in name_string:\n name_string = name_string.strip(\".\")\n return name_string\n else:\n return \"\"", "def findPlayerName(dataPlayersLeague, playerId):\n for each in dataPlayersLeague:\n if each[\"personId\"] == playerId:\n return each[\"firstName\"] + \" \" + each[\"lastName\"]", "def get_player(player_id):\n request_url = str(API_ENDPOINT) + str('/player/') + str(player_id)\n\n response = requests.get(request_url)\n if response.status_code != 200:\n print(\"Fehler GET get_player\")\n print(response.content)\n return \"error\", \"error\"\n\n response_data = response.json()\n player_name = response_data['name']\n team_name = response_data['team']\n\n print(\"player_name: \" + str(player_name))\n print(\"team_name: \"+str(team_name))\n return player_name, team_name", "def set_player_name(self, player):\r\n self.__name = player", "def get_active_player(self, player_name):\n player_one = self.get_player_one()\n player_two = self.get_player_two()\n\n if player_one.get_player_name() == player_name:\n return player_one\n if player_two.get_player_name() == player_name:\n return player_two\n else:\n return 'player not found'", "def getNickname(self, playerID):\n if oidIsValid(playerID):\n pp = self.playersColl.find_one({'_id': playerID})\n if pp != None:\n result = {'status': \"ok\", 'nickname': pp['nickname']}\n else:\n result = {'status': \"ko\", 'reason': \"unknown playerID\"}\n else:\n result = {'status': \"ko\", 'reason': \"invalid playerID\"}\n return result", "def player():\n\n name_id = 1\n return card_game.Player(name_id)", "def getPlayerNames(self):\n 
self.players=[]\n self.playerTiles=[]\n self.scores=[]\n for num in xrange(self.numOfPlayers):\n self.scores.append(0)\n self.playerTiles.append([])\n self.getSevenTiles(num)\n self.players+=[tkSimpleDialog.askstring(\"Name of Player %d\"\n %(num+1),\"Name:\")]\n if(type(self.players[num])!=str or len(self.players[num])==0):\n self.players[num]=\"Player %d\" %(num+1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the coin used by given player
def coins(player): return player['coins']
[ "def get_player_money(player):\r\n\r\n game_db = _load_game_db()\r\n\r\n if not player_exists(player):\r\n raise ValueError('player %s does not exists' % player)\r\n\r\n return game_db['players'][player]['money']", "def get_result(self, player):\n\t\t# return 0 if (self.knockedOut[player]) else 1\n\t\treturn self.current_scores[player % 2] / 162.", "def income(player: Player) -> None:\n player.coin += 1", "def get_account_information(self, coin):\n\n accounts = self.auth_client.get_accounts()\n for account in accounts:\n if coin in account['currency']:\n return float(account['available'])\n\n return None", "def get_mining_coin(self):\r\n url = self.url_base + \"idmining=\" + self.user_id\r\n\r\n if self.debug == 1:\r\n print url\r\n\r\n try:\r\n coin = (requests.get(url, timeout=self.timeout)).text\r\n except requests.exceptions.RequestException as exception:\r\n print exception\r\n return \"ERROR\"\r\n\r\n return coin", "def pick_coin(self):\n coin = round(random.uniform(0,1))\n if coin == 0:\n picked_coin = \"normal\"\n else: \n picked_coin = \"biased\"\n return picked_coin", "def get_coin_value(self, coin):\r\n url = self.url_base + 'coin=' + str(coin)\r\n \r\n if self.debug == 1:\r\n print url\r\n\r\n try:\r\n result = requests.get(url, timeout=self.timeout)\r\n except requests.exceptions.RequestException as exception:\r\n print exception\r\n return \"ERROR\"\r\n\r\n return float(result.text)", "def get_coin_balance(self, coin):\r\n totals = self.get_all_balances()\r\n if coin in totals.keys():\r\n if self.debug == 1:\r\n print coin\r\n\r\n return float(totals[coin])\r\n else:\r\n return 'Bad Coin'", "def _collectCoin(self):\n coinHit = pygame.sprite.spritecollide(self._Player, \\\n self._coinList, True)\n self._score += (5 * len(coinHit))", "async def coinprob(self, ctx, person: discord.User=None):\n if not person:\n person = ctx.author\n\n data = await self.jc_get(f'/wallets/{person.id}/probability')\n p = float(data['probability'])\n await ctx.send(f'You have a {p * 100}%/message chance')", "def next_damage_dealer(self):\n return self.players[random.randint(0, len(self.players)-1)]", "def get_net(self) -> float:\n return self.coin * self.currentPrice - self.coinOwed * self.currentPrice + self.balance", "async def get_chance(msg):\n if msg.channel.id not in data.get_currency_channels(msg.guild.id):\n return await embeds.desc_only(msg.channel, 'Currency Generation is **disabled** in this Channel. 
'\n 'Ask an Administrator to enable it.')\n return await embeds.desc_only(msg.channel, f'Currency Generation for this Server is set to '\n f'**{data.get_currency_chance(msg.guild.id)} %**.')", "def choose_gain(self, game, coins, buys):\n\n # Since strategy is a GP tree, can just call with cards_owned\n buy_order_list = self.strategy.eval(game, self.cards_owned)\n\n # Find first card in list that can buy, and buy it\n for card_name in buy_order_list:\n if game.can_buy(card_name, coins):\n return card_name\n\n # If can't buy anything, pass\n return \"pass\"", "def getClientBalance(self, client, bot_config):\n currency = str(bot_config['currency'])\n pair = currency[len(currency)-4:len(currency)]\n if(pair == 'USDT'):\n balance = client.get_asset_balance(asset='USDT')\n else:\n balance = client.get_asset_balance(asset='BTC')\n return balance['free']", "def special(self, game, player):\n player.gain_card(\"Copper\")\n player.gain_card(\"Gold\")\n player.output(\"Gained a Copper and a Gold\")", "def pickup_coin(self, x, y):\n \n # ADD CODE HERE\n if self.coins[y][x] > 0:\n coins = self.coins[y][x]\n self.coins[y][x] = 0\n return coins\n else:\n return 0", "def get_winner(self):\n if self.check_for_bust(self.dealer):\n print('Dealer bust')\n return 1\n if self.dealer.hand.total >= 17 and self.dealer.hand.total > self.player.hand.total:\n print('Dealer wins')\n return -1\n if self.dealer.hand.total < self.player.hand.total:\n print(self.player.name + (' wins!'))\n return 1\n if self.dealer.hand.total == self.player.hand.total:\n print('Push!')\n return 0", "def acceptable_coins():\n return (Coin(0.01 * 10 ** multiplier * coinsDeg) for multiplier in range(4) for coinsDeg in [1, 2, 5])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all true permissions for a user excluding ones that have been explicitly revoked.
def _get_adjusted_true_permissions(cls, user):
    from rolepermissions.permissions import available_perm_status

    default_true_permissions = set()
    user_permission_states = available_perm_status(user)
    adjusted_true_permissions = set()

    # Grab the default true permissions from each of the user's roles
    for role in get_user_roles(user):
        default_true_permissions.update(role.get_default_true_permissions())

    # For each of those default true permissions, only keep ones
    # that haven't been explicitly revoked
    for permission in default_true_permissions:
        if user_permission_states[permission.codename]:
            adjusted_true_permissions.add(permission)

    return adjusted_true_permissions
[ "def get_user_permissions(cls, user):\n return set(user.permissions.values_list(\"name\", flat=True))", "def get_effective_permissions(cls, user):\n if user.is_superuser:\n return set(Permission.objects.values_list(\"name\", flat=True))\n perms = set()\n # User permissions\n for p in user.permissions.all():\n perms.add(p.name)\n if p.implied:\n perms.update(p.implied.split(\",\"))\n # Group permissions\n for g in user.groups.all():\n for p in g.permissions.all():\n perms.add(p.name)\n if p.implied:\n perms.update(p.implied.split(\",\"))\n return perms", "def get_user_permissions(user=None):\n if user is not None:\n return_list = []\n user_query = users.select().where(users.username == user).get()\n for item in permissions.select().where(permissions.user_id == user_query.id):\n return_list.append(item.zone_id)\n return return_list", "def _filter_permissions(self, qs):\n # We use qs.all() here because we want to allow a manager object (e.g. MediaItem.objects)\n # to be passed as well.\n return (\n qs.all()\n .viewable_by_user(self.request.user)\n .annotate_viewable(self.request.user)\n .annotate_editable(self.request.user)\n )", "def get_all_consent_uncached(self, user):\n stored_consent = self._get_stored_consent_for_user(user)\n result = {}\n\n if stored_consent:\n for key, value in stored_consent.consent_grants.items():\n if value:\n result[key] = Consent.GRANTED\n else:\n result[key] = Consent.DENIED\n\n return result", "def get_permissions_queryset(self):\n editable_permissions_queryset = self.get_editable_permissions()\n existing_permissions_queryset = self.object.user_permissions.all()\n\n return Permission.objects.filter(\n Q(pk__in=editable_permissions_queryset.values('pk')) |\n Q(pk__in=existing_permissions_queryset.values('pk'))\n ).order_by('content_type__app_label').select_related('content_type')", "def available_perm_status(user):\n roles = get_user_roles(user)\n permission_hash = {}\n\n user_permission_names = set(user.user_permissions.values_list(\"codename\", flat=True))\n\n for role in roles:\n for permission_name in role.permission_names_list():\n permission_hash[permission_name] = permission_name in user_permission_names\n\n return permission_hash", "def available_perm_names(user):\n roles = get_user_roles(user)\n perm_names = set(p for role in roles for p in role.permission_names_list())\n return [p.codename for p in user.user_permissions.all() if p.codename in perm_names] \\\n if roles else [] # e.g., user == None", "def get_all_consent_uncached(self, user):\n raise NotImplementedError", "def get_allowed_cases(user):\n all_cases = AssuranceCase.objects.all()\n # if get_case_permissions returns anything other than None, include in allowed_cases\n return [case for case in all_cases if get_case_permissions(case, user)]", "def filter_granted(self, queryset):\n return Dataset.filter_by_user(self.request.user)", "def get_permission_policies(self, request):\n user = request.cache.user\n users = request.app.models.get('users')\n groups = request.app.models.get('groups')\n perms = []\n if not users or not groups or not user.is_authenticated():\n return perms\n with users.session(request) as session:\n session.add(user)\n for group in set(user.groups):\n for permission in group.permissions:\n policy = permission.policy\n if not isinstance(policy, list):\n policy = (policy,)\n perms.extend(policy)\n return perms", "def get_team_permissions(self, user_obj, obj=None):\n if user_obj.is_anonymous() or obj is not None:\n return set()\n if not hasattr(user_obj, 
\"_team_perm_cache\"):\n memberships = Team.objects.filter(\n Q(memberships__user=user_obj),\n Q(memberships__state=\"manager\") | Q(memberships__state=\"member\"),\n )\n perms = memberships.values_list(\n \"permissions__content_type__app_label\",\n \"permissions__codename\"\n ).order_by()\n user_obj._team_perm_cache = set([\"%s.%s\" % (ct, name) for ct, name in perms])\n return user_obj._team_perm_cache", "def all_users(cls):\n return [user for user in cls.all() if user.is_admin() is False]", "def permissions(self):\n return [Element.from_href(e) for e in self.granted_element]", "def permissions_for(self, user):\n ret = UserPermissions()\n # force the permissions to be loaded into session, otherwise templates\n # that depend on the permissions being available in session may assert\n # the user has no permissions!\n ret.check_permissions()\n return ret", "def get_all_consent(self, user):\n return cache_memoize(self._get_user_cache_key(user),\n lambda: self.get_all_consent_uncached(user))", "def permissions(self):\n\n perms = set()\n\n for c in self.characters:\n bef = datetime.utcnow()\n per = c.permissions()\n aft = datetime.utcnow()\n print \"Character {0} permissions took {1}\".format(c.name, aft - bef)\n for p in per:\n perms.add(p)\n\n print \"-----\"\n perms = list(perms)\n perms.sort(key=lambda p: p.id)\n\n return perms", "def _permissions():\n return getattr(g, '_request_permissions', {})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a Role object from a role name.
def retrieve_role(role_name): return RolesManager.retrieve_role(role_name)
[ "def get_role(cls, name):\n return cls.query.filter_by(name=name).first()", "def get_role(obj, role_name):\n for role in obj.roles:\n if role.name == role_name:\n return role\n return None", "async def get_role_by_name(guild: Guild, role_name: str) -> Role:\n role = None\n if guild is not None:\n roles = guild.roles\n role = utils.get(roles, name=role_name)\n if role is None:\n try:\n roles = await guild.fetch_roles()\n except HTTPException:\n pass\n else:\n role = utils.get(roles, name=role_name)\n\n return role", "def get_role(id_or_name):\n try:\n return Role.objects.get(pk=id_or_name)\n except (Role.DoesNotExist, ValueError):\n try:\n return Role.objects.get(name=id_or_name)\n except Role.DoesNotExist:\n return None", "def get_role(self, role):\r\n uri = \"OS-KSADM/roles/%s\" % utils.get_id(role)\r\n resp, resp_body = self.method_get(uri)\r\n role = Role(self, resp_body.get(\"role\"))\r\n return role", "def get_by_name(self, name: str) -> tp.Optional[RoleType]:\n pass", "def _get_role(role_name):\n known_roles = kv().get('charm.azure.roles', {})\n if role_name in known_roles:\n return known_roles[role_name]\n sub_id = kv().get('charm.azure.sub-id')\n role_file = Path('files/roles/{}.json'.format(role_name))\n role_data = json.loads(role_file.read_text())\n role_fullname = role_data['Name'].format(sub_id)\n scope = role_data['AssignableScopes'][0].format(sub_id)\n role_data['Name'] = role_fullname\n role_data['AssignableScopes'][0] = scope\n try:\n log('Ensuring role {}', role_fullname)\n _azure('role', 'definition', 'create',\n '--role-definition', json.dumps(role_data))\n except AlreadyExistsAzureError:\n pass\n known_roles[role_name] = role_fullname\n kv().set('charm.azure.roles', known_roles)\n return role_fullname", "def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter", "def role(self, name):\n for r, n in itertools.chain(self._role_to_prop.items(), self._ref_role_to_prop.items()):\n if n == name:\n return r\n else:\n return -1", "def cmd_role_get(self, args):\n role_id = args[0]\n self._get_obj(role_id, 'role')", "def find_role(cls, keyword):\n return _CompilerRole.find(keyword)", "def get_role_class(expected_rolename):\n \n try:\n role_class = ROLE_CLASSES_BY_TYPE[expected_rolename]\n except KeyError:\n raise tuf.FormatError(repr(expected_rolename)+' not supported')\n else:\n return role_class", "def get_role_model(channel, role):\n return ChannelGroupRole.objects.get(channel=channel, role=role)", "def get(self, role_id: str = None, role_name: str = None):\n if role_id is not None and role_name is not None:\n raise Exception(\"role_id & role_name are mutually exclusive\")\n\n try:\n responses = self._admin_api.retrieve_roles(\n authorization=self.authorization)\n except ApiException as ex:\n raise ex\n roles_list = []\n for response in responses:\n role = self.hbot._create_schema(response, RoleSchemaInner)\n if role_id and response.get('roleId') == role_id:\n return role\n if role_name and response.get('roleName') == role_name:\n return role\n roles_list.append(role)\n return roles_list", "def 
get_role(self, role_id, **kwargs):\n\n url = self._make_url(\"/v1/role/{0}/\".format(role_id))\n return self._session.get(url, **kwargs)", "def lookup_role_arn(roleName):\n lastSlash = roleName.rfind(\"/\")\n if lastSlash >= 0:\n prefix = roleName[:lastSlash+1]\n baseName = roleName[lastSlash+1:]\n else:\n prefix = \"/\"\n baseName = roleName\n for page in iam_client.get_paginator('list_roles').paginate(PathPrefix=prefix):\n for role in page['Roles']:\n if baseName == role['RoleName']:\n return role['Arn']\n raise Exception(f'Unable to find role with name \"{roleName}\"')", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Role':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = RoleArgs.__new__(RoleArgs)\n\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"assume_role_policy_document\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"managed_policy_arns\"] = None\n __props__.__dict__[\"max_session_duration\"] = None\n __props__.__dict__[\"path\"] = None\n __props__.__dict__[\"permissions_boundary\"] = None\n __props__.__dict__[\"policies\"] = None\n __props__.__dict__[\"role_id\"] = None\n __props__.__dict__[\"role_name\"] = None\n __props__.__dict__[\"tags\"] = None\n return Role(resource_name, opts=opts, __props__=__props__)", "def get_instance(self, name, id):\n cls = self.get_class(name)\n if cls:\n if hasattr(cls, 'objects') and id:\n try:\n return cls.objects.get(id=id)\n except (cls.DoesNotExist, ValueError):\n return None\n return None\n from .models import Role\n try:\n return Role.objects.get(type=name, rid=id)\n except Role.DoesNotExist:\n return None", "def validate_role(name_or_arn):\n paginator = boto3.client('iam').get_paginator('list_roles')\n for page in paginator.paginate():\n for role in page['Roles']:\n if (name_or_arn == role['Arn']) or (name_or_arn == role['RoleName']):\n return role['Arn']\n exit_if_none(None, f\"invalid role name/ARN: {name_or_arn}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a role to a user.
def assign_role(user, role): return _assign_or_remove_role(user, role, "assign_role_to_user")
[ "async def assign_role(self, ctx, * , role: CustomRoleConverter):\n settable_role = find(lambda r: r.id in self.settable_roles, ctx.guild.roles)\n if role == settable_role and self.lockdown:\n await ctx.send(\"Server on lockdown due to high amount of people joining try again in a day or two\")\n return\n if role.position > settable_role.position:\n if ctx.channel.name != \"have-you-read-the-rules\":\n await ctx.send(\"can't give you that role\")\n return\n try:\n admin_cog = self.bot.get_cog(\"Admin\")\n if admin_cog:\n if admin_cog.mute_role == role:\n return\n member = ctx.message.author\n await member.add_roles(role)\n await ctx.send(f\"Assigned you the following role: {role.name}\")\n except discord.Forbidden as fb:\n await ctx.send(\"Sorry I don't have the permission to give you that role\")", "def set_role(username, security_role, logger, client):\n logger.info('Setting new role for user {0}...'.format(username))\n client.users.set_role(username, security_role)\n logger.info('User deleted')", "def set_role(self, role):\n self.role.set(role)", "def add_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.add(user)", "def change_user_role(self,role, user_id):\n sql = (\n \"\"\"\n UPDATE users SET user_role = '{}' WHERE user_id = '{}'\n \"\"\".format(role, user_id)\n )\n self.cur.execute(sql)\n self.conn.commit()", "def add_user_role(self, user, role, project=None):\n return self.modify_user_role(user, role, project=project,\n operation='add')", "def grant_role(self, user, targetUser, role):\n if self.can_grant_permissions(user):\n if role in self[\"roles\"]:\n self[\"roles\"][role].append(targetUser)\n return True\n return False", "def assignRole(self, request, queryset):\n form = self.__class__.AssignRoleForm(initial={\n 'users': queryset\n })\n return render_to_response('users/admin/assign-role.html', {\n 'form': form,\n 'users': queryset\n }, context_instance=RequestContext(request))", "def create_role(self, user, role):\n if self.can_modify_roles(user) and role not in self[\"roles\"] and role != \"default\":\n self[\"roles\"][role] = []\n return True\n return False", "def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):\n raise exception.NotImplemented()", "def test_add_role_to_user(self):\n pass", "async def AddLeaderRole(self, user: User):\n await user.add_roles(self.leaderRole)", "def update_role(self, role):\r\n lookup = {\r\n 'admin' : 'org_admin',\r\n 'user' : 'org_user',\r\n 'publisher' : 'org_publisher',\r\n 'view_only' : 'tLST9emLCNfFcejK',\r\n 'viewer' : 'iAAAAAAAAAAAAAAA',\r\n 'viewplusedit' : 'iBBBBBBBBBBBBBBB'\r\n }\r\n\r\n if isinstance(role, Role):\r\n role = role.role_id\r\n elif isinstance(role, str):\r\n if role.lower() in lookup:\r\n role = lookup[role.lower()]\r\n passed = self._portal.update_user_role(self.username, role)\r\n if passed:\r\n self._hydrated = False\r\n self._hydrate()\r\n self.role = role\r\n return passed", "def add_user_role_to_tenant(request, project=None, user=None, role=None,\n group=None, domain=None):\n ksclient = get_admin_ksclient()\n if keystone.VERSIONS.active < 3:\n return ksclient.roles.add_user_role(user, role, project)\n else:\n return ksclient.roles.grant(role, user=user, project=project,\n group=group, domain=domain)", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "def assign_user(id):\n check_admin()\n\n user = User.query.get_or_404(id)\n\n # prevent admin from being assigned a department or role\n if user.is_admin:\n abort(403)\n\n form = UserAssignForm(obj=user)\n if form.validate_on_submit():\n user.department = form.department.data\n user.role = form.role.data\n db.session.add(user)\n db.session.commit()\n flash('You have successfully assigned a department and role.')\n\n # redirect to the roles page\n return redirect(url_for('admin.list_employees'))\n\n return render_template('admin/users/user.html',\n user=user, form=form,\n title='Assign Employee')", "def role(uid):\n usr = User.query.filter_by(id=uid).first()\n if usr is not None:\n form = RoleChangeForm(request.form)\n form.role.choices = [(r.name, r.description) for r in Role]\n if form.validate_on_submit():\n usr.role = Role.from_string(form.role.data)\n g.db.commit()\n return redirect(url_for('.users'))\n form.role.data = usr.role.name\n return {\n 'form': form,\n 'view_user': usr\n }\n g.log.debug(f'user with id: {uid} not found!')\n abort(404)", "def update_user_role(self, user_uid: Union[str, UUID], role: ROLES, actions: ACTIONS = []):\n self.session.checked_post(self._path() + \"/users/{}\".format(user_uid),\n {'role': role, 'actions': actions})\n return True", "def set_role(self, role):\n self.role = role\n for i, _var_ in enumerate(self.variants):\n self.variants[i].role = role" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a role from a user.
def remove_role(user, role): return _assign_or_remove_role(user, role, "remove_role_from_user")
[ "def removeRole(self, role):\n pass", "def remove_role(self, user, targetUser, role):\n if self.can_grant_permissions(user):\n if role in self[\"roles\"]:\n if targetUser in self[\"roles\"][role]:\n self[\"roles\"][role].remove(targetUser)\n return True\n return False", "def remove_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.remove(user)", "def delete_role_from_user(self, role, user):\r\n uri = \"users/%s/roles/OS-KSADM/%s\" % (utils.get_id(user),\r\n utils.get_id(role))\r\n resp, resp_body = self.method_delete(uri)", "def delete_role(self, user, role):\n if self.can_modify_roles(user) and role in self[\"roles\"] and role != \"default\":\n for permission in Permissions.ROLE_PERMISSIONS:\n if role in self[\"general\"][permission]:\n self[\"general\"][permission].remove(role)\n for path in self[\"files\"]:\n if role in self[\"files\"][path][\"roles_write\"]:\n self[\"files\"][path][\"roles_write\"].remove(role)\n del self[\"roles\"][role]\n return True\n return False", "def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):\n raise exception.NotImplemented()", "def test_remove_role_from_user(self):\n pass", "def revoke(session: Session, username: str, role: Role):\n if not username or not role:\n raise ValueError('A username and a role name are required.')\n user_role: Optional[UserRole] = UserRoles.find_role(session, username, role)\n if user_role is None:\n return\n try:\n session.delete(user_role)\n session.commit()\n except:\n session.rollback()\n raise", "async def RemoveLeaderRole(self, user: User):\n await user.remove_roles(self.leaderRole)", "async def _remove(self, ctx: commands.Context, user: discord.Member, role: discord.Role):\n async with self.config.member(user).temp_roles() as user_tr:\n if not (user_tr.get(str(role.id))):\n return await ctx.send(\n f\"That is not an active TempRole for {user.mention}.\",\n allowed_mentions=discord.AllowedMentions.none()\n )\n del user_tr[str(role.id)]\n message = f\"TempRole {role.mention} for {user.mention} has been removed.\"\n await ctx.send(\n message,\n allowed_mentions=discord.AllowedMentions.none()\n )\n await self._maybe_send_log(ctx.guild, message)\n await self._tr_end(user, role, admin=ctx.author)", "def remove_role(principal, role):\n try:\n if isinstance(principal, User):\n ppr = PrincipalRoleRelation.objects.get(\n user=principal, role=role, content_id=None, content_type=None)\n else:\n ppr = PrincipalRoleRelation.objects.get(\n group=principal, role=role, content_id=None, content_type=None)\n\n except PrincipalRoleRelation.DoesNotExist:\n return False\n else:\n ppr.delete()\n\n return True", "def test_remove_user_role(self):\n pass", "async def removerole(self, ctx, role:discord.Role, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_roles == False:\n await self.bot.say(\"Sorry, I do not have the manage_roles permission\\n**Aborting**\")\n return\n if len(users) == 0:\n await self.bot.say(\"You need to add a person to remove the role from!\")\n idk = []\n for user in users:\n await self.bot.remove_roles(user, role)\n idk.append(user.name)\n await self.bot.say(\"ok, removed the role {0} from user(s) `{1}`\".format(role.name, ', '.join(idk)))", "def remove_user_role_frm_tenant(request, project=None, user=None, role=None,\n group=None, domain=None):\n ksclient = get_admin_ksclient()\n if keystone.VERSIONS.active < 3:\n return ksclient.roles.remove_user_role(user, role, project)\n else:\n return ksclient.roles.revoke(role, user=user, 
project=project,\n group=group, domain=domain)", "def delete_user_role(self, uid, rolename, meeting): \n if meeting !=\"\":\n status, resp = self.ucengine.request('DELETE',\n 'user/%s/roles/%s/%s' % (uid, rolename, meeting), \n params = {'uid':self.uid, 'sid': self.sid}\n )\n else:\n status, resp = self.ucengine.request('DELETE',\n 'user/%s/roles/%s' % (uid, rolename),\n params = {'uid':self.uid, 'sid': self.sid}\n )\n if status != 200:\n raise UCError(status, resp)", "async def RemoveStaticRole(self, user: User): \n await user.remove_roles(self.memberRole)", "async def remove(self, ctx, *, role_name):\n found_role = None\n for role in ctx.guild.roles:\n if role.name.lower() == role_name.lower():\n found_role = role\n if found_role:\n try:\n success = await \\\n self.bot.pg_utils.remove_autoassign_role(\n ctx.guild.id, found_role.id, self.bot.logger)\n except ValueError:\n local_embed = discord.Embed(\n title=f'{found_role.name} is already'\n ' not on the auto-assignable list',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return\n if success:\n local_embed = discord.Embed(\n title=f'Removed {found_role.name} '\n 'from auto-assignable roles',\n description=' ',\n color=0x419400\n )\n else:\n local_embed = discord.Embed(\n title=f'Internal error occured,'\n ' please contact @dashwav#7785',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n else:\n local_embed = discord.Embed(\n title=f'Couldn\\'t find role {role_name}',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)", "async def remove(self, ctx, *, role: discord.Role):\r\n\t\tdata = json_mngr().read('./data/settings.json')\r\n\t\tif str(ctx.guild.id) not in data.keys():\r\n\t\t\tdata[str(ctx.guild.id)] = {\r\n\t\t\t\t\"edit_roles\": [],\r\n\t\t\t\t\"view_roles\": [],\r\n\t\t\t\t\"log_channel\": None\r\n\t\t\t}\r\n\t\tif role:\r\n\t\t\tif role.id in data[str(ctx.guild.id)]['edit_roles']:\r\n\t\t\t\tdata[str(ctx.guild.id)]['edit_roles'].remove(role.id)\r\n\t\t\tjson_mngr().handle_modify('./data/settings.json', newdata=data, indent=2, backup=True)\r\n\t\t\tawait ctx.send(f\"removed {role.id} as an editing role.\")", "async def remove_course_role(self, role_id):\n role = self.bot.get_guild(self.guild_id).get_role(role_id)\n if role is None:\n return logger.error(\"role is empty.\")\n\n await role.delete()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove all roles from a user.
def clear_roles(user):
    roles = get_user_roles(user)
    for role in roles:
        role.remove_role_from_user(user)
    return roles
[ "def delete_all_roles(self):\n if not self.user_roles:\n raise UserNotHasRole\n\n for user_role in self.user_roles:\n user_role.delete(commit=True)\n db.session.commit()", "def remove_users(self, *users):\n entries = CourseAccessRole.objects.filter(\n user__in=users, role=self._role_name, org=self.org, course_id=self.course_key\n )\n entries.delete()\n for user in users:\n if hasattr(user, '_roles'):\n del user._roles", "def remove_roles(self):\n minion_sets = []\n role_sets = []\n for instance in self.instances:\n minion = instance.get('minion')\n roles = set(minion.roles or [])\n for role in instance.get('roles', []):\n if role in roles:\n roles.remove(role)\n roles = list(roles)\n role_sets.append(roles)\n minion_sets.append([minion])\n self.client.set_roles(minion_sets, role_sets, timeout=30)", "async def removerole(self, ctx, role:discord.Role, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_roles == False:\n await self.bot.say(\"Sorry, I do not have the manage_roles permission\\n**Aborting**\")\n return\n if len(users) == 0:\n await self.bot.say(\"You need to add a person to remove the role from!\")\n idk = []\n for user in users:\n await self.bot.remove_roles(user, role)\n idk.append(user.name)\n await self.bot.say(\"ok, removed the role {0} from user(s) `{1}`\".format(role.name, ', '.join(idk)))", "async def RemoveStaticRole(self, user: User): \n await user.remove_roles(self.memberRole)", "def delete_role(self, user, role):\n if self.can_modify_roles(user) and role in self[\"roles\"] and role != \"default\":\n for permission in Permissions.ROLE_PERMISSIONS:\n if role in self[\"general\"][permission]:\n self[\"general\"][permission].remove(role)\n for path in self[\"files\"]:\n if role in self[\"files\"][path][\"roles_write\"]:\n self[\"files\"][path][\"roles_write\"].remove(role)\n del self[\"roles\"][role]\n return True\n return False", "def reset_roles(self, new_roles):\n self.roles = new_roles", "def test_remove_role_from_user(self):\n pass", "def desasignar_roles(self, *args, **kw):\n if kw:\n pks = []\n for k, pk in kw.items():\n if not k.isalnum():\n continue\n pks.append(int(pk))\n transaction.begin()\n id_user = UrlParser.parse_id(request.url, \"miembrosfase\")\n user = Usuario.por_id(id_user)\n c = 0\n while c < len(user.roles):\n r = user.roles[c]\n if r.id_rol in pks:\n if r.nombre_rol == \"Miembro de Fase\":\n msg = \"No puedes eliminar el rol {nr}. Si deseas \"\n msg += \"que el usuario deje de ser miembro, debes \"\n msg += \"hacerlo en la pagina de Miembros de la Fase.\"\n flash(msg.format(nr=r.nombre_rol), \"warning\")\n DBSession.rollback()\n return \"./\"\n del user.roles[c]\n else:\n c += 1\n transaction.commit()\n flash(\"Roles Desasignados correctamente\")\n else:\n flash(\"Seleccione por lo menos un rol\", \"warning\")\n return \"./\"", "async def RemoveLeaderRole(self, user: User):\n await user.remove_roles(self.leaderRole)", "def test_remove_user_role(self):\n pass", "def desasignar_roles(self, *args, **kw):\n if kw:\n pks = []\n for k, pk in kw.items():\n if not k.isalnum():\n continue\n pks.append(int(pk))\n transaction.begin()\n id_user = UrlParser.parse_id(request.url, \"miembrostipo\")\n user = Usuario.por_id(id_user)\n c = 0\n while c < len(user.roles):\n r = user.roles[c]\n if r.id_rol in pks:\n if r.nombre_rol == \"Miembro de Tipo Item\":\n msg = \"No puedes eliminar el rol {nr}. 
Si deseas \"\n msg += \"que el usuario deje de ser miembro, debes \"\n msg += \"hacerlo en la pagina de Miembros para este \"\n msg += \"tipo de item.\"\n flash(msg.format(nr=r.nombre_rol), \"warning\")\n DBSession.rollback()\n return \"./\"\n del user.roles[c]\n else:\n c += 1\n transaction.commit()\n flash(\"Roles Desasignados correctamente\")\n else:\n flash(\"Seleccione por lo menos un rol\", \"warning\")\n return \"./\"", "def remove_all_users(self):\n while True:\n if not self._delete_all_user():\n break", "async def remove(self, ctx: commands.Context, *roles: typing.Union[discord.Role, int]):\n data = self.search(ctx.guild.id)\n\n if not data:\n await ctx.send(\"Join role system is not setup\")\n return\n\n removes = \"\"\n fails = \"\"\n for i in roles:\n num = i.id if isinstance(i, discord.Role) else i\n try:\n data.data.remove(num)\n except ValueError:\n fails += f\"<@&{num}>\\n\"\n else:\n removes += f\"<@&{num}>\\n\"\n\n self.db.update_one({\"guild_id\": ctx.guild.id}, {\"$set\": {\"role_array\", data.data}})\n\n embed = discord.Embed(\n title=\"Updated roles in the join role system\",\n colour=0xe74c3c\n )\n embed.add_field(name=\"Removed roles\", value=\"None\" if removes == \"\" else removes, inline=False)\n embed.add_field(name=\"Failed to remove\", value=\"None\" if fails == \"\" else fails, inline=False)\n await ctx.send(embed=embed)", "def removeAllUsers(self):\n self._userStore.removeAllUsers()\n self._movieRoomUserListStore.clear()\n self._mainRoomUserListStore.clear()", "def destroy_all_by_user(self, context, project_id, user_id):\n self._driver.destroy_all_by_user(context, user_id)", "def remove_role(user, role):\n return _assign_or_remove_role(user, role, \"remove_role_from_user\")", "def removeRole(self, role):\n pass", "async def RemoveBasicTag(self, user:User):\n await user.remove_roles(self.basicRole)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add files to the output list which tells ARC CE which files to upload
def create_output_list(files, init_dir, ddmconf):
    if not ddmconf:
        raise PilotException("copy_out() failed to resolve ddmconf from function arguments",
                             code=ErrorCodes.STAGEOUTFAILED, state='COPY_ERROR')

    for fspec in files:
        arcturl = fspec.turl
        if arcturl.startswith('s3://'):
            # Use Rucio proxy to upload to OS
            arcturl = re.sub(r'^s3', 's3+rucio', arcturl)
            # Add failureallowed option so failed upload does not fail job
            rucio = 'rucio://rucio-lb-prod.cern.ch;failureallowed=yes/objectstores'
            rse = fspec.ddmendpoint
            activity = 'write'
            arcturl = '/'.join([rucio, arcturl, rse, activity])
        else:
            # Add ARC options to TURL
            checksumtype, checksum = list(fspec.checksum.items())[0]  # Python 2/3
            # resolve token value from fspec.ddmendpoint
            token = ddmconf.get(fspec.ddmendpoint).token
            if not token:
                logger.info('No space token info for %s', fspec.ddmendpoint)
            else:
                arcturl = re.sub(r'((:\d+)/)', r'\2;autodir=no;spacetoken=%s/' % token, arcturl)
            arcturl += ':checksumtype=%s:checksumvalue=%s' % (checksumtype, checksum)

        logger.info('Adding to output.list: %s %s', fspec.lfn, arcturl)

        # Write output.list
        with open(os.path.join(init_dir, 'output.list'), 'a') as f:
            f.write('%s %s\n' % (fspec.lfn, arcturl))
[ "def write_files(self, outpath=None):\n files = '\\n'.join(self.files + [''])\n if outpath:\n with open(outpath, 'w') as outfile:\n outfile.write(files)\n print(outpath)\n else:\n print(files)", "def inputFiles(self):\n pass", "def on_added_handler(file_list):\n print_file_list(file_list, \"Added\")\n for f in file_list:\n logger.info('Sending file \"{0}\"...'.format(f));\n send_file(f)", "def get_files(self, filelist, dest, progtrack, version, header=None, pub=None):\n\n raise NotImplementedError", "def get_files_to_generate(self):\r\n pass", "def getCandidateFiles(self, outputList, outputLFNs, fileMask):\n fileInfo = {}\n for outputFile in outputList:\n if outputFile.has_key('outputFile') and outputFile.has_key('outputDataSE') and outputFile.has_key('outputPath'):\n fname = outputFile['outputFile']\n fileSE = outputFile['outputDataSE']\n filePath = outputFile['outputPath']\n fileInfo[fname] = {'path' : filePath, 'workflowSE' : fileSE}\n else:\n self.log.error('Ignoring malformed output data specification', str(outputFile))\n\n for lfn in outputLFNs:\n if os.path.basename(lfn) in fileInfo.keys():\n fileInfo[os.path.basename(lfn)]['lfn']=lfn\n self.log.verbose('Found LFN %s for file %s' %(lfn, os.path.basename(lfn)))\n if len(os.path.basename(lfn))>127:\n self.log.error('Your file name is WAAAY too long for the FileCatalog. Cannot proceed to upload.')\n return S_ERROR('Filename too long')\n if len(lfn)>256+127:\n self.log.error('Your LFN is WAAAAY too long for the FileCatalog. Cannot proceed to upload.')\n return S_ERROR('LFN too long')\n \n #Check that the list of output files were produced\n for fileName, metadata in fileInfo.items():\n if not os.path.exists(fileName):\n self.log.error('Output data file %s does not exist locally' % fileName)\n if not self.ignoreapperrors:\n return S_ERROR('Output Data Not Found')\n del fileInfo[fileName]\n #Check the list of files against the output file mask (if it exists)\n #candidateFiles = {}\n #if fileMask:\n ##nothing to do yet, as FileMask is not used\n #for fileName,metadata in fileInfo.items():\n # if metadata['type'].lower() in fileMask or fileName.split('.')[-1] in fileMask:\n # candidateFiles[fileName]=metadata\n # else:\n # self.log.info('Output file %s was produced but will not be treated (outputDataFileMask is %s)' %(fileName,\n # string.join(self.outputDataFileMask,', ')))\n\n #if not candidateFiles.keys():\n # return S_OK({}) #nothing to do\n # candidateFiles = fileInfo\n #else:\n #do not apply mask to files\n \n candidateFiles = fileInfo\n #Sanity check all final candidate metadata keys are present (return S_ERROR if not)\n mandatoryKeys = ['path', 'workflowSE', 'lfn'] #filedict is used for requests\n for fileName, metadata in candidateFiles.items():\n for key in mandatoryKeys:\n if not metadata.has_key(key):\n return S_ERROR('File %s has missing %s' % (fileName, key))\n \n return S_OK(candidateFiles)", "def record_processed_filenames(self):\n with open(os.path.join(self.cfg.output_preprocess,\n \"processed_filenames.txt\"), \"a\") as in_file:\n for fic in self.processed_filenames:\n in_file.write(fic + \"\\n\")", "def incrementOutputFiles(self):\n self.closeOutputFiles()\n \n self.output_file_count+=1\n \n self.createOutputFiles(self.output_tag)", "def update_files():\r\n set_to_file(Crawler.queue, Crawler.queueFile)\r\n set_to_file(Crawler.crawled, Crawler.crawledFile)\r\n external_to_file(Crawler.external, Crawler.externalFile)", "def push_files(self) -> Sequence['outputs.IosDeviceFileResponse']:\n return pulumi.get(self, 
\"push_files\")", "def get_files_to_deploy(self) -> List[FileToDeploy]:", "def files_to_push(self) -> Sequence['outputs.DeviceFileResponse']:\n return pulumi.get(self, \"files_to_push\")", "def upload(self, filenames):\n\n print(\"I am going to upload the following files\", filenames)\n\n for f in filenames:\n print(\"uploading\", f)\n self.filenames = args.filenames\n payload = {\n 'email': self.email,\n 'title': os.path.basename(f)\n }\n files = {'file': open(f, 'rb')}\n r = requests.post(\"http://logs.uaventure.com/upload\",\n data=payload, files=files)\n\n if r.status_code == requests.codes.ok:\n print(\"uploaded\", f)\n else:\n print(\"error while uploading\", f, \"status code:\", r.status_code)\n print(\"Dumping response:\\n\", r.raw)\n\n if self.verbose:\n print(r.text)\n\n time.sleep(1)", "def write_files(self, line_list):\n\n line_list.extend([\n \"\",\n \"#\",\n \"# Object files to work with for the library\",\n \"#\",\n \"\"\n ])\n\n obj_list = []\n if self.solution.project_list:\n codefiles = self.solution.project_list[0].codefiles\n\n for item in codefiles:\n if item.type in (FileTypes.c, FileTypes.cpp, FileTypes.x86):\n\n tempfile = convert_to_linux_slashes(\n item.relative_pathname)\n index = tempfile.rfind(\".\")\n if index == -1:\n entry = tempfile\n else:\n entry = tempfile[:index]\n\n index = entry.rfind(\"/\")\n if index != -1:\n entry = entry[index + 1:]\n\n obj_list.append(entry)\n\n if obj_list:\n colon = \"OBJS= \"\n for item in sorted(obj_list):\n line_list.append(colon + \"$(A)/\" + item + \".obj &\")\n colon = \"\\t\"\n # Remove the \" &\" from the last line\n line_list[-1] = line_list[-1][:-2]\n\n else:\n line_list.append(\"OBJS=\")\n return 0", "def add_list_to_basket(self, file_list):\n for file_name in file_list:\n self.click_add_to_basket_icon(file_name)", "def fileinputlist(logtype,target_day):\n exchanges = ['adbrite','admeld','adnexus','casale','contextweb',\n 'id','nexage','openx','rtb','rubicon','yahoo'] \n base_hdfs = '/tmp/log_sync/'+logtype+'/'+target_day+'/'\n outfilename = log_type+'-'+target_day+'-s3_locations.txt' \n for exchange in exchanges:\n output = open(outfilename,'w')\n print >> output, 'bid_all',exchange,target_day \n output.close()\n hdfs_target = base_hdfs+target_day+'/'+outfilename\n os.system('hadoop fs -put '+outfilename+' '+hdfs_target)\n os.system('rm '+outfilename)", "def list_files() -> dict:\n endpoint_url = '/real-time-response/entities/put-files/v1'\n response = http_request('GET', endpoint_url)\n return response", "def append(self, urls, outfiles, post_request=True):\n assert len(urls) == len(outfiles)\n self.urls += urls\n self.outfiles += outfiles\n _LOGGER.debug('append %d => %d',\n len(urls), len(self.urls))", "def list_patch_files(self, resourcelist, max_files=-1):\n rdf_out_files = sorted(glob(os.path.join(self.resource_dir, PATTERN_RDF_OUT + \"*\")))\n if len(rdf_out_files) > 0:\n rdf_out_files.pop() # remove last from list\n n = 0\n for file in rdf_out_files:\n filename = os.path.basename(file)\n timestamp = self.extract_timestamp(file)\n length = os.stat(file).st_size\n md5 = compute_md5_for_file(file)\n resourcelist.add(\n Resource(self.publish_url + filename, md5=md5, length=length, lastmod=timestamp, path=file))\n n += 1\n if 0 < max_files == n:\n break\n\n exhausted = len(rdf_out_files) == n\n return exhausted" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gives a new contour where its elements are larger than dim_min_contuor
def filtro_contornos(contuors, dim_min_contuor, dim_max_contuor):
    new_contuors = []
    for contuor in contuors:
        if len(contuor) > dim_min_contuor and len(contuor) < dim_max_contuor:
            new_contuors.append(contuor)
    return new_contuors
[ "def consistance_noeuds(self):\n\n for c in self.contraintes:\n if c.dimension() == 1:\n # /!\\ iterer sur domaine[:], sinon on ne peut pas supprimer d'elements\n for v in c.variables[0].domaine[:]:\n if not c.est_valide(v):\n c.variables[0].domaine.remove(v)\n c.variables[0].label.remove(v)", "def Dimension(self):\r\n global dimension\r\n fenetreDim = Toplevel()\r\n fenetreDim.transient(self)\r\n fenetreDim.title('Quelle dimension ? ')\r\n dim = IntVar()\r\n dim.set(self.__dimension)\r\n def ajout(i):\r\n \"\"\" aux qui incrémente la dimension \"\"\"\r\n dim.set(dim.get()+i)\r\n \r\n for k in [-10,-1]:\r\n Button(fenetreDim,\r\n text=str(k),\r\n command=lambda i=k : ajout(i)).pack(side=LEFT)\r\n Label(fenetreDim,textvariable = dim).pack(side=LEFT,padx=20)\r\n for k in [1,10]:\r\n Button(fenetreDim,\r\n text='+'+str(k),\r\n command=lambda i=k : ajout(i)).pack(side=LEFT)\r\n \r\n def repondre():\r\n \"\"\" aux qui confirme la dimension \"\"\"\r\n global dimension\r\n self.__dimension = dim.get()\r\n dimension = self.get_dimension()\r\n self.NouvellePartie()\r\n fenetreDim.destroy()\r\n \r\n Button(fenetreDim,text='OK',command=repondre).pack(side=LEFT,padx=20)", "def minimum_rows(boxes, minimum):\n pass", "def addMines(self, numberOfMines):\n\t\twhile numberOfMines > 0 :\n\t\t\trow_num = random.randint(0, self.configuration.rows - 1)\n\t\t\tcol_num = random.randint(0, self.configuration.cols - 1)\n\t\t\tif self.elements[row_num][col_num].content == EMPTY:\t\t\t\t\n\t\t\t\tself.elements[row_num][col_num].content = BOMB\n\t\t\t\tnumberOfMines -= 1", "def ganador_vuelta(ciclista,tiempo,):\r\n tiempos = []\r\n for i in range(0, len(tiempo)):\r\n suma = 0\r\n for j in range(0, len(tiempo[i])):\r\n suma = suma + tiempo[i][j]\r\n tiempos.append(suma) \r\n minimo = None\r\n for i in range(0,len(tiempos)):\r\n if minimo is None or tiempos[i] < minimo:\r\n minimo = tiempos[i]\r\n quien = i \r\n return ciclista[quien]", "def getMinSize(self):\n minW = minH = 0 # Let's see if we need bigger than this.\n for e in self.elements:\n eMinW, eMinH = e.getMinSize()\n minW = max(minW, eMinW)\n minH += eMinH\n return minW, minH", "def compress(self):\n self.nodes = numpy.zeros([self.bounds[0] / 10 + 10, self.bounds[1] / 10 + 10], dtype='uint8')\n\n for row_index, row in enumerate(self.nodes):\n for node_index, node in enumerate(row):\n begin_x = node_index * 10\n begin_y = row_index * 10\n if numpy.count_nonzero(self.grid[begin_y:begin_y + 10, begin_x:begin_x + 10]): # temp fix by adding 10 nodes of wiggle room\n self.nodes[node_index][row_index] = 1", "def llenarTablero(self):\n y=0 \n for miniterminos in self.renglonesHoja:\n for mini in miniterminos.miniterminosSimpli:\n x=self.indices[mini]\n self.columnas[y][x]=1\n y=y+1", "def minObjects(*args, **kwargs):\n \n pass", "def expandeaza(self):\r\n succesori = []\r\n nod_graf_curent = self.nod_graf.info\r\n matrice = NodParcurgere.problema.matrice_clasa\r\n\r\n for (i, j) in [(nod_graf_curent[0], nod_graf_curent[1] - 1),\r\n (nod_graf_curent[0], nod_graf_curent[1] + 1),\r\n (nod_graf_curent[0] - 1, nod_graf_curent[1]),\r\n (nod_graf_curent[0] + 1, nod_graf_curent[1])]: # parcurge lista celor 4 posibili succesori ai nodului curent (self)\r\n if 0 <= i < len(matrice) and 0 <= j < len(matrice[0]):\r\n if matrice[i][j] != \"liber\": # verifica daca pozitia succesorului este ocupata de vreun elev\r\n if ((matrice[i][j], matrice[nod_graf_curent[0]][nod_graf_curent[1]]) not in NodParcurgere.problema.lista_suparati) and 
((matrice[nod_graf_curent[0]][nod_graf_curent[1]], matrice[i][j]) not in NodParcurgere.problema.lista_suparati): # verifica daca elevul reprezentand nodul curent si cu elevul ce reprezinta posibilul succesor nu sunt certati\r\n if i in [len(matrice) - 1, len(matrice) - 2]: # verfica daca succesorul se afla pe ultimele doua linii\r\n nod_info = (i, j)\r\n if i == nod_graf_curent[0] + 1:\r\n directie = \"v\"\r\n elif i == nod_graf_curent[0] - 1:\r\n directie = \"^\"\r\n elif j == nod_graf_curent[1] + 1:\r\n if j % 2 == 1:\r\n directie = \">\"\r\n else:\r\n directie = \">>\"\r\n else:\r\n if j % 2 == 0:\r\n directie = \"<\"\r\n else:\r\n directie = \"<<\"\r\n succesori.append((Nod(nod_info, self.fct_h(nod_info)), 1, directie))\r\n\r\n elif not ((j == nod_graf_curent[1] + 1 and j % 2 == 0) or (j == nod_graf_curent[1] - 1 and j % 2 == 1)): # in acest caz succesorul nu se afla pe ultimele doua linii\r\n if i == nod_graf_curent[0] + 1:\r\n directie = \"v\"\r\n elif i == nod_graf_curent[0] - 1:\r\n directie = \"^\"\r\n elif j == nod_graf_curent[1] + 1:\r\n directie = \">\"\r\n else:\r\n directie = \"<\"\r\n nod_info = (i, j)\r\n succesori.append((Nod(nod_info, self.fct_h(nod_info)), 1, directie))\r\n\r\n return succesori", "def remove_small_short_objects(objects_id,\n area_objects,\n min_area,\n min_time,DT):\n\n #create final object array\n sel_objects = np.zeros(objects_id.shape,dtype=int)\n\n new_obj_id = 1\n for obj,_ in enumerate(area_objects):\n AreaTest = np.max(\n np.convolve(\n np.array(area_objects[obj]) >= min_area * 1000**2,\n np.ones(int(min_time/ DT)),\n mode=\"valid\",\n )\n )\n if (AreaTest == int(min_time/ DT)) & (\n len(area_objects[obj]) >= int(min_time/ DT)\n ):\n sel_objects[objects_id == (obj + 1)] = new_obj_id\n new_obj_id += 1\n\n return sel_objects", "def revisar(self):\n cambios = 0\n # verifico valores posibles únicos en el grupo\n for celda1 in self.celdas:\n if celda1.vacia():\n for valor in celda1.posible:\n cantidad = self.incluye([valor])\n if cantidad == 1:\n # mensaje(celda1,valor,\"Asumiendo por \" + self.tipo)\n celda1.setvalor(valor)\n cambios += 1\n\n # verifico combinaciones de N valores que se repiten en N celdas\n for celda in self.celdas:\n # recorro las combinaciones de distintas longitudes a partir de 2\n for largo in range(1, len(celda.posible)):\n for comb in combinations(celda.posible, largo):\n cantidad = self.incluye(comb)\n # si la cantidad es exactamente la longitud\n if cantidad == largo and largo == len(comb):\n cantidad_unitaria = self.incluye_unit(comb)\n # si no hay celdas que cumplan\n if cantidad_unitaria == 0:\n cambios += self.asignar(comb)\n return cambios", "def LayoutBoundsMinX(self) -> float:", "def set_minimum_weight(self):\n for i in range(len(self.particle_cloud)):\n if math.isnan(self.particle_cloud[i].w):\n self.particle_cloud[i].w = self.minimum_weight", "def aggiornamicro(self):\r\n self.Visualizzamicro.delete(1.0, END)\r\n stringa = self.CD.microistruzioni\r\n self.Visualizzamicro.insert(INSERT, stringa)\r\n self.Visualizzamicro.see(END)", "def euristica_admisibila_1(self, infoNod, tip_euristica):\n estimari = [self.diferenta_stari_1(infoNod, stare_finala) for stare_finala in self.scopuri]\n return min(estimari)", "def LayoutMarginMinX(self) -> float:", "def k_11():\n return np.ones((1, elements_layout[0] * elements_layout[1]))", "def expandeaza(self):\n succ = []\n nod_c = self.nod_graf\n arce = self.problema.arce\n for a in arce:\n if a.capat == nod_c.info:\n succ.append((problema.cauta_nod_nume(a.varf), a.cost))\n\n return 
succ" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the minified version of a template.
def minify(self):
    return htmlmin.minify(self.render().decode('utf8'), remove_comments=True, remove_empty_space=True)
[ "def get_source_template():", "def minify_js(file_path, src):\n in_str = StringIO(src)\n out_str = StringIO()\n JavascriptMinify().minify(in_str, out_str)\n src = out_str.getvalue()\n in_str.close()\n out_str.close()\n return src", "def minify_code(file_path) -> str:\n # Open the file and read it's content.\n with open(file_path, 'r') as f:\n source = f.read()\n\n # Get tokens from file.\n tokens = token_utils.listified_tokenizer(source)\n # Minify the file content based on the tokens\n minified = minification.minify(tokens, PyminiferOptions())\n # Recompute tokens from minified version.\n tokens = token_utils.listified_tokenizer(minified)\n # Final result on file minified.\n result = token_utils.untokenize(tokens)\n\n return result", "def Minify(source, filename):\n file_type = path.splitext(filename)[1]\n minifier = None\n if file_type == '.js':\n minifier = __js_minifier\n elif file_type == '.css':\n minifier = __css_minifier\n if not minifier:\n return source\n p = subprocess.Popen(\n minifier,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (stdout, stderr) = p.communicate(source)\n if p.returncode != 0:\n print('Minification failed for %s' % filename)\n print(stderr)\n sys.exit(p.returncode)\n return stdout", "def materialize(template, substitutions, outfile=None):\n materialized_str = template\n for param, val in substitutions.items():\n materialized_str = re.sub(param, val, materialized_str)\n\n if outfile:\n with open(outfile, \"w\") as of:\n of.write(materialized_str)\n\n return materialized_str", "def _make_jsmin(python_only=False):\n # pylint: disable = unused-variable\n # pylint: disable = too-many-locals\n\n if not python_only:\n try:\n import _rjsmin\n except ImportError:\n pass\n else:\n return _rjsmin.jsmin\n try:\n xrange\n except NameError:\n xrange = range # pylint: disable = redefined-builtin\n\n space_chars = r'[\\000-\\011\\013\\014\\016-\\040]'\n\n line_comment = r'(?://[^\\r\\n]*)'\n space_comment = r'(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)'\n space_comment_nobang = r'(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)'\n bang_comment = r'(?:/\\*![^*]*\\*+(?:[^/*][^*]*\\*+)*/)'\n\n string1 = \\\n r'(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)'\n string2 = r'(?:\"[^\"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]|\\r?\\n|\\r)[^\"\\\\\\r\\n]*)*\")'\n string3 = r'(?:`(?:[^`\\\\]|\\\\.)*`)'\n strings = r'(?:%s|%s|%s)' % (string1, string2, string3)\n\n charclass = r'(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\])'\n nospecial = r'[^/\\\\\\[\\r\\n]'\n regex = r'(?:/(?![\\r\\n/*])%s*(?:(?:\\\\[^\\r\\n]|%s)%s*)*/)' % (\n nospecial, charclass, nospecial)\n space = r'(?:%s|%s)' % (space_chars, space_comment)\n newline = r'(?:%s?[\\r\\n])' % line_comment\n\n def fix_charclass(result):\n \"\"\" Fixup string of chars to fit into a regex char class \"\"\"\n pos = result.find('-')\n if pos >= 0:\n result = r'%s%s-' % (result[:pos], result[pos + 1:])\n\n def sequentize(string):\n \"\"\"\n Notate consecutive characters as sequence\n\n (1-4 instead of 1234)\n \"\"\"\n first, last, result = None, None, []\n for char in map(ord, string):\n if last is None:\n first = last = char\n elif last + 1 == char:\n last = char\n else:\n result.append((first, last))\n first = last = char\n if last is not None:\n result.append((first, last))\n return ''.join([\n '%s%s%s' % (chr(first), last > first + 1 and '-'\n or '', last != first and chr(last) or '')\n for first, last in result\n ]) # noqa\n\n return _re.sub(\n 
r'([\\000-\\040\\047])', # \\047 for better portability\n lambda m: '\\\\%03o' % ord(m.group(1)),\n (sequentize(result).replace('\\\\', '\\\\\\\\').replace(\n '[', '\\\\[').replace(']', '\\\\]')))\n\n def id_literal_(what):\n \"\"\" Make id_literal like char class \"\"\"\n match = _re.compile(what).match\n result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])\n return '[^%s]' % fix_charclass(result)\n\n def not_id_literal_(keep):\n \"\"\" Make negated id_literal like char class \"\"\"\n match = _re.compile(id_literal_(keep)).match\n result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])\n return r'[%s]' % fix_charclass(result)\n\n not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')\n preregex1 = r'[(,=:\\[!&|?{};\\r\\n]'\n preregex2 = r'%(not_id_literal)sreturn' % locals()\n\n id_literal = id_literal_(r'[a-zA-Z0-9_$]')\n id_literal_open = id_literal_(r'[a-zA-Z0-9_${\\[(!+-]')\n id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\\])\"\\047+-]')\n post_regex_off = id_literal_(r'[^\\000-\\040}\\])?:|,;.&=+-]')\n\n dull = r'[^\\047\"`/\\000-\\040]'\n\n space_sub_simple = _re.compile((\n # noqa pylint: disable = bad-continuation\n r'(%(dull)s+)' # 0\n r'|(%(strings)s%(dull)s*)' # 1\n r'|(?<=%(preregex1)s)'\n r'%(space)s*(?:%(newline)s%(space)s*)*'\n r'(%(regex)s)' # 2\n r'(%(space)s*(?:%(newline)s%(space)s*)+' # 3\n r'(?=%(post_regex_off)s))?'\n r'|(?<=%(preregex2)s)'\n r'%(space)s*(?:(%(newline)s)%(space)s*)*' # 4\n r'(%(regex)s)' # 5\n r'(%(space)s*(?:%(newline)s%(space)s*)+' # 6\n r'(?=%(post_regex_off)s))?'\n r'|(?<=%(id_literal_close)s)'\n r'%(space)s*(?:(%(newline)s)%(space)s*)+' # 7\n r'(?=%(id_literal_open)s)'\n r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)' # 8\n r'|(?<=\\+)(%(space)s)+(?=\\+)' # 9\n r'|(?<=-)(%(space)s)+(?=-)' # 10\n r'|%(space)s+'\n r'|(?:%(newline)s%(space)s*)+') % locals()).sub\n\n # print space_sub_simple.__self__.pattern\n\n def space_subber_simple(match):\n \"\"\" Substitution callback \"\"\"\n # pylint: disable = too-many-return-statements\n\n groups = match.groups()\n if groups[0]:\n return groups[0]\n elif groups[1]:\n return groups[1]\n elif groups[2]:\n if groups[3]:\n return groups[2] + '\\n'\n return groups[2]\n elif groups[5]:\n return \"%s%s%s\" % (\n groups[4] and '\\n' or '',\n groups[5],\n groups[6] and '\\n' or '',\n )\n elif groups[7]:\n return '\\n'\n elif groups[8] or groups[9] or groups[10]:\n return ' '\n else:\n return ''\n\n space_sub_banged = _re.compile((\n # noqa pylint: disable = bad-continuation\n r'(%(dull)s+)' # 0\n r'|(%(strings)s%(dull)s*)' # 1\n r'|(?<=%(preregex1)s)'\n r'(%(space)s*(?:%(newline)s%(space)s*)*)' # 2\n r'(%(regex)s)' # 3\n r'(%(space)s*(?:%(newline)s%(space)s*)+' # 4\n r'(?=%(post_regex_off)s))?'\n r'|(?<=%(preregex2)s)'\n r'(%(space)s*(?:(%(newline)s)%(space)s*)*)' # 5, 6\n r'(%(regex)s)' # 7\n r'(%(space)s*(?:%(newline)s%(space)s*)+' # 8\n r'(?=%(post_regex_off)s))?'\n r'|(?<=%(id_literal_close)s)'\n r'(%(space)s*(?:%(newline)s%(space)s*)+)' # 9\n r'(?=%(id_literal_open)s)'\n r'|(?<=%(id_literal)s)(%(space)s+)(?=%(id_literal)s)' # 10\n r'|(?<=\\+)(%(space)s+)(?=\\+)' # 11\n r'|(?<=-)(%(space)s+)(?=-)' # 12\n r'|(%(space)s+)' # 13\n r'|((?:%(newline)s%(space)s*)+)' # 14\n ) % locals()).sub\n\n # print space_sub_banged.__self__.pattern\n\n keep = _re.compile(\n (r'%(space_chars)s+|%(space_comment_nobang)s+|%(newline)s+'\n r'|(%(bang_comment)s+)') % locals()).sub\n keeper = lambda m: m.groups()[0] or ''\n\n # print keep.__self__.pattern\n\n def space_subber_banged(match):\n \"\"\" 
Substitution callback \"\"\"\n # pylint: disable = too-many-return-statements\n\n groups = match.groups()\n if groups[0]:\n return groups[0]\n elif groups[1]:\n return groups[1]\n elif groups[3]:\n return \"%s%s%s%s\" % (\n keep(keeper, groups[2]),\n groups[3],\n keep(keeper, groups[4] or ''),\n groups[4] and '\\n' or '',\n )\n elif groups[7]:\n return \"%s%s%s%s%s\" % (\n keep(keeper, groups[5]),\n groups[6] and '\\n' or '',\n groups[7],\n keep(keeper, groups[8] or ''),\n groups[8] and '\\n' or '',\n )\n elif groups[9]:\n return keep(keeper, groups[9]) + '\\n'\n elif groups[10] or groups[11] or groups[12]:\n return keep(keeper, groups[10] or groups[11] or groups[12]) or ' '\n else:\n return keep(keeper, groups[13] or groups[14])\n\n def jsmin(script, keep_bang_comments=False):\n r\"\"\"\n Minify javascript based on `jsmin.c by Douglas Crockford`_\\.\n\n Instead of parsing the stream char by char, it uses a regular\n expression approach which minifies the whole script with one big\n substitution regex.\n\n .. _jsmin.c by Douglas Crockford:\n http://www.crockford.com/javascript/jsmin.c\n\n :Parameters:\n `script` : ``str``\n Script to minify\n\n `keep_bang_comments` : ``bool``\n Keep comments starting with an exclamation mark? (``/*!...*/``)\n\n :Return: Minified script\n :Rtype: ``str``\n \"\"\"\n # pylint: disable = redefined-outer-name\n\n if keep_bang_comments:\n return space_sub_banged(space_subber_banged,\n '\\n%s\\n' % script).strip()\n else:\n return space_sub_simple(space_subber_simple,\n '\\n%s\\n' % script).strip()\n\n return jsmin", "def get_precompiled_template(self, service_id):\n return self.get(f\"/service/{service_id}/template/precompiled\")", "def build_js(self):\n babel = get_filter('babel', presets='babel-preset-env')\n for js in self.templates_path.rglob('*.js'):\n print(js)\n self.assets.register(js.name, Bundle(\n str(js),\n output=f'gen/js/{js.stem}.min.js',\n filters=[babel, 'rjsmin']\n ))", "def compress(js):\n import httplib, urllib, sys\n params = urllib.urlencode([\n ('js_code', js),\n ('compilation_level', 'SIMPLE_OPTIMIZATIONS'),\n ('output_format', 'text'),\n ('output_info', 'compiled_code'),\n ])\n headers = { \"Content-type\": \"application/x-www-form-urlencoded\" }\n conn = httplib.HTTPConnection('closure-compiler.appspot.com')\n conn.request('POST', '/compile', params, headers)\n response = conn.getresponse()\n return response.read()", "def get_mako_template(tmpl_name):\n tmpl_path = tfds.core.utils.get_tfds_path(\n 'scripts/documentation/templates/%s.mako.md' % tmpl_name)\n with tf.io.gfile.GFile(tmpl_path, 'r') as tmpl_f:\n tmpl_content = tmpl_f.read()\n return mako.lookup.Template(tmpl_content, default_filters=['str', 'trim'])", "def minify_manifest(src):\n return re.sub(r\"[\\t ]+\", r\"\\t\", minify_properties(src))", "def dump_template(self):\n return self.stack.template.body", "def minify_html (text: str):\n\n with HTMLMinifier() as minifier:\n minifier.feed(text)\n return minifier.get_result()", "def test_javascript_minify(client):\n resp = client.get(\"/js\")\n assert MINIFIED_JS == resp.data", "def rewrite_template(self, template_name: str):\n operand_to_type_mapping, code = parser.preprocess(template_name)\n annotations: List[Annotations] = parser.parse(operand_to_type_mapping, code)\n rewritten_program: str = self.compiler.apply_substitution(annotations, self.debug)\n return '\\n'.join([f' {line}' for line in rewritten_program.split('\\n')])", "def _templated(entry, template, keep_multiple=False):\n match = []\n for t in template:\n def 
compare(x):\n return abs(x - t)\n match.append(min(entry, key=compare))\n if not keep_multiple:\n match = list(set(match))\n return sorted(match)", "def get_template_ext(self) -> str:", "def get_js_contents(name, is_debug_mode=False):\n js_path = fs.join(path.SRC_ROOT, 'js', name + '.js')\n with open(js_path, \"r\") as f:\n code = f.read()\n\n # minimize code when in release mode\n if not is_debug_mode:\n code = remove_comments(code)\n code = remove_whitespaces(code)\n return code", "def get_source(self, environment, template):\n (source, filename, uptodate) = self.path.get_source(environment, template)\n source = \"\\n\".join(self.macros) + \"\\n\" + source\n return source, filename, uptodate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the minified version of a template to the output directory. If use_subdir is True, then it is written to index.html in a subdirectory of OUTPUT_PATH with the same name as the template.
def export(self, use_subdir=True):
    if use_subdir:
        os.makedirs(os.path.join(OUTPUT_PATH, self.name))
        path_to_write = os.path.join(OUTPUT_PATH, self.name, 'index.html')
    else:
        path_to_write = os.path.join(OUTPUT_PATH, 'index.html')
    with open(path_to_write, 'w') as outfile:
        outfile.write(self.minify().encode('utf8'))
[ "def main():\n dest_dir = \".public\"\n if os.path.isdir(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html'])\n )\n\n ignore_files = ignoreFile()\n files_in_dir = os.walk('templates')\n filenames = [filename for _, _, filename in files_in_dir]\n files = [filename for filename in filenames[0] if filename not in ignore_files]\n for i in files:\n template = env.get_template(i)\n final_html = template.render()\n\n\n write_prefix = glob.glob(\".public\")[0]\n write_path = os.path.join(write_prefix, i)\n print write_path\n try:\n html_file = codecs.open(write_path, 'w', 'utf8')\n html_file.write(final_html)\n finally:\n html_file.close()", "def minify_app(self):\n \n self._generate_application_directory_tree()\n \n if self.rootdir and self.outputdir:\n \n if self.rootdir.endswith('/'):\n \n self.rootdir=self.rootdir[:-1]\n \n outputdir=os.path.join(os.path.dirname(self.rootdir),self.outputdir)\n \n if not os.path.exists(outputdir):\n \n try:\n \n os.makedirs(outputdir)\n \n except IOError,e:\n \n print \"PHPApplicationMinify could not create the output directory : %s\"%outputdir\n \n return\n \n \n \n self._compress_files()\n \n self.statistics()", "def write_site_content(self):\n\n for root, dirs, files in os.walk(self.BASE_DIR):\n base = root.replace(self.BASE_DIR, '')\n base = base.lstrip(os.path.sep)\n\n for d in self.ignoreFilter(dirs):\n nd = os.path.join(self.DEPLOY_DIR, base, d)\n if not os.path.isdir(nd):\n os.makedirs(nd)\n dirs[:] = self.ignoreFilter(dirs)\n\n for f in self.ignoreFilter(files):\n if Page.transformable(f):\n Page(os.path.join(root, f),\n self.layouts,\n self.context).write()\n else:\n path = os.path.abspath(root)\n path = path.replace(os.path.abspath(self.BASE_DIR), '', 1)\n path = path.lstrip(os.path.sep)\n path = os.path.join(self.DEPLOY_DIR, path)\n if not os.path.isdir(path):\n os.makedirs(path)\n shutil.copy(os.path.join(root, f), os.path.join(path, f))", "def write(self):\n for key, value in self.templates.iteritems():\n template_in, template_out = value\n path = '{0}/{1}'.format(self.dest or '.', template_out)\n audit(\"Writing: {0}\".format(path))\n with open(path, 'w') as f:\n f.write(self.__generate_code(template_in))", "def write_output(self):\n env = Environment(loader=FileSystemLoader(str(self.template_path)))\n env.globals.update(len=len)\n\n for module in self.filesmap:\n # Create TOC file\n toc_out = self.output_path / f\"{module}.rst\"\n foldername = self.output_path / module\n basename = os.path.basename(module)\n innerdirs = []\n\n # Create list of submodules (innerdirs)\n if module in self.dirsmap:\n innerdirs = [(os.path.basename(module) +\n '/' + os.path.basename(innerdir))\n for innerdir in self.dirsmap[module]]\n\n # Create a list of documented modules\n # (represented by a doxygen file)\n innerfiles = [basename + \"/\" + os.path.basename(\n innerfile.rsplit(\".\", 1)[0])\n for innerfile in self.filesmap[module]]\n\n # Set jinja2-template, base or overloaded\n template = self.template_path / f\"{module}.rst.j2\"\n if template.exists():\n toc_template = env.get_template(f'{module}.rst.j2')\n else:\n toc_template = env.get_template(self.TOC_TEMPLATE)\n\n # Render template into a buffer\n buffer = toc_template.render(title=basename,\n refs=list(innerfiles) + innerdirs)\n\n # Create subdirectory on filesystem if needed\n os.makedirs(foldername, exist_ok=True)\n\n # Write out buffer to a file\n with toc_out.open(\"w\") as output:\n 
os.makedirs(os.path.dirname(toc_out), exist_ok=True)\n output.write(buffer)\n\n # Create doxygen ref-files\n for filename in self.filesmap[module]:\n file = filename.rsplit(\".\", 1)[0]\n # Set jinja2-template, base or overloaded\n template = self.template_path / module / f\"{file}.rst.j2\"\n if template.exists():\n template = f\"{module}/{file}.rst.j2\"\n doxygenfile_template = env.get_template(template)\n else:\n doxygenfile_template = env.get_template(\n self.DOXYFILE_TEMPLATE)\n\n # Render template into buffer\n buffer = doxygenfile_template.render(\n title=file, doxyfile=basename + \"/\" + filename)\n\n # Write out buffer to a file\n doxyfile_out = foldername / f\"{file}.rst\"\n with doxyfile_out.open(\"w\") as output:\n output.write(buffer)", "def write_template(self, outfile):\n if os.path.islink(outfile):\n return\n mode = os.stat(outfile)[stat.ST_MODE]\n with open(outfile, 'r') as f:\n template_src = f.read()\n template = jinja2.Template(template_src)\n with open(outfile, 'wb') as f:\n os.chmod(outfile, mode)\n f.write(template.render(self.template_context).encode('utf-8'))", "def copy_theme_static_files(self, context) -> None:\n\n # Generic files, copied over from mkdoc's build.py\n exclude_patterns = [\n \".*\",\n \"*/.*\",\n \"*.py\",\n \"*.pyc\",\n \"*.html\",\n \"*readme*\",\n \"mkdocs_theme.yml\",\n ]\n # Filenames for rendered documents\n exclude_patterns.extend(f\"*{x}\" for x in self.app.config.source_suffix.keys())\n\n def exclude_filter(name):\n for pattern in exclude_patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n\n to_write = []\n environment = self.templates.environment\n for path in environment.list_templates(filter_func=exclude_filter):\n path = os.path.normpath(path)\n\n for location in self.templates.translator.theme.dirs:\n if os.path.isfile(os.path.join(location, path)):\n to_write.append((location, path))\n break\n\n for location, path in to_write:\n source = os.path.join(location, path)\n destination = os.path.join(self.outdir, path)\n renderer = self.templates\n\n # Ensure directory exists\n parent_dir = os.path.dirname(destination)\n\n # HACK: We only \"render\" template-y files.\n if \"templates\" not in location:\n copy_asset(source, os.path.dirname(destination))\n continue\n\n os.makedirs(parent_dir, exist_ok=True)\n with open(source, \"r\") as fsrc:\n with open(destination, \"w\", encoding=\"utf-8\") as fdst:\n source_text = fsrc.read()\n result = renderer.render_string(source_text, context)\n fdst.write(result)", "def write_pack(\n input_pack: BasePack,\n output_dir: str,\n sub_path: str,\n indent: Optional[int] = None,\n zip_pack: bool = False,\n overwrite: bool = False,\n drop_record: bool = False,\n serialize_method: str = \"json\",\n) -> str:\n output_path = os.path.join(output_dir, sub_path)\n\n if overwrite or not os.path.exists(output_path):\n ensure_dir(output_path)\n input_pack.serialize(\n output_path,\n zip_pack=zip_pack,\n drop_record=drop_record,\n serialize_method=serialize_method,\n indent=indent,\n )\n else:\n logging.info(\"Will not overwrite existing path %s\", output_path)\n\n logging.info(\"Writing a pack to %s\", output_path)\n return output_path", "def setOutFileTemplate(self, outfiletemplate):\n self.outfiletemplate = outfiletemplate\n self.log.debug(\"Changed tempalte to %s\"%outfiletemplate)\n if self.__folderscreated: self.write()", "def minify_css_directory(gen, source, target):\n import rcssmin\n\n plugin_paths = gen.settings['PLUGIN_PATHS']\n for path in plugin_paths:\n source_ = 
os.path.join(path, 'pelican-brepository', source)\n target_ = os.path.join(path, 'pelican-brepository', target)\n if os.path.isdir(source_):\n if not os.path.exists(target_):\n os.makedirs(target_)\n\n for root, dirs, files in os.walk(source_):\n for current_file in files:\n if current_file.endswith(\".css\"):\n current_file_path = os.path.join(root, current_file)\n with open(current_file_path) as css_file:\n with open(os.path.join(target_, current_file.replace('.css', '.min.css')), \"w\") as minified_file:\n minified_file.write(rcssmin.cssmin(css_file.read(), keep_bang_comments=True))\n elif current_file.endswith(\".eot\") or current_file.endswith(\".svg\") or current_file.endswith(\".ttf\") or current_file.endswith(\".woff\"):\n current_file_path = os.path.join(root, current_file)\n target_file = os.path.join(target_, current_file)\n shutil.copyfile(current_file_path, target_file)", "def write(self, path: GenPath, ctx: GenContext):\n # TODO:mdrachuk:06.01.2020: warn if site, ctx, source are in props or front matter!\n path.create(self.template.render(\n site=ctx.site,\n ctx=ctx,\n content=self,\n markdown=self.render(ctx),\n **self.front_matter,\n **self._evaluated_props(ctx),\n ))", "def render_files(self):\n if self.keep_dir_structure:\n print \"keeping directory structure\"\n self.render_content_recursive(self.input_path, self.output_path)\n return\n ## Else recurse into directory, render files one by one\n files = ls_recursive(self.input_path)\n for f in files:\n filename = get_filename_from_pathname(f)\n outpath = os.path.join(self.output_path, self.get_output_filename(filename))\n if not self.tword or self.tword in f.split('.'):\n print \" rendering: %s\" % (f,)\n self.render_content(f, outpath)\n else:\n if self.copy_not_matching:\n print \" copying: %s\" % (outpath,)\n self.simple_copy(f, outpath)\n else:\n print \" ignoring: %s\" % (f,)", "def substitute_to_file(self, template_path, output_path):\n with open(output_path, 'wb') as of:\n of.write(self.substitute(template_path))", "def build_js(self):\n babel = get_filter('babel', presets='babel-preset-env')\n for js in self.templates_path.rglob('*.js'):\n print(js)\n self.assets.register(js.name, Bundle(\n str(js),\n output=f'gen/js/{js.stem}.min.js',\n filters=[babel, 'rjsmin']\n ))", "def write_page(self):\n self._write_page_stylesheet()\n self._copy_job_logs()\n webpath = os.path.join(self.config['AUTOCMS_WEBDIR'], self.testname)\n if not os.path.exists(webpath):\n os.makedirs(webpath)\n # write a 'index.html.new' file and then rename it to\n # 'index.html'. 
This prevents users from viewing a half\n # completed webpage when the page refreshes.\n newpagepath = os.path.join(webpath, 'index.html.new')\n pagepath = os.path.join(webpath, 'index.html')\n with open(newpagepath, 'w') as output_file:\n output_file.write(self.page)\n os.rename(newpagepath, pagepath)", "def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")", "def generate(self, template_dir, defaults, overwrite=False):\n for root_dir, dirs, files in os.walk(template_dir):\n for file_name in files:\n template_filename = os.path.join(root_dir, file_name)\n # info('template_filename: %s' % template_filename)\n dest_filename = self.resolve_template_dir(str(template_filename.replace(template_dir, '.')),\n defaults['package'])\n self._render(template_filename, template_dir, dest_filename, defaults, overwrite=overwrite)", "def write_configs(self, template_map=None):\n if template_map is None:\n try:\n template_map = self.config['template_map']\n except KeyError:\n logger.error(\"Missing template_map from config.yaml\")\n raise\n for template_name, config_path in template_map.items():\n template = self.template_lookup.get_template(template_name)\n directory = os.path.dirname(config_path)\n if not os.path.exists(directory):\n logger.info(\"Creating directory: {}\".format(directory))\n os.makedirs(directory)\n\n render_to_files(template, config_path, **self.config)", "def write_template(filepath: str) -> None:\n with open(filepath, 'w') as f:\n f.write(Config.template())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the repr for a flip sign operator.
def test_repr():
    op = qml.FlipSign([0, 1], wires=("a", "b"))
    expected = "FlipSign([0, 1], wires=['a', 'b'])"
    assert repr(op) == expected
[ "def _invert_signs(signs):\n return signs[0] < 0", "def is_sign_reversing(func):\r\n\tfor i in func.domain():\r\n\t\tif func(i).get_sign() != -i.get_sign():\r\n\t\t\treturn False\r\n\treturn True", "def _sign(x):\n if _copysign(1.0, x) == -1.0:\n return \"-\"\n else:\n return \"+\"", "def flip_signs(self):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation flip_signs')\n\n # Do the operation\n if self.sym is not None:\n self.sym[0] = ''.join(FLIP[i] for i in self.sym[0])\n self.ten.sym[0] = ''.join(FLIP[i] for i in self.ten.sym[0])", "def is_op_not_subtract(char):\n return char in OPS_WITHOUT_SUBTRACTION", "def flip(self):\n self.signal = '+' if self.signal == '-' else '-'\n self.keep()", "def change_sign_for_outcoming_fermion(self):\n \n flip_sign = []\n for i in range(1,len(self.spins),2):\n if self.spins[i] == 2:\n flip_sign.append(str(i))\n \n if not flip_sign:\n return self.lorentz_expr\n momentum_pattern = re.compile(r'\\bP\\(([\\+\\-\\d]+),(%s)\\)' % '|'.join(flip_sign))\n lorentz_expr = momentum_pattern.sub(r'P(\\1,\\2, -1)', self.lorentz_expr)\n return lorentz_expr", "def get_subtract_flag(self):\n return 0x40 & self.get_f()", "def checkPowerSign(self,sign):\n\n if sign == \"^\":\n return True\n else:\n return False", "def test_reverse_comp_hyphen(self):\n\t\thyphy = \"-\"\n\t\tself.assertEqual(self.funfil.ReverseCompliment(hyphy), hyphy)", "def flip_turn(symbol):\n if symbol == 'X':\n return 'O'\n else:\n return 'X'", "def isFlipNorm(*args, **kwargs):\n \n pass", "def is_inverted(key):\n ops = parse(key)\n return isinstance(ops[0], el.Invert)", "def checkPlusOrMinusSign(self,sign):\n\n if sign == \"+\" or sign == \"-\":\n return True\n else:\n return False", "def flipped(self) -> bool:\n return self.determinant < 0", "def direction_sign(self):\n\n for elem in self.direction:\n if elem < 0:\n self.direction = -self.direction", "def fix_minus(s):\n return (s.replace('-', '\\N{MINUS SIGN}')\n if mpl.rcParams['axes.unicode_minus']\n else s)", "def test_operation_inverse_defined(self, qnode_for_inverse):\n assert qnode_for_inverse.qtape.operations[0].name == \"RZ.inv\"\n assert qnode_for_inverse.qtape.operations[0].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[0].__class__, qml.operation.Operation)\n assert qnode_for_inverse.qtape.operations[1].name == \"RZ\"\n assert not qnode_for_inverse.qtape.operations[1].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[1].__class__, qml.operation.Operation)", "def getInverted(self) -> bool:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert error raised when length of basis state and wires length does not match
def test_length_not_match_error(self, n_status, n_wires):
    with pytest.raises(
        ValueError,
        match="Wires length and flipping state length does not match, they must be equal length ",
    ):
        qml.FlipSign(n_status, wires=n_wires)
[ "def test_param_invalid_input_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.dataempty, self.dataout, 100.0)", "def test_validate_invalid_final_state_non_str(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.final_states = {4}\n self.ntm1.validate()", "def test_validate_invalid_initial_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.initial_state = 'q4'\n self.ntm1.validate()", "def test_invalid_init_vals_length_in_estimate(self):\n # Bundle the arguments used to construct the nested logit model\n constructor_args = [self.fake_df,\n self.alt_id_col,\n self.obs_id_col,\n self.choice_col,\n self.fake_specification,\n self.fake_names]\n # Bundle the kwargs for constructing the nested_logit_model\n constructor_kwargs = {\"nest_spec\": self.fake_nest_spec}\n\n # Create the mnl model object whose coefficients will be estimated.\n base_nl = nl.NestedLogit(*constructor_args, **constructor_kwargs)\n\n # Create an estimator object.\n zero_vector = np.zeros(self.fake_all_params.shape[0])\n estimator_args = [base_nl,\n base_nl.get_mappings_for_fit(),\n None,\n zero_vector,\n nl.split_param_vec]\n estimator_kwargs = {\"constrained_pos\": [1]}\n nested_estimator = nl.NestedEstimator(*estimator_args,\n **estimator_kwargs)\n\n # Alias the function being tested\n func = nested_estimator.check_length_of_initial_values\n\n # Test that the desired error is raised\n for i in [-1, 1]:\n init_values = np.arange(self.fake_all_params.shape[0] + i)\n\n self.assertRaisesRegexp(ValueError,\n \"values are of the wrong dimension\",\n func,\n init_values)\n\n return None", "def test_build_states_invalid_buffer_capacity():\n with pytest.raises(ValueError):\n build_states(\n threshold=None,\n system_capacity=None,\n buffer_capacity=0,\n )", "def test_validate_invalid_final_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.final_states = {'q4'}\n self.ntm1.validate()", "def test_param_invalid_output_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.data, self.dataempty, 100.0)", "def test_line_outofrange(self):\n self.st.append( (4,0) )\n self.o.state = self.st\n self.assertTrue(self.o.timer == 0, \"timer is wrong\")\n self.assertTrue(self.o.state == (), \"state is wrong\")\n self.assertEqual(self.o.board.count(0), self.o.nbl*self.o.nbc,\n \"board is wrong\")", "def test_build_ops_error():\n qubit = cirq.LineQubit.range(1)\n with pytest.raises(ValueError):\n cirq_utils.qubit_op_to_gate('W', qubit[0])", "def test_invalid_qubit_state_vector(self, rep):\n dev = DefaultTensorTF(wires=2, representation=rep)\n state = np.array([0, 123.432])\n\n with pytest.raises(\n ValueError, match=r\"can apply QubitStateVector only to all of the 2 wires\"\n ):\n dev.execute([qml.QubitStateVector(state, wires=[0])], [], {})", "def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])", "def test_length_less_than_2(self):\n self.assertFalse(can_romberg([]))\n self.assertFalse(can_romberg([1]))", "def test_validate_nwb_error(simple3_nwb: Path) -> None:\n validation_result = validate(simple3_nwb)\n assert len([i for i in validation_result if i.severity]) > 0", "def test_invalid_burst_cycle_count(self):\r\n with self.assertRaises(fygen.InvalidBurstCycleCountError):\r\n 
self.fy.set_modulation(burst_count=0)", "def _checkValidity(self) -> None:\n\n fresnel_zone_dist = np.sqrt(self._probe_params.wavelength * self._det_params.obj_dist)\n fresnel_zone_npix = fresnel_zone_dist / self._det_params.pixel_pitch\n\n error_str = (f\"Step size ({self._scan_params.scan_step_npix} is too small. \"\n + f\"Ensure that the step size is at least larger than the Fresnel zone width \"\n + f\"({fresnel_zone_npix}) to ensure diversity in the diffraction patterns.\")\n assert self._scan_params.scan_step_npix > fresnel_zone_npix, error_str", "def test_different_batch_sizes_raises_error(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n with pytest.raises(\n ValueError, match=\"Broadcasting was attempted but the broadcasted dimensions\"\n ):\n _ = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3, 4]), wires=2))", "def _check_dimensions(self, states):\n if not states.shape[1] == self.ndim:\n raise DimensionError('the input argument has the wrong '\n 'dimensions.')", "def test_check_sized_array_name():\n length = 5\n array = np.ones(length)\n\n for name in ('weights', 'x-data', 'data'):\n with pytest.raises(ValueError, match=f'length mismatch for {name}'):\n _validation._check_sized_array(array, length + 1, name=name)", "def validate_state(self, state: np.ndarray):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Waits until the print event is detected. grid_timer_traza connect here
def grid_detect_traza(self):
    if self.main.grid_traza_control:
        self.grid_timer_traza.stop()
        if self.scan_check.isChecked():
            self.grid_scan_signal()
        else:
            self.grid_detect()
[ "def grid_detect_scan(self):\n if self.grid_scan_control:\n self.grid_timer_scan.stop()\n self.grid_detect()", "def process_timer(ctx, key, time):\n raise Exception('process_timer not implemented')", "def teleopPeriodic(self) -> None:\n ...", "def addTimerCallback(*args, **kwargs):\n \n pass", "def time_handler():\r\n global counter\r\n\r\n counter += 1", "def catch_alarm():\n comm_time_to_call_heart_beat = True", "def _nfvi_periodic_timer_event():\n while True:\n timer_id = (yield)\n DLOG.verbose(\"NFVI periodic timer called, timer_id=%s.\" % timer_id)\n\n host_table = tables.tables_get_host_table()\n for host in list(host_table.values()):\n host.periodic_timer()", "def timerEvent(self, event):\n\t\tQtGui.QLabel.timerEvent(self, event)\n\t\tif self.__refresh:\n\t\t\ttoUpdate = False\n\t\t\ttry:\n\t\t\t\ttoUpdate = self.__kinect.readDepth()\n\t\t\texcept self.__kinect.KinectError:\n\t\t\t\tself.killTimer(self.__timer)\n\t\t\t\tQtGui.QMessageBox.critical(self, u\"Kinect : connexion impossible\", u\"Connectez la Kinect correctement et relancez l'application.\")\n\t\t\tif toUpdate:\n\t\t\t\tself.__setKinectPixmap()", "def initiate_ping_event():\n pass", "def handle_create_timer(self, message):\n if self.neon_in_request(message):\n content = self._extract_alert_params(message, AlertType.TIMER)\n content[\"kind\"] = int(AlertType.TIMER)\n LOG.info(content)\n self.confirm_alert(\"timer\", content, message)", "def set_timer(self, timer):\n self.timer = timer", "def on_idle(self, *args):", "async def start_timer(app):\n asyncio.get_event_loop().create_task(check_proc())", "def timeFlow(self):", "def handle_timeout(self):\n\t\tpass", "def timer_callback(*unused_args):\n logging.debug(\"timer callback at %s\", datetime.datetime.now())", "def start_timer(self):\n if not self.running:\n if self.powerSpectra.is_running:\n print('Cant update while power spectra is running.')\n else:\n conditions = {}\n # Starts the timer for updating the GUI\n conditions['devs'] = self.devices\n conditions['accuracy'] = self._session.monitorTimeresol/1000 # In seconds\n self.trap.startMonitor(conditions)\n self.ctimer.start(self._session.monitorRefresh)\n\n self.running = True\n else:\n self.stop_timer()", "def startTimer(self, description):\r\n pbfProject = GetCurrentTogglProject()\r\n timeEntry = pytoggl.TimeEntry(description=description, pid=pbfProject.togglProject.id, created_with=\"TogglPBF\")\r\n pbfProject.togglAPI.timer.startTimer(timeEntry)", "def _start_timer(self):\r\n self.timer.Start(50)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Waits until the complete scan is detected. grid_timer_scan connect here
def grid_detect_scan(self):
    if self.grid_scan_control:
        self.grid_timer_scan.stop()
        self.grid_detect()
[ "def grid_detect_traza(self):\n if self.main.grid_traza_control:\n self.grid_timer_traza.stop()\n if self.scan_check.isChecked():\n self.grid_scan_signal()\n else:\n self.grid_detect()", "def man_scan(self, asynchron=True):\r\n interval = self.Stat.interval\r\n self.scan_start()\r\n\r\n def asynchron_timer(interval=interval):\r\n self.get_scancount()\r\n self.get_awn()\r\n if self.Stat.scancount != 0:\r\n self.step_next()\r\n timer = Timer(interval, asynchron_timer)\r\n timer.start()\r\n else:\r\n pass\r\n\r\n if asynchron is False:\r\n from time import sleep\r\n sleep(3)\r\n while True:\r\n self.get_scancount()\r\n if self.Stat.scancount == 0:\r\n break\r\n self.step_next()\r\n sleep(interval)\r\n\r\n else:\r\n from threading import Timer\r\n timer = Timer(interval, asynchron_timer)\r\n timer.start()", "def scan():\n global droneList\n if(isSim):\n connect()\n else:\n app.logger.info(\"scanning crazyflies\")\n updateDrones(droneList)\n return updateStats()", "def tray_scan_started(self):\n if not DBUS_AVAIL:\n return\n self._is_scanning = True\n self.init_network_menu()", "def test_running_scans():\n ipv4_hosts = [ipaddress.ip_address(u\"127.0.0.1\")]\n # ipv6_hosts = [\n # ipaddress.ip_address(u\"::1\")\n # ]\n\n scanner_handler = ScannerHandler(ipv4_hosts, [], [], [], mock=True)\n report = scanner_handler.run_scans()\n\n assert report.__class__ == Report\n assert report.nb_hosts == 1\n assert report.up == 1\n assert type(report.duration) == str\n assert \"127.0.0.1\" in [x.ip for x in report.results]\n # assert \"::1\" in [x.ip for x in report.results]", "def do_schedule(self, timer=True):\n if self.config[\"scan_type\"] == \"Complete Scan\":\n d = self.complete_scan()\n elif self.config[\"scan_type\"] == \"Quick Scan\":\n d = self.quick_scan()\n else:\n log.warning(\"Not expected scan_type option.\")\n\n d.addCallback(self.regulate_torrents)\n\n if timer:\n self.timer = reactor.callLater(self.config[\"check_rate\"] * 60,\n self.do_schedule)", "def scan_instance(instance):\n scanner = PortScanner()\n scanner.target = instance.public_ip_address\n scanner.start_port = args.start_port[0]\n scanner.end_port = args.end_port[0]\n scanner.threads = args.jobs[0]\n scanner.timeout = args.timeout[0]\n ports = scanner.scan()\n\n if len(ports) > 0:\n for port in ports:\n print(\"\\t\\t\\tPort: \"+str(port['Port'])+\"\\t\"+\"Service: \"+port['Service'])\n else:\n print(\"\\t\\t\\tNo open ports detected\")", "def _interface_scan_timeout_callback(self, _):\n interface_up = list()\n stats = net_if_stats()\n for interface in stats.keys():\n if interface != 'lo':\n if SysTools.is_if_oper_up(interface):\n interface_up.append(interface)\n reason = \"Current system up interface:{}, Cannot get any valid interfaces, reboot system\".format(interface_up)\n self.notify.error(rpd_event_def.RPD_EVENT_PROVISION_NO_INTERFACE_UP[0], \"\")\n SysTools.reboot_blocked(reason)", "def test_scanning_handling_skip_ping():\n host = ipaddress.ip_address(u\"82.64.28.100\")\n scanner_handler = ScannerHandler([host], [], [], [], mock=True, sudo=False)\n result = scanner_handler.run_scans()\n\n assert result.nb_hosts == 1\n assert result.up == 1", "def check_get_scan_timeout(self):\n if not self.received_scan:\n self.base.turn_off_motors()\n raise ValueError(\"getScan() never returned... aborting\")", "def check_get_scan_timeout(self):\n if not self.received_scan:\n self.base.turn_off_motors()\n raise ValueError(\"getScan() never returned... 
aborting\")\n # Should work out a better solution to shutdown.\n # Signaling with KeyboardInterrupt doesn't seem to work and process still hangs\n # Currently the workaround is that the node app will kill this\n # process if it receives an error", "def process_scans():\n logger.info(f'Process / process scans | start')\n try:\n print(f'##### Start update scans state job #####')\n logger.debug(f'Process / process scans | Calling scan provider - get not completed scans')\n scans = get_not_complete_scans()\n logger.debug(f'Process / process scans | Response from get not complete scans| scans = {scans}')\n if scans is not None:\n for scan in scans:\n update_scan_status(scan)\n logger.debug(f'Process / process scans | Response from scan provider - get not completed scans')\n except Exception as error:\n logger.error(\n f'Process / process scans | Ended with failure |'\n f'Error: type = {error.__class__.__name__}, message = {error}')\n else:\n logger.info(f'Process / process scans | Ended successfully')\n finally:\n print(f'##### End update scans state job #####')", "def check_get_scan_timeout(self):\n if not self.received_scan:\n raise ValueError(\"getScan() never returned... aborting\")\n # Should work out a better solution to shutdown.\n # Signaling with KeyboardInterrupt doesn't seem to work and process still hangs\n # Currently the workaround is that the node app will kill this\n # process if it receives an error", "def __wait_for_laser(self):\n ts = self.__state_collector.get_static_scan().header.stamp\n self.take_sim_step()\n scan = self.__state_collector.get_static_scan()\n begin = time.time()\n while len(scan.ranges) == 0 or scan.header.stamp <= ts:\n rospy.logdebug(\"Waiting for laser scan to get available.\")\n if(time.time() - begin > 1):\n self.take_sim_step()\n time.sleep(0.00001)\n scan = self.__state_collector.get_static_scan()\n return scan.ranges", "def wait_for_scan(self):\n while self.any_scans(bin(int(self.get_cond()))):\n time.sleep(1)", "def import_scan(self):\n pass", "def test_scanning_handling_unreachable():\n host = ipaddress.ip_address(u\"192.0.2.1\")\n scanner_handler = ScannerHandler([host], [], [], [], mock=True, sudo=True)\n result = scanner_handler.run_scans()\n\n assert result.nb_hosts == 1\n assert result.up == 0", "def scan(self):\n for addr in range(127):\n # Skip I2C addresses which are reserved.\n if addr <= 7 or addr >= 120:\n continue\n if self.ping(addr):\n self._log.debug('Detected device at address 0x{0:02x}.'.format(addr))", "def start_scan(self):\r\n try:\r\n out = self.get_output(\"scan on\")\r\n except BluetoothctlError, e:\r\n print(e)\r\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Watch for Hangouts bridge chat invites and add them to `hangouts_jids_seen`.
def roster_subscription_request(self, presence):
    from_jid = presence['from']

    if from_jid.domain == TALK_BRIDGE_DOMAIN:
        # Hangouts users get these goofy jids.
        # Replying to them doesn't seem to work, but replying to resources under it will.
        # So, we store the bare jid, with a weird name thing stripped out, then
        # wait for a resource to become active.
        if '--' in from_jid.user:
            waiting_jid = from_jid.bare.partition('--')[-1]
        else:
            waiting_jid = from_jid.bare

        self.logger.info("saw hangouts jid %s. message %r", from_jid, presence)
        self.hangouts_jids_seen.add(waiting_jid)
[ "def detect_hangouts_jids(self, presence):\n\n # TODO this should probably be removed, since it's diverged from the normal handler\n\n from_jid = presence['from']\n if from_jid.bare in self.hangouts_jids_seen and from_jid.resource:\n self.hangouts_jids_seen.remove(from_jid.bare)\n if self._should_send_to(from_jid):\n # Message type is important; omitting it will silently discard the message.\n self.logger.info(\"responding to %s via presence. message %r\", from_jid, presence)\n self.send_message(mto=from_jid, mbody=self.response, mtype='chat')\n self._sent_reply(from_jid)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n if p != settings.ROBOT_NICK+'@appspot.com':\n Notify(context, \"Hi, \" + p)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n if p == 'k-appa@appspot.com':\n Setup(context)\n break", "def send_inactive_sms():\n inactive_harambees = Harambee.objects.extra(where=['last_login::date = date_joined::date'])\n if inactive_harambees:\n #values_list() crashes and burns the poor Django, therefore used a loop to extract the ids\n inactive_ids = list()\n for h in inactive_harambees:\n inactive_ids.append(h.id)\n used_ids = list(Harambee.objects.exclude(id__in=inactive_ids).values_list('id', flat=True))\n queryset = InactiveSMS.objects.all().order_by('days')\n for item in queryset:\n used_ids = sms_inactive_harambees(used_ids, item.days, item.message)", "def invite_participants(self,iSurveyID):", "def _on_hangups_event(self, conv_event, retry=0):\n try:\n logger.info(\"Hangups Event: \"+conv_event.__class__.__name__)\n if isinstance(conv_event, hangups.ChatMessageEvent):\n conv = self._conv_list.get(conv_event.conversation_id)\n user = conv.get_user(conv_event.user_id)\n sender = util.get_nick(user)\n hostmask = util.get_hostmask(user)\n channel = util.conversation_to_channel(conv)\n message = conv_event.text\n print((hostmask+' -> '+channel+' : '+conv_event.text).encode('utf-8'))\n if len(conv.users) < 3 and re.match('[0-9_-]',channel):\n print(\"[SKIPPING SMS]\")\n return\n yield from self.slack.hangoutsMessage(conv, user, message)\n elif isinstance(conv_event, hangups.RenameEvent):\n conv = self._conv_list.get(conv_event.conversation_id)\n yield from self.slack.onHangoutsRename(conv, conv_event.old_name, conv_event.new_name)\n# elif isinstance(conv_event, hangups.MembershipChangeEvent):\n# conv = self._conv_list.get(conv_event.conversation_id)\n# users = [conv.get_user(uid) for uid in conv_event.participant_ids]\n# if conv_event.type == MEMBERSHIP_CHANGE_TYPE_JOIN:\n# yield from self.slack.onHangoutsJoin(conv, users)\n# elif conv_event.type == MEMBERSHIP_CHANGE_TYPE_LEAVE:\n# yield from self.slack.onHangoutsLeave(conv, users)\n# else:\n# logger.warning(\"Unknown membership change type: \"+str(conv_event.type))\n# elif isinstance(conv_event, hangups.HangoutEvent):\n# conv = self._conv_list.get(conv_event.conversation_id)\n# if conv_event.type == HANGOUT_EVENT_TYPE_START:\n# logger.info(\">>>>Call Start<<<<\")\n# elif conv_event.type == HANGOUT_EVENT_TYPE_END:\n# logger.info(\">>>>Call End<<<<\")\n# else:\n# logger.warning(\"Unknown hangout call event type: \"+str(conv_event.type))\n\n except:\n logger.exception(\"Error handling hangouts event!\")\n if retry < 5:\n yield from asyncio.sleep(retry+0.1)\n logger.info(\"RETRYING\")\n yield from self._on_hangups_event(conv_event, retry+1)\n else:\n logger.critical(\"##########GAVE UP RETRYING############\")", "def 
collect_new_messages(self):\n new_msgs = self.client_session.fetch_new_messages(\n len(self.chat_messages_of_session))\n for each in new_msgs:\n self.chat_messages_of_session.append(each)", "async def cmd_loginvites(self, ctx):\n\n inviteLog = list()\n\n try:\n for invite in await ctx.guild.invites():\n invit = {'max_age' : invite.max_age, 'created_at' : invite.created_at.__str__(), 'uses' : invite.uses, \n 'max_uses' : invite.max_uses, 'code' : invite.id}\n\n if invite.inviter is None:\n invit['inviter'] = {'name' : \"N/A\", 'id' : \"N/A\", 'discriminator' : \"N/A\", 'mention': \"N/A\",\n 'avatar_url' : \"https://discordapp.com/assets/6debd47ed13483642cf09e832ed0bc1b.png?size=128\"}\n else:\n invit['inviter'] = {'name' : invite.inviter.name, 'id' : invite.inviter.id, 'discriminator' : invite.inviter.discriminator,\n 'avatar_url' : AVATAR_URL_AS(invite.inviter), 'mention' : invite.inviter.mention}\n\n invit['channel'] = {'name' : invite.channel.name, 'id' : invite.channel.id, 'mention' : invite.channel.mention}\n\n inviteLog.append(invit)\n\n except (discord.errors.Forbidden, discord.errors.HTTPException):\n inviteLog = None\n\n if inviteLog is not None and len(inviteLog) != 0:\n with open(os.path.join('data','inviteHistory.json'), 'w', encoding='utf-8') as logHistory:\n json.dump(inviteLog, logHistory)\n\n await ctx.channel.send(content=\"Current invite information has been logged.\", delete_after=15)\n\n else:\n await ctx.channel.send(content=\"Current invite information could not be found.\", delete_after=15)\n\n return", "async def _inv_list(self, ctx):\n invites = await self.bot.invites_from(ctx.message.server)\n if len(invites) == 0:\n await self.bot.say(\":warning: There currently no invites active.\")\n else:\n await self.bot.say(\"Invites: {0}\".format(\", \".join(map(str, invites))))", "def invite_all(self):\n pass", "def sms_inactive_harambees(used_ids, num_days, message):\n start = timezone.now() - timedelta(days=num_days + 1)\n end = timezone.now() - timedelta(days=num_days)\n queryset = Harambee.objects\\\n .filter(last_login__range=[start, end])\\\n .exclude(id__in=used_ids)\n for harambee in queryset:\n harambee.send_sms(message)\n return used_ids + list(queryset.values_list('id', flat=True))", "def get_known_jids():\n global _known_jids\n _known_jids_lock.acquire()\n if _known_jids == None:\n _known_jids = ThreadSafeList()\n _known_jids_lock.release()\n return _known_jids", "async def mark_all_notifs_seen(self) -> int:\n url = self.get_api_url(\n TYPE_NOTIFICATION, action=\"UserNotification.MarkAllSeen()\", format=\"json\")\n async with self._session.post(url) as resp:\n return (await resp.json())[\"d\"][\"count\"]", "def invitePlayers(self):\n # from the set of all players we make a list of players who want to join a game.\n listOfPlayersToJoinAGame = list()\n for player in self.setOfPlayers:\n if player.wantsToJoinAGame:\n listOfPlayersToJoinAGame.append(player)\n # update the message about how many players want to join a game\n m.xPlayersWantToJoinAGame.whatToTransmit[0] = str(len(listOfPlayersToJoinAGame))\n # transmit the message\n messenger.transmit(m.xPlayersWantToJoinAGame, positionInWhatToTransmitWhichShouldBeRandomized=1)\n # since we want the players to be seated at the table randomly, we will shuffle this list\n random.shuffle(listOfPlayersToJoinAGame)\n # for each player taking a seat at the table we will put him into the table's seats dictionary\n # and transmit a message saying which player took which seat\n for (player, seatNumber) in 
zip(listOfPlayersToJoinAGame, self.table.setOfEmptySeats()):\n self.table.seats[seatNumber] = player\n # update the message about player taking a seat\n m.playerTakesSeatNumberX.updatePlayerName(player)\n # update the seatnumber in the message\n m.playerTakesSeatNumberX.whatToTransmit[1] = str(seatNumber)\n messenger.transmit(m.playerTakesSeatNumberX)", "def scan_for_orphans():\n\n orphaned = 0\n ticketed_jobs = []\n\n\n # When the backend is busy / crashing / being upgraded, heartbeats can take a very long time or fail.\n # The default engine heartbeats every 30 seconds. Be careful when lowering this interval.\n\n query = {\n 'state': 'running',\n 'modified': {'$lt': datetime.datetime.utcnow() - datetime.timedelta(seconds=300)},\n '_id': { '$nin': ticketed_jobs },\n }\n\n while True:\n orphan_candidate = config.db.jobs.find_one(query)\n if orphan_candidate is None:\n break\n\n # If the job is currently attempting to complete, do not orphan.\n ticket = JobTicket.find(orphan_candidate['_id'])\n if ticket is not None and len(ticket) > 0:\n ticketed_jobs.append(orphan_candidate['_id'])\n continue\n\n # CAS this job, since it does not have a ticket\n select = { '_id': orphan_candidate['_id'] }\n\n doc = config.db.jobs.find_one_and_update(\n dict(query, **select),\n {\n '$set': {\n 'state': 'failed', },\n },\n return_document=pymongo.collection.ReturnDocument.AFTER\n )\n\n if doc is None:\n log.info('Job %s was heartbeat during a ticket lookup and thus not orhpaned', orphan_candidate['_id'])\n else:\n orphaned += 1\n j = Job.load(doc)\n Logs.add(j.id_, [{'msg':'The job did not report in for a long time and was canceled.', 'fd':-1}])\n new_id = Queue.retry(j)\n Logs.add(j.id_, [{'msg': 'Retried job as ' + str(new_id) if new_id else 'Job retries exceeded maximum allowed', 'fd':-1}])\n\n return orphaned", "async def _announce(self, connection):\n await asyncio.sleep(.5)\n location = str(connection.player.location)\n for uuid in self.plugins[\"player_manager\"].players_online:\n p = self.plugins[\"player_manager\"].get_player_by_uuid(uuid)\n if str(p.location) == location and p.connection != connection:\n send_message(p.connection, \"{} has beamed down to the planet!\"\n .format(connection.player.alias))\n if location in self.storage[\"greetings\"]:\n send_message(connection, self.storage[\"greetings\"][location])", "async def on_join(self, channel: str, who: Optional[str]):", "def rpc_list_bots(self, sender, *args):\n \n if (len(args) != 0):\n raise rpc.RPCFault(604, 'list_bots: no arguments')\n ls = [ act.jid for act in self.factory.actors.values() ]\n return ls", "def on_join(self, source):\n # They shouldn't be able to join twice\n if source.name in self.participants:\n log = self._parent.logger.entry()\n log.color(\"warn\")\n log.title(f\"You're already in the battle, {source.name}\")\n log.desc(\"Just be patient, it will begin soon\")\n log.buffer(self.ctx.channel)\n return\n\n self.participants[source.name] = source\n log = self._parent.logger.entry()\n log.title(f\"{source.name} has entered the battle field!\")\n log.desc(\"TODO: User descriptions\")\n log.buffer(self.ctx.channel)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Watch for Hangouts bridge jids coming online and respond to any in `hangouts_jids_seen`.
def detect_hangouts_jids(self, presence):
    # TODO this should probably be removed, since it's diverged from the normal handler
    from_jid = presence['from']
    if from_jid.bare in self.hangouts_jids_seen and from_jid.resource:
        self.hangouts_jids_seen.remove(from_jid.bare)
        if self._should_send_to(from_jid):
            # Message type is important; omitting it will silently discard the message.
            self.logger.info("responding to %s via presence. message %r", from_jid, presence)
            self.send_message(mto=from_jid, mbody=self.response, mtype='chat')
            self._sent_reply(from_jid)
[ "def roster_subscription_request(self, presence):\n\n from_jid = presence['from']\n\n if from_jid.domain == TALK_BRIDGE_DOMAIN:\n # Hangouts users get these goofy jids.\n # Replying to them doesn't seem to work, but replying to resources under it will.\n # So, we store the bare jid, with a weird name thing stripped out, then\n # wait for a resource to become active.\n if '--' in from_jid.user:\n waiting_jid = from_jid.bare.partition('--')[-1]\n else:\n waiting_jid = from_jid.bare\n\n self.logger.info(\"saw hangouts jid %s. message %r\", from_jid, presence)\n self.hangouts_jids_seen.add(waiting_jid)", "def scan_for_orphans():\n\n orphaned = 0\n ticketed_jobs = []\n\n\n # When the backend is busy / crashing / being upgraded, heartbeats can take a very long time or fail.\n # The default engine heartbeats every 30 seconds. Be careful when lowering this interval.\n\n query = {\n 'state': 'running',\n 'modified': {'$lt': datetime.datetime.utcnow() - datetime.timedelta(seconds=300)},\n '_id': { '$nin': ticketed_jobs },\n }\n\n while True:\n orphan_candidate = config.db.jobs.find_one(query)\n if orphan_candidate is None:\n break\n\n # If the job is currently attempting to complete, do not orphan.\n ticket = JobTicket.find(orphan_candidate['_id'])\n if ticket is not None and len(ticket) > 0:\n ticketed_jobs.append(orphan_candidate['_id'])\n continue\n\n # CAS this job, since it does not have a ticket\n select = { '_id': orphan_candidate['_id'] }\n\n doc = config.db.jobs.find_one_and_update(\n dict(query, **select),\n {\n '$set': {\n 'state': 'failed', },\n },\n return_document=pymongo.collection.ReturnDocument.AFTER\n )\n\n if doc is None:\n log.info('Job %s was heartbeat during a ticket lookup and thus not orhpaned', orphan_candidate['_id'])\n else:\n orphaned += 1\n j = Job.load(doc)\n Logs.add(j.id_, [{'msg':'The job did not report in for a long time and was canceled.', 'fd':-1}])\n new_id = Queue.retry(j)\n Logs.add(j.id_, [{'msg': 'Retried job as ' + str(new_id) if new_id else 'Job retries exceeded maximum allowed', 'fd':-1}])\n\n return orphaned", "async def _announce(self, connection):\n await asyncio.sleep(.5)\n location = str(connection.player.location)\n for uuid in self.plugins[\"player_manager\"].players_online:\n p = self.plugins[\"player_manager\"].get_player_by_uuid(uuid)\n if str(p.location) == location and p.connection != connection:\n send_message(p.connection, \"{} has beamed down to the planet!\"\n .format(connection.player.alias))\n if location in self.storage[\"greetings\"]:\n send_message(connection, self.storage[\"greetings\"][location])", "def hiring_jobs(self):\n post_id = self.source.config.filter(config_key='post_id-who_is_hiring').first().config_value\n hn_item = self.client.get_item(post_id)\n # r'\\s*(?P<company>[^|]+?)\\s*\\|\\s*(?P<title>[^|]+?)\\s*\\|\\s*(?P<locations>[^|]+?)\\s*(?:\\|\\s*(?P<attrs>.+))?$'\n for comment_id in hn_item.kids:\n try:\n comment = self.client.get_item(comment_id)\n except hackernews.InvalidItemID as iiid:\n logger.warning('Tried to get non-existent comment with ID: %s; ex: %s', comment_id, iiid)\n continue\n if comment.text is None:\n logger.debug(\"Skipping blank comment: %s\", comment)\n continue\n post = self.parse_job_to_post(comment, subarea='who_is_hiring')\n post.title = 'Hiring - {}'.format(post.title)\n post.title = post.title[:255]\n yield post", "def _on_hangups_event(self, conv_event, retry=0):\n try:\n logger.info(\"Hangups Event: \"+conv_event.__class__.__name__)\n if isinstance(conv_event, hangups.ChatMessageEvent):\n 
conv = self._conv_list.get(conv_event.conversation_id)\n user = conv.get_user(conv_event.user_id)\n sender = util.get_nick(user)\n hostmask = util.get_hostmask(user)\n channel = util.conversation_to_channel(conv)\n message = conv_event.text\n print((hostmask+' -> '+channel+' : '+conv_event.text).encode('utf-8'))\n if len(conv.users) < 3 and re.match('[0-9_-]',channel):\n print(\"[SKIPPING SMS]\")\n return\n yield from self.slack.hangoutsMessage(conv, user, message)\n elif isinstance(conv_event, hangups.RenameEvent):\n conv = self._conv_list.get(conv_event.conversation_id)\n yield from self.slack.onHangoutsRename(conv, conv_event.old_name, conv_event.new_name)\n# elif isinstance(conv_event, hangups.MembershipChangeEvent):\n# conv = self._conv_list.get(conv_event.conversation_id)\n# users = [conv.get_user(uid) for uid in conv_event.participant_ids]\n# if conv_event.type == MEMBERSHIP_CHANGE_TYPE_JOIN:\n# yield from self.slack.onHangoutsJoin(conv, users)\n# elif conv_event.type == MEMBERSHIP_CHANGE_TYPE_LEAVE:\n# yield from self.slack.onHangoutsLeave(conv, users)\n# else:\n# logger.warning(\"Unknown membership change type: \"+str(conv_event.type))\n# elif isinstance(conv_event, hangups.HangoutEvent):\n# conv = self._conv_list.get(conv_event.conversation_id)\n# if conv_event.type == HANGOUT_EVENT_TYPE_START:\n# logger.info(\">>>>Call Start<<<<\")\n# elif conv_event.type == HANGOUT_EVENT_TYPE_END:\n# logger.info(\">>>>Call End<<<<\")\n# else:\n# logger.warning(\"Unknown hangout call event type: \"+str(conv_event.type))\n\n except:\n logger.exception(\"Error handling hangouts event!\")\n if retry < 5:\n yield from asyncio.sleep(retry+0.1)\n logger.info(\"RETRYING\")\n yield from self._on_hangups_event(conv_event, retry+1)\n else:\n logger.critical(\"##########GAVE UP RETRYING############\")", "def juggle(data):\n ticket_ids = data.get('ticket_ids')\n tickets = get_tickets(ticket_ids)\n location = None\n for ticket in tickets:\n ticket.status = TicketStatus.juggled\n ticket.hold_time = datetime.datetime.utcnow()\n ticket.rerequest_threshold = ticket.hold_time + datetime.timedelta(minutes=int(ConfigEntry.query.get(\"juggling_delay\").value))\n location = ticket.location\n emit_event(ticket, TicketEventType.juggle)\n db.session.commit()\n return get_next_ticket(location)", "def who_wants_jobs(self):\n post_id = self.source.config.filter(config_key='post_id-who_wants_to_be_hired').first().config_value\n hn_item = self.client.get_item(post_id)\n for comment_id in hn_item.kids:\n try:\n comment = self.client.get_item(comment_id)\n except hackernews.InvalidItemID as iiid:\n logger.warning('Tried to get non-existent comment with ID: %s; ex: %s', comment_id, iiid)\n continue\n if comment.text is None:\n logger.debug(\"Skipping blank comment: %s\", comment)\n continue\n post = self.parse_job_to_post(comment, subarea='who_wants_to_be_hired', insert_author=True)\n post.title = 'For Hire - {}'.format(post.title)\n post.title = post.title[:255]\n yield post", "def ping_bots():\n bot_manager = BotManager.instance()\n logging.debug(\"Pinging open botnet websockets\")\n\n for bot in bot_manager.all():\n wsocket = bot_manager.botnet[bot.wsock_uuid]\n wsocket.ping()\n bot.last_ping = datetime.now()\n bot_manager.save_bot(bot)\n\n for muuid in bot_manager.monitors:\n for monitor in bot_manager.monitors[muuid]:\n monitor.ping()", "def get_connected_zombies(connection):\n connected_zombies = []\n for ind, connection in enumerate(connection):\n if connection == \"1\":\n connected_zombies.append(ind)\n return 
connected_zombies", "def on_leave_jap(self, data: dict):\n app.logger.info(\n \"Leave jap \"\n + str(data[\"jap_event_id\"])\n + \" received from \"\n + str(data[\"user_id\"])\n )\n\n room = self.__get_jap_event_room(data[\"jap_event_id\"])\n self.__remove_from_event(data[\"user_id\"], room)\n answer = {**data, \"members\": self.connected_by_jap_event[room]}\n\n if \"table_id\" in data:\n self.__remove_from_table(data[\"user_id\"], data[\"table_id\"])\n answer[\"table_members\"] = self.connected_at_table[data[\"table_id\"]]\n\n emit(socket_messages[\"USER_LEFT_JAP\"], answer, room=room)\n\n leave_room(room)\n\n if \"table_id\" in data:\n leave_room(self.__get_table_room(data[\"table_id\"]))", "def on_join_jap(self, data: dict):\n app.logger.info(\"JOIN_JAP\")\n app.logger.info(request.sid)\n\n session_id = request.sid\n user_id = data[\"user_id\"]\n jap_event_id = data[\"jap_event_id\"]\n\n self.session_id_user_id[session_id] = {\n \"user_id\": user_id,\n \"jap_event_id\": jap_event_id,\n }\n\n jap_event = JapEventService.get_jap_event(jap_event_id)\n\n table = TableService.get_user_table(user_id, jap_event_id)\n\n new_member = UserService.get_user(user_id)\n new_member_dict = asdict(new_member)\n room = self.__get_jap_event_room(data[\"jap_event_id\"])\n\n if (\n room not in self.connected_by_jap_event\n or new_member_dict not in self.connected_by_jap_event[room]\n ):\n join_room(room)\n self.__add_to_event(new_member, room)\n emit(\n socket_messages[\"USER_JOINED_JAP\"],\n {\n \"jap_event_id\": data[\"jap_event_id\"],\n \"new_member\": new_member_dict,\n \"members\": self.connected_by_jap_event[room],\n },\n room=room,\n )\n if table:\n self.on_join_table(\n {\n \"user_id\": user_id,\n \"jap_event_id\": jap_event_id,\n \"table_id\": table.id,\n }\n )\n else:\n # checkt that the user is not a jap creator, otherwise it must have a table.\n if jap_event.creator_id == user_id:\n raise (\n Exception(\n \"Error at jap creation for jap creator, not added to a table\"\n )\n )", "def recordHandoutsPerBridge(self, bridgeRequest, bridges):\n\n handoutsPrefix = \"{}.handouts\".format(self.keyPrefix)\n\n if bridgeRequest is None or bridges is None:\n logging.warning(\"Given bridgeRequest and bridges cannot be None.\")\n return\n\n # Keep track of how many IPv4 and IPv6 requests we are seeing.\n ipVersion = bridgeRequest.ipVersion\n if ipVersion not in [4, 6]:\n logging.warning(\"Got bridge request for unsupported IP version \"\n \"{}.\".format(ipVersion))\n return\n else:\n self.inc(\"{}.ipv{}\".format(handoutsPrefix, ipVersion))\n\n # Keep track of how many times we're handing out a given bridge.\n for bridge in bridges:\n # Use bridge lines as dictionary key. 
We cannot use the bridge\n # objects because BridgeDB reloads its descriptors every 30\n # minutes, at which points the bridge objects change.\n key = bridge.getBridgeLine(bridgeRequest)\n num = self.bridgeHandouts.get(key, None)\n if num is None:\n self.bridgeHandouts[key] = 1\n else:\n self.bridgeHandouts[key] = num + 1\n\n # We need more than two handouts to calculate our statistics.\n values = self.bridgeHandouts.values()\n if len(values) <= 2:\n return\n\n # Update our statistics.\n self.set(\"{}.median\".format(handoutsPrefix),\n statistics.median(values))\n self.set(\"{}.min\".format(handoutsPrefix), min(values))\n self.set(\"{}.max\".format(handoutsPrefix), max(values))\n self.set(\"{}.unique-bridges\".format(handoutsPrefix),\n len(self.bridgeHandouts))\n # Python 3.8 comes with a statistics.quantiles function, which we\n # should use instead of numpy once 3.8 is available in Debian stable.\n q1, q3 = numpy.quantile(numpy.array(list(values)), [0.25, 0.75])\n self.set(\"{}.quartile1\".format(handoutsPrefix), q1)\n self.set(\"{}.quartile3\".format(handoutsPrefix), q3)\n # Determine our inter-quartile range (the difference between quartile 3\n # and quartile 1) and use it to calculate the upper and lower whiskers\n # as you would see them in a boxplot.\n iqr = q3 - q1\n lowerWhisker = min([x for x in values if x >= q1 - (1.5 * iqr)])\n upperWhisker = max([x for x in values if x <= q3 + (1.5 * iqr)])\n self.set(\"{}.lower-whisker\".format(handoutsPrefix), lowerWhisker)\n self.set(\"{}.upper-whisker\".format(handoutsPrefix), upperWhisker)", "def send_inactive_sms():\n inactive_harambees = Harambee.objects.extra(where=['last_login::date = date_joined::date'])\n if inactive_harambees:\n #values_list() crashes and burns the poor Django, therefore used a loop to extract the ids\n inactive_ids = list()\n for h in inactive_harambees:\n inactive_ids.append(h.id)\n used_ids = list(Harambee.objects.exclude(id__in=inactive_ids).values_list('id', flat=True))\n queryset = InactiveSMS.objects.all().order_by('days')\n for item in queryset:\n used_ids = sms_inactive_harambees(used_ids, item.days, item.message)", "def get_known_jids():\n global _known_jids\n _known_jids_lock.acquire()\n if _known_jids == None:\n _known_jids = ThreadSafeList()\n _known_jids_lock.release()\n return _known_jids", "def journeys(card_id):\n return render_template('journeys.html',\n journeys=g.account.card_journeys(card_id))", "def motif_judgement(self):\n for candidate in self.observer:\n if (candidate in self.cheaters.keys()):\n if (datetime.strptime(self.observer[candidate], '%Y-%m-%d %H:%M:%S.%f') + timedelta(days=5)).date() > (datetime.strptime(self.cheaters[candidate][0], '%Y-%m-%d')).date():\n if (datetime.strptime(self.observer[candidate], '%Y-%m-%d %H:%M:%S.%f')).date() < (datetime.strptime(self.cheaters[candidate][0], '%Y-%m-%d')).date():\n self.count +=1\n return self.count", "def poll_event():\n server = self._query_server()\n if server:\n players_new = set([player.name for player in server.players])\n players_connected = players_new - self.players\n if players_connected:\n announce = u'%s: %s connected' % (\n server.vars['sv_hostname'], self._natural_join(map(self._sub_color, players_connected)))\n self._announce(announce)\n players_disconnected = self.players - players_new\n if players_disconnected:\n announce = u'%s: %s disconnected' % (\n server.vars['sv_hostname'], self._natural_join(map(self._sub_color, players_disconnected)))\n self._announce(announce)\n self.players = players_new.copy()\n else:\n 
self.players = set()", "def rpc_list_bots(self, sender, *args):\n \n if (len(args) != 0):\n raise rpc.RPCFault(604, 'list_bots: no arguments')\n ls = [ act.jid for act in self.factory.actors.values() ]\n return ls", "def check_job_status_list(job_ids): \n logger.info(f\"monitoring job status for {len(job_ids)} submitted jobs\") \n \n while True: \n time.sleep(10) \n job_id_list, states = [], []\n for job_id in job_ids: \n _job_ids, _states = query_job_states(job_id) \n job_id_list += _job_ids \n states += _states \n # Sometimes query_job_state() does not return, so we wait again\n if not states or not job_id_list:\n continue\n if all([state == \"COMPLETED\" for state in states]): \n return 1 # Pass \n elif any([check in states for check in BAD_STATES]): # Any bad states? \n logger.info(\"atleast 1 system job returned a failing exit code\") \n for job_id, state in zip(job_ids, states): \n if state in BAD_STATES: \n logger.debug(f\"{job_id}: {state}\") \n return -1 # Fail " ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an Upvote instance
def create_upvote(count=0, upvoters=None):
    if upvoters is None:
        upvoters = []
    return Upvote(count, upvoters)
[ "def upvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.UPVOTE_BUTTON, 'Upvoting')", "def upvotePost(self):\n self.votes = self.votes + 1\n self.save()", "def upvote(checkin_id):\n\n\tvote = \"up\"\n\n\treturn update_vote(checkin_id, vote)", "def test_update_add_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_add_upvote('upvoter 2 uuid', 2, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def test_update_remove_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_remove_upvote('upvoter 1 uuid', 0, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def vote_up(self, question, user):\n vote, created = self.get_or_create(user=user, question=question,\n defaults={'vote': Vote.UP_VOTE})\n if not created:\n vote.vote = Vote.UP_VOTE\n vote.save()", "def upvote(request, issue_id):\n if request.method == 'POST':\n issue = get_object_or_404(Issue, pk=issue_id)\n issue.upvotes += 1\n issue.save()\n messages.success(request, 'Upvoted successfully!')\n return redirect('/issue/' +str(issue.id))", "def vote(self, user_id):\n already_voted = self.has_voted(user_id)\n vote_status = None\n if not already_voted:\n # vote up the post\n db.engine.execute(\n PostUpvote.insert(),\n user_id=user_id,\n post_id=self.id\n )\n self.votes = self.votes + 1\n vote_status = True\n else:\n # unvote the post\n db.engine.execute(\n PostUpvote.delete(\n db.and_(\n PostUpvote.user_id == user_id,\n PostUpvote.post_id == self.id\n )\n )\n )\n self.votes = self.votes - 1\n vote_status = False\n db.session.commit() # for the vote count\n return vote_status", "def upvote(self, count):\n count = self.validate_positive_integer(count)\n\n self.vote_score = self.vote_score + count\n return self.vote_score", "def update(self, request, pk):\n instance = Post.objects.get(pk=pk)\n instance.amount_of_upvotes += 1\n instance.save()\n\n serializer = self.get_serializer(instance)\n return Response(serializer.data)", "def upvote_view(self, request, obj_id):\n\n obj = get_object_or_404(self.model, pk=unquote(obj_id))\n obj.upvote()\n\n return HttpResponseRedirect(request.META['HTTP_REFERER'])", "def upvote(self, obj):\n\n # If no ID is present Quote has not yet been saved to database\n # so have upvote do nothing instead of cause a Interval Server Error (500).\n if obj.id:\n url = reverse(\"admin:{}_{}_upvote\".format(self.model._meta.app_label, self.model._meta.model_name), args=(obj.id, ))\n else:\n url = '#'\n\n return mark_safe('<a href=\"{}\"><div class=\"arrow-up\"></div></a>'.format(url))", "def upvote(request, topic_id):\n\tif request.method == 'PATCH':\n\t\ttopic_id = int(topic_id)\n\t\t# start critical section\n\t\tmutex_upvote.acquire()\n\t\ttry:\n\t\t\ttopic = topic_list.getTopicById(topic_id)\n\t\t\tif topic is not None:\n\t\t\t\ttopic.upvotes = topic.upvotes + 1\n\t\tfinally:\n\t\t\tmutex_upvote.release()\n\t\t# end critical section\n\t\treturn HttpResponse(status=200)\n\treturn HttpResponse(status=400)", "def upvote(username, source, recipe_id):\n\n recipe_upvotes = mongo.db.recipes.find_one({'_id': ObjectId(recipe_id)})\n mongo.db.recipes.update({'_id': ObjectId(recipe_id)}, { '$set': {'upvotes': recipe_upvotes['upvotes'] + 1}})\n recipe = mongo.db.recipes.find_one({'_id': ObjectId(recipe_id)})\n user = mongo.db.user.find_one({'username': 
recipe['user']})\n \n return render_template('recipedetails.html', username=username, source=source, recipe=recipe, user=user)", "def upvote_issue(request, pk):\n issue = Issue.objects.get(pk=pk)\n issue.issue_upvotes += 1\n issue.save()\n messages.success(request, 'You have successfully upvoted this issue !!')\n return redirect('view_issue', pk)", "def upvote(self, comment):\n if settings.UPVOTE_ENABLED:\n res = self.make_request(comment.upvote_url)\n print res.status_code, res.text\n\n # track comment to mixpanel\n mixpanel.track('Upvoted Comment', {\n 'Comment ID': comment.id,\n 'Text Length': len(comment.text),\n 'Debug Mode': not settings.UPVOTE_ENABLED,\n }.update(comment.sentiment['probability']))\n\n sleep(settings.VOTE_DELAY)\n else:\n print 'Would upvote %s' % comment", "def update_vote_rate_upvote(action, entries, **kwargs):\n\n rate = settings.VOTE_RATES[\"vote\"]\n\n if action == \"post_add\":\n entries.update(vote_rate=F(\"vote_rate\") + rate)\n else:\n entries.update(vote_rate=F(\"vote_rate\") - rate)", "def get_self_upvote_response(self, request, response, model):\n item = model.objects.get(pk=response.data['id'])\n create_vote(request.user, item, Vote.UPVOTE)\n\n serializer = self.get_serializer(item)\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED,\n headers=headers\n )", "def save_convo_upvote(convo, current_user):\n\ttry:\t\t\n\t\trelevantRels = get_convo_rels(convo, current_user)\n\t\tif relevantRels:\n\t\t\tif relevantRels.get('upvoted'):\n\t\t\t\t# if previously upvoted, remove upvote\n\t\t\t\tconvo.upvoters.remove(current_user)\n\t\t\telse:\n\t\t\t\t# add upvote\n\t\t\t\tconvo.upvoters.add(current_user)\n\t\t\t\t# convo.upvotes = convo.\n\t\t\t\n\t\t\t# regardless, remove downvote\n\t\t\tconvo.downvoters.remove(current_user)\n\t\t\tconvo.save()\n\n\t\treturn convo\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\treturn None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests creation of Upvote model
def test_init(self):
    u = create_upvote()
    self.assertTrue(isinstance(u, Upvote))
[ "def test_update_add_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_add_upvote('upvoter 2 uuid', 2, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def test_create_vote(self):\n\n res = self.client.post('/api/v1/votes', json=self.new_vote)\n data = res.get_json()\n\n self.assertEqual(data['status'], 201)\n self.assertEqual(data['message'], 'Success')\n self.assertEqual(res.status_code, 201)", "def test_success_vote_creation(self):\n\n votes_count = Vote.objects.count()\n form = VoteForm(self.params, current_user=self.user, obj=self.answer)\n form.submit()\n self.assertEqual(Vote.objects.count(), votes_count + 1)", "def test_vote_rating(self):\n\n Vote.objects.create(type=True, user=self.user, tip=self.tip) #Up vote by user\n\n self.assertTrue(self.tip.get_rating == {'positive':1, 'negative':0})\n self.assertTrue(self.tip.vote_set.count() == 1)\n\n Vote.objects.create(type=True, user=self.user1, tip=self.tip) #Up vote by user1\n\n self.assertTrue(self.tip.get_rating == {'positive':2, 'negative':0})\n self.assertTrue(self.tip.vote_set.count() == 2)\n\n Vote.objects.create(type=False, user=self.user2, tip=self.tip) #Down vote by user2\n\n self.assertTrue(self.tip.get_rating == {'positive':2, 'negative':1}) # rating should be 1\n self.assertTrue(self.tip.vote_set.count() == 3) # vote count 3", "def test_failed_vote_creation(self):\n\n votes_count = Vote.objects.count()\n form = VoteForm()\n form.submit()\n self.assertEqual(Vote.objects.count(), votes_count)", "def test_ajax_vote(self):\n # Create a different user to vote with\n user2 = User.objects.create(username=\"user\", email=\"user@tested.com\", password=\"tested\")\n\n answer = models.Post.objects.create(title=\"answer\", author=user2, content=\"tested foo bar too for\",\n type=models.Post.ANSWER, parent=self.post)\n\n self.preform_votes(post=answer, user=self.owner)\n self.preform_votes(post=self.post, user=self.owner)\n self.preform_votes(post=self.post, user=user2)\n\n return", "def test_get_new_voter(self):\n voter = self.data_import._get_voter_object('Test Voter 2')\n self.assertEqual(voter.vote.name, False, 'Test Voter should be created')", "def test_update_remove_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_remove_upvote('upvoter 1 uuid', 0, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def setUp(self):\n self.subject = \"Is Django the best?\"\n self.constant_time = timezone.now()\n self.vote_taken = self.constant_time\n self.ayes = 10\n self.nays = 15\n self.vote = Votes.objects.create(\n subject=self.subject,\n vote_taken=self.vote_taken,\n ayes=self.ayes,\n nays=self.nays\n )\n self.vote_serializer = VoteSerializer(self.vote)", "def test_autogenerated_vote_url(self):\n answer_response = self.client.get(f'/question/{self.question.id}/{self.answer.id}/')\n vote_url = answer_response.data.get('vote_url')\n\n response = self.client.post(vote_url, {})\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(Answer.objects.get(id=self.answer.id).votes_count, 1)\n self.assertEqual(Question.objects.get(id=self.question.id).total_votes, 1)\n self.assertIn(self.question, self.user.userprofile.voted_posts.all())", "def test_vote_view(self):\n # ACCEPTED\n response = 
self.client.post(f'/question/{self.question.id}/votefor/{self.answer.id}/', {})\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(Answer.objects.get(id=self.answer.id).votes_count, 1)\n self.assertEqual(Question.objects.get(id=self.question.id).total_votes, 1)\n self.assertIn(self.question, self.user.userprofile.voted_posts.all())\n\n # UNAUTHORIZED\n unauth_client = APIClient()\n response = unauth_client.post(f'/question/{self.question.id}/votefor/{self.answer.id}/', {})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # NOT FOUND\n response = self.client.post(f'/user/votefor/{self.question.id}/answer/999/', {})\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_can_vote_on_submission(self):\n self.as_user(self.rw_user)\n response = self.client.post(\"/submissions\", sample_submission)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n\n itemurl = response.data[\"url\"]\n\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.UP},\n format=\"json\",\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 1)\n self.assertEqual(sub.votes.first().owner, self.rw_user)\n self.assertEqual(sub.votes.first().value, Vote.Values.UP)\n\n # vote again = change vote\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.DOWN},\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 1)\n self.assertEqual(sub.votes.first().owner, self.rw_user)\n self.assertEqual(sub.votes.first().value, Vote.Values.DOWN)\n\n # vote as a different user = now there's two votes\n self.as_user(self.rw_user2)\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.FLAG},\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 2)\n self.assertEqual(\n [x.owner for x in sub.votes.all()], [self.rw_user, self.rw_user2]\n )\n self.assertEqual(\n [x.value for x in sub.votes.all()], [Vote.Values.DOWN, Vote.Values.FLAG]\n )\n\n # retrieving the submission cases: user is not moderator and submission is not theirs, nothing returned\n self.as_user(self.rw_user2)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data)\n\n # retrieving the submission cases: user is not moderator and submission is theirs, returned, 2 votes\n self.as_user(self.rw_user)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(\n response.data[\"votes\"],\n [{\"value\": Vote.Values.DOWN}, {\"value\": Vote.Values.FLAG}],\n )\n\n # retrieving the submission cases: user is a moderator and submission is not theirs, returned, 2 votes\n # with extra parameters that only mods see\n self.as_user(self.rw_mod)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(\n response.data[\"votes\"],\n [{\"value\": Vote.Values.DOWN}, {\"value\": Vote.Values.FLAG}],\n )", "def test_create_vote_twice(self):\n\n self.client.post('/api/v1/votes', json=self.new_vote)\n res = 
self.client.post('/api/v1/votes', json=self.new_vote)\n data = res.get_json()\n\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['error'], 'You can only vote once per office')\n self.assertEqual(res.status_code, 400)", "def create_upvote(count=0, upvoters=None):\n if upvoters is None:\n upvoters = []\n return Upvote(count, upvoters)", "def test_success_response_if_vote_already_exist(self):\n\n Vote.objects.create(content_type=ContentType.objects.get_for_model(\n Answer\n ), value=1, object_id=self.answer.id)\n votes_count = Vote.objects.count()\n form = VoteForm(self.params, current_user=self.user, obj=self.answer)\n form.submit()\n self.assertEqual(Vote.objects.count(), votes_count)", "def test_create_using_post1(self):\n pass", "def upvotePost(self):\n self.votes = self.votes + 1\n self.save()", "def upvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.UPVOTE_BUTTON, 'Upvoting')", "def vote_up(self, question, user):\n vote, created = self.get_or_create(user=user, question=question,\n defaults={'vote': Vote.UP_VOTE})\n if not created:\n vote.vote = Vote.UP_VOTE\n vote.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests adding a new upvote
def test_update_add_upvote(self):
    self.test_uuid = str(uuid.uuid4())
    u = Upvote(1, ['upvoter 1 uuid'])
    u.save(self.test_uuid, db)
    u.update_add_upvote('upvoter 2 uuid', 2, self.test_uuid, db)

    _u = Upvote.get(self.test_uuid, db)
    self.assertEqual(u.to_dict(), _u.to_dict())
[ "def test_create_vote(self):\n\n res = self.client.post('/api/v1/votes', json=self.new_vote)\n data = res.get_json()\n\n self.assertEqual(data['status'], 201)\n self.assertEqual(data['message'], 'Success')\n self.assertEqual(res.status_code, 201)", "def test_ajax_vote(self):\n # Create a different user to vote with\n user2 = User.objects.create(username=\"user\", email=\"user@tested.com\", password=\"tested\")\n\n answer = models.Post.objects.create(title=\"answer\", author=user2, content=\"tested foo bar too for\",\n type=models.Post.ANSWER, parent=self.post)\n\n self.preform_votes(post=answer, user=self.owner)\n self.preform_votes(post=self.post, user=self.owner)\n self.preform_votes(post=self.post, user=user2)\n\n return", "def test_update_remove_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_remove_upvote('upvoter 1 uuid', 0, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def test_autogenerated_vote_url(self):\n answer_response = self.client.get(f'/question/{self.question.id}/{self.answer.id}/')\n vote_url = answer_response.data.get('vote_url')\n\n response = self.client.post(vote_url, {})\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(Answer.objects.get(id=self.answer.id).votes_count, 1)\n self.assertEqual(Question.objects.get(id=self.question.id).total_votes, 1)\n self.assertIn(self.question, self.user.userprofile.voted_posts.all())", "def test_success_vote_creation(self):\n\n votes_count = Vote.objects.count()\n form = VoteForm(self.params, current_user=self.user, obj=self.answer)\n form.submit()\n self.assertEqual(Vote.objects.count(), votes_count + 1)", "def test_init(self):\n u = create_upvote()\n self.assertTrue(isinstance(u, Upvote))", "def test_can_vote_on_submission(self):\n self.as_user(self.rw_user)\n response = self.client.post(\"/submissions\", sample_submission)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n\n itemurl = response.data[\"url\"]\n\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.UP},\n format=\"json\",\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 1)\n self.assertEqual(sub.votes.first().owner, self.rw_user)\n self.assertEqual(sub.votes.first().value, Vote.Values.UP)\n\n # vote again = change vote\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.DOWN},\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 1)\n self.assertEqual(sub.votes.first().owner, self.rw_user)\n self.assertEqual(sub.votes.first().value, Vote.Values.DOWN)\n\n # vote as a different user = now there's two votes\n self.as_user(self.rw_user2)\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.FLAG},\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 2)\n self.assertEqual(\n [x.owner for x in sub.votes.all()], [self.rw_user, self.rw_user2]\n )\n self.assertEqual(\n [x.value for x in sub.votes.all()], [Vote.Values.DOWN, Vote.Values.FLAG]\n )\n\n # retrieving the submission cases: user is not moderator and submission is not theirs, 
nothing returned\n self.as_user(self.rw_user2)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data)\n\n # retrieving the submission cases: user is not moderator and submission is theirs, returned, 2 votes\n self.as_user(self.rw_user)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(\n response.data[\"votes\"],\n [{\"value\": Vote.Values.DOWN}, {\"value\": Vote.Values.FLAG}],\n )\n\n # retrieving the submission cases: user is a moderator and submission is not theirs, returned, 2 votes\n # with extra parameters that only mods see\n self.as_user(self.rw_mod)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(\n response.data[\"votes\"],\n [{\"value\": Vote.Values.DOWN}, {\"value\": Vote.Values.FLAG}],\n )", "def upvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.UPVOTE_BUTTON, 'Upvoting')", "def test_create_vote_twice(self):\n\n self.client.post('/api/v1/votes', json=self.new_vote)\n res = self.client.post('/api/v1/votes', json=self.new_vote)\n data = res.get_json()\n\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['error'], 'You can only vote once per office')\n self.assertEqual(res.status_code, 400)", "def test_vote_view(self):\n # ACCEPTED\n response = self.client.post(f'/question/{self.question.id}/votefor/{self.answer.id}/', {})\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(Answer.objects.get(id=self.answer.id).votes_count, 1)\n self.assertEqual(Question.objects.get(id=self.question.id).total_votes, 1)\n self.assertIn(self.question, self.user.userprofile.voted_posts.all())\n\n # UNAUTHORIZED\n unauth_client = APIClient()\n response = unauth_client.post(f'/question/{self.question.id}/votefor/{self.answer.id}/', {})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # NOT FOUND\n response = self.client.post(f'/user/votefor/{self.question.id}/answer/999/', {})\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_get_new_voter(self):\n voter = self.data_import._get_voter_object('Test Voter 2')\n self.assertEqual(voter.vote.name, False, 'Test Voter should be created')", "def test_vote(self):\n self.resource._request.register_uri(\n 'PUT', '/post/259787/vote')\n\n response = self.resource.vote(259787, 1)\n\n self.assertEqual(response['ok'], 1)\n self.assertTrue('server_time' in response)", "def test_vote_rating(self):\n\n Vote.objects.create(type=True, user=self.user, tip=self.tip) #Up vote by user\n\n self.assertTrue(self.tip.get_rating == {'positive':1, 'negative':0})\n self.assertTrue(self.tip.vote_set.count() == 1)\n\n Vote.objects.create(type=True, user=self.user1, tip=self.tip) #Up vote by user1\n\n self.assertTrue(self.tip.get_rating == {'positive':2, 'negative':0})\n self.assertTrue(self.tip.vote_set.count() == 2)\n\n Vote.objects.create(type=False, user=self.user2, tip=self.tip) #Down vote by user2\n\n self.assertTrue(self.tip.get_rating == {'positive':2, 'negative':1}) # rating should be 1\n self.assertTrue(self.tip.vote_set.count() == 3) # vote count 3", "def upvotePost(self):\n self.votes = self.votes + 1\n self.save()", "def test_update_button_with_preexisting_vote(self):\n vote = ProposalVoteFactory.create(\n proposal=self.proposal, voter=self.user\n )\n response = 
self._test_button_text(\"Update Review\", \"Submit Review\")\n # Vote comment should appear twice - once in the list of votes\n # and once in the form field.\n self.assertContains(response, vote.comment, 2)", "def test_failed_vote_creation(self):\n\n votes_count = Vote.objects.count()\n form = VoteForm()\n form.submit()\n self.assertEqual(Vote.objects.count(), votes_count)", "def upvote(request, issue_id):\n if request.method == 'POST':\n issue = get_object_or_404(Issue, pk=issue_id)\n issue.upvotes += 1\n issue.save()\n messages.success(request, 'Upvoted successfully!')\n return redirect('/issue/' +str(issue.id))", "def post(self, answerid):\n db = Database()\n votes = db.get_by_argument(\"answers\", \"answer_id\", answerid)\n if votes:\n upvote = votes[5] + 1\n db.update_answer_record(\"answers\", \"up_vote\",\n upvote, \"answer_id\", answerid)\n return{\"message\": \"answer upvoted\"}, 201\n return{\"message\": \"No answer by that answer_id\"}, 404", "def test_nvp_with_vote(self):\n song = Song.objects.create(artist='Van Morrison',\n album='The Healing Game',\n title='Sometimes We Cry',\n genre='Blues',\n score=0,\n played=0,\n family=0,\n global_score=0)\n\n client = Client()\n client.login(username='admin_search', password='admintest')\n\n response = client.get('/playlist/add/%d' % song.id)\n\n response = client.get('/songs/never-played/')\n self.assertContains(response, song.title, status_code=200)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests removing an existing upvote
def test_update_remove_upvote(self):
    self.test_uuid = str(uuid.uuid4())
    u = Upvote(1, ['upvoter 1 uuid'])
    u.save(self.test_uuid, db)
    u.update_remove_upvote('upvoter 1 uuid', 0, self.test_uuid, db)

    _u = Upvote.get(self.test_uuid, db)
    self.assertEqual(u.to_dict(), _u.to_dict())
[ "def remove_votes(checkin, vote):\n\n\t# update database\n\tif vote == \"up\":\n\t\t# remove upvote\n\t\tcheckin.upvotes -= 1\n\telif vote == \"down\":\n\t\t# remove downvote\n\t\tcheckin.downvotes -= 1\n\n\t# commit changes to database\n\tmodel.db.session.commit()", "def test_update_add_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_add_upvote('upvoter 2 uuid', 2, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def test_deleting_vote_if_values_are_different(self):\n\n Vote.objects.create(content_type=ContentType.objects.get_for_model(\n Answer\n ), value=-1, object_id=self.answer.id)\n votes_count = Vote.objects.count()\n form = VoteForm(self.params, current_user=self.user, obj=self.answer)\n form.submit()\n self.assertEqual(Vote.objects.count(), votes_count - 1)", "def delete(self, request, pk):\n answer = get_object_or_404(Answer, pk=pk)\n user = request.user\n\n answer.up_voters.remove(user)\n answer.save()\n\n serializer_context = {\"request\": request}\n serializer = self.serializer_class(answer, context=serializer_context)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def test_unsave_substitute(self):\n\n # count substitutes number to be able to compare later to new substitutes number\n old_substitutes = Substitute.objects.count()\n product_id = self.substitute.id\n self.client.get(reverse('open_food_facts:unsave', args=(product_id,)))\n new_substitutes = Substitute.objects.count()\n # if there's one less substitute in the database after calling the unsave view,\n # it means the substitute was deleted\n self.assertEqual(new_substitutes, old_substitutes - 1)", "def delete(self, request, pk):\n answer = get_object_or_404(Answer, pk=pk)\n user = request.user\n\n answer.down_voters.remove(user)\n answer.save()\n\n serializer_context = {\"request\": request}\n serializer = self.serializer_class(answer, context=serializer_context)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def test_ticket_remove_ok(self):\n insert_ticket(self.env)\n rv, output = self.execute('ticket remove 1')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def upvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.UPVOTE_BUTTON, 'Upvoting')", "def test_delete_upload(self):\n pass", "def test_delete_escalation(self):\n pass", "def downvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.DOWNVOTE_BUTTON, 'Downvoting')", "def test_delete_muveto_pmt_item(self):\n pass", "def test_d_elete_webhookchave(self):\n pass", "def test_queue_remove(self):\n q1 = self.party.enqueue_song(self.user, 't123')\n q2 = self.party.enqueue_song(self.user, 't456')\n q2.upvote(self.user2)\n next_entry = self.party.dequeue_next_song()\n self.assertEquals(next_entry, q2)\n self.party.save(self.redis)\n p = Party.get(self.redis, self.party.id)\n self.assertEquals(p.queue[0].id, q1.id)", "def test_can_vote_on_submission(self):\n self.as_user(self.rw_user)\n response = self.client.post(\"/submissions\", sample_submission)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n\n itemurl = response.data[\"url\"]\n\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.UP},\n format=\"json\",\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 1)\n 
self.assertEqual(sub.votes.first().owner, self.rw_user)\n self.assertEqual(sub.votes.first().value, Vote.Values.UP)\n\n # vote again = change vote\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.DOWN},\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 1)\n self.assertEqual(sub.votes.first().owner, self.rw_user)\n self.assertEqual(sub.votes.first().value, Vote.Values.DOWN)\n\n # vote as a different user = now there's two votes\n self.as_user(self.rw_user2)\n response = self.client.post(\n f\"{itemurl}/votes\",\n {\"value\": Vote.Values.FLAG},\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n sub = Submission.objects.first()\n self.assertEqual(sub.votes.count(), 2)\n self.assertEqual(\n [x.owner for x in sub.votes.all()], [self.rw_user, self.rw_user2]\n )\n self.assertEqual(\n [x.value for x in sub.votes.all()], [Vote.Values.DOWN, Vote.Values.FLAG]\n )\n\n # retrieving the submission cases: user is not moderator and submission is not theirs, nothing returned\n self.as_user(self.rw_user2)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data)\n\n # retrieving the submission cases: user is not moderator and submission is theirs, returned, 2 votes\n self.as_user(self.rw_user)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(\n response.data[\"votes\"],\n [{\"value\": Vote.Values.DOWN}, {\"value\": Vote.Values.FLAG}],\n )\n\n # retrieving the submission cases: user is a moderator and submission is not theirs, returned, 2 votes\n # with extra parameters that only mods see\n self.as_user(self.rw_mod)\n response = self.client.get(itemurl, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(\n response.data[\"votes\"],\n [{\"value\": Vote.Values.DOWN}, {\"value\": Vote.Values.FLAG}],\n )", "def destroy_test(self, test_id):", "def test_model_delete_tag(self):\n # Arrange:\n self.tag.save()\n self.old_count = Tag.objects.count()\n # Act\n Tag.objects.filter(pk=self.tag.pk).delete()\n # Assert\n self.new_count = Tag.objects.count()\n self.assertEquals(\n self.old_count - 1, self.new_count, 'The Tag was not deleted'\n )", "def test_vote_view(self):\n # ACCEPTED\n response = self.client.post(f'/question/{self.question.id}/votefor/{self.answer.id}/', {})\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(Answer.objects.get(id=self.answer.id).votes_count, 1)\n self.assertEqual(Question.objects.get(id=self.question.id).total_votes, 1)\n self.assertIn(self.question, self.user.userprofile.voted_posts.all())\n\n # UNAUTHORIZED\n unauth_client = APIClient()\n response = unauth_client.post(f'/question/{self.question.id}/votefor/{self.answer.id}/', {})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # NOT FOUND\n response = self.client.post(f'/user/votefor/{self.question.id}/answer/999/', {})\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def delete(self):\n if not is_admin():\n vote_ns.abort(403, 'You don\\'t have sufficient rights to access this resource')\n delete_all_votes()\n return '', 204" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Form a density from observing mass in specific bins. This is different from hist in that the bins are specified and not assumed equally spaced. The height of the bars is the mass within the bin divided by the length of the bin.
def PL_density(mass, binpoints, facecolor='red', alpha=0.8,
               edgecolor='black', ax=None, **opts):
    if ax is None:
        fig = plt.gcf()
        ax = fig.add_subplot(111)

    opts['facecolor'] = facecolor
    opts['alpha'] = alpha
    opts['edgecolor'] = edgecolor

    mass = np.array(mass, np.float)
    n = mass.sum() / 100.

    # get the corners of the rectangles for the histogram
    diff = np.diff(binpoints)
    left = np.array(binpoints[:-1])
    right = np.array(binpoints[1:])
    bottom = np.zeros(len(left))
    top = bottom + mass / (n * diff)

    # we need a (numrects x numsides x 2) numpy array for the path helper
    # function to build a compound path
    XY = np.array([[left,left,right,right], [bottom,top,top,bottom]]).T

    # get the Path object
    barpath = path.Path.make_compound_path_from_polys(XY)

    # make a patch out of it
    patch = patches.PathPatch(barpath, **opts)
    ax.add_patch(patch)

    # update the view limits
    ax.set_xlim(left[0], right[-1])
    ax.set_ylim(bottom.min(), top.max())

    return ax
[ "def create_mass_hist(dfr, costh_bin=None):\n if costh_bin is not None:\n plot_data = dfr[get_bin_cut_df(dfr, lambda d: d.costh_HX.abs(),\n *costh_bin)]\n else:\n plot_data = dfr\n\n return create_histogram(plot_data.chicMass, (75, 3.325, 3.725))", "def compute_diameter_bins(self, diameters):\n #Freedman-Diaconis bin size estimation\n h = 2 * self.IQR(diameters) * len(diameters)**(-1.0/3.0)\n logger.debug(\"Diameter Bin size(based on %d samples) is: %10.2E\"%(len(diameters), h))\n\n d_min = np.amin(diameters)\n d_max = np.amax(diameters)\n logger.debug(\"Minimum Particle Diameter: %10.2E\"%(d_min))\n logger.debug(\"Maximum Particle Diameter: %10.2E\"%(d_max))\n \n\n num_bins = 1\n d_bins = [d_min]\n d_start = d_min\n all_bins_found = False\n while all_bins_found == False:\n d_bins.append(d_bins[-1] + h)\n if d_bins[-1] <= d_max:\n num_bins += 1\n else:\n all_bins_found = True\n logger.debug(\"Number of diameter bins is: %d\"%(num_bins))\n\n #Create vector of particle diameter bin coordinates\n bin_coords = [{'d_min': d_min, 'd_max': d_min + h}]\n for i in range(1, num_bins):\n entry = {'d_min': bin_coords[-1]['d_max'] , 'd_max': bin_coords[-1]['d_max'] + h}\n bin_coords.append(entry) \n\n for i, bin_coord in enumerate(bin_coords):\n logger.debug(\"Diameter Bin %d \\t%10.2E\\t%10.2E\\n\"%(i + 1, bin_coord['d_min'], bin_coord['d_max']))\n \n return bin_coords", "def histogram(x: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:\n pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)\n\n return pdf", "def toDistribution (bins, xmin: float, xmax: float):\n xbins = len(bins)\n x = np.linspace(xmin, xmax, xbins)\n\n dist = EmpiricalDistribution1D ([xmin, xmax], xbins)\n dist._mass = bins\n dist._scale = np.sum(bins)\n dist._wsum = np.average(x, weights=dist._mass) * dist._scale\n dist._ileft = 0\n dist._iright = xbins-1\n return dist", "def hist_moments123(hist,edges):\n\n centers = (edges[:,1:]+edges[:,-1:])/2.0\n nm = np.sum(hist,axis=1).reshape(-1,1)\n nu1 = np.sum(centers*hist/nm,axis=1) \n nu2 = np.sum(centers**2*hist/nm,axis=1) \n nu3 = np.sum(centers**3*hist/nm,axis=1) \n return np.array(np.c_[nu1,nu2,nu3])", "def normed_hist(list_,bins=None,normalization='over total',opacity=1,plot_label=''):\n counts_array,edges = np.histogram(list_,bins=bins)\n bin_widths=edges[1]-edges[0]\n \n if normalization=='over total':\n heights=100*counts_array/counts_array.sum()\n ylabel='counts / sum(counts) (%)'\n elif normalization=='over max':\n heights=100*counts_array/counts_array.max()\n ylabel='counts / max(counts) (%)'\n elif normalization=='none':\n heights=counts_array\n ylabel='counts'\n else:\n raise RuntimeError('unsupported normalization input!') \n \n plt.bar(edges[:-1],heights,bin_widths,label=plot_label,alpha=opacity)\n plt.ylabel(ylabel)", "def _kde_histsum(x, bin_x, bin_entries, band_width, n_total):\n if not isinstance(x, (float, int, np.number)):\n raise RuntimeError('x has wrong type')\n return np.sum(bin_entries * norm.pdf(x, loc=bin_x, scale=band_width)) / n_total", "def binarize(i, bins):\n\n hist, edges = np.histogram(i, bins=bins, range=[10, 2000], normed=True)\n edges = (edges[:-1] + edges[1:])/2\n hist *= edges\n\n return hist", "def plot_histogram(self, nbin=10, width=0.01,\n *args, **kwargs):\n\n adjuster = 0.00001\n bins = np.linspace(self.min - adjuster, self.max, nbin)\n ncols = []\n velocity = []\n lower_v = self.min - adjuster\n upper_v = 0\n\n for upper_v in bins:\n ncol = 0\n for v in 
self.velocity['velocity']:\n if lower_v < v <= upper_v:\n ncol += 1\n\n velocity.append((lower_v + upper_v)/2.)\n ncols.append(ncol)\n lower_v = upper_v - adjuster\n\n velocity.append(upper_v + adjuster)\n ncols.append(0)\n\n plt.bar(velocity, ncols, width, *args, **kwargs)", "def _create_bins(self):\n min_conf = self.data[self.conf].min()\n max_conf = self.data[self.conf].max()\n\n if self.bin_width == -1:\n self.bin_width = (max_conf - min_conf)/100\n if self.bin_spacing == -1:\n self.bin_spacing = (max_conf - min_conf)/10\n\n # define the bins (according to width)\n self.bins = np.arange(min_conf, max_conf + self.bin_width, self.bin_spacing)\n return self.bins", "def hist(self, x, bins=10, normed=0, bottom=None,\n align='edge', orientation='vertical', width=None,\n log=False, **kwargs):\n if not self._hold: self.cla()\n n, bins = npy.histogram(x, bins, range=None, normed=normed)\n if width is None: width = 0.9*(bins[1]-bins[0])\n if orientation == 'horizontal':\n patches = self.barh(bins, n, height=width, left=bottom,\n align=align, log=log)\n elif orientation == 'vertical':\n patches = self.bar(bins, n, width=width, bottom=bottom,\n align=align, log=log)\n else:\n raise ValueError, 'invalid orientation: %s' % orientation\n for p in patches:\n p.update(kwargs)\n return n, bins, cbook.silent_list('Patch', patches)", "def dist_hist():\n\n cosmos_index, dist_2d = match_cat()\n\n fig = plt.figure(figsize = (8,8))\n sp = fig.add_subplot(111)\n\n heights, bins = np.histogram(np.log10(dist_2d), bins = 30)\n\n sp.step(bins[:-1] + (0.5*(bins[1] - bins[0])), heights, color = 'k', linewidth = 2)\n\n sp.set_xlabel('$log_{10}$[Separation/Arcsec]', fontdict = font, fontsize = 24)\n sp.set_ylabel('Frequency', fontdict = font, fontsize = 24)", "def get_n_m_kde(magnitude, bin_centre, area, bandwidth=0.2):\n kde_skl = KernelDensity(bandwidth=bandwidth)\n kde_skl.fit(magnitude[:, np.newaxis])\n pdf = np.exp(kde_skl.score_samples(bin_centre[:, np.newaxis]))\n return pdf/area*len(magnitude)/np.sum(pdf)", "def histogram(hist, bins, transposition=False, **kwargs):\n # calculate width of each bars by alpha\n alpha = 0.7\n width = alpha * (bins[1] - bins[0])\n # calculate the center point of entire histogram\n center = (bins[1:] + bins[:-1]) / 2\n # create new figure\n if not transposition:\n pl.bar(center, hist, align='center', width=width, **kwargs)\n else:\n pl.barh(center, hist, align='center', height=width, **kwargs)", "def bond_histogram(bond_list, save_location=None, dpi=300, graph_min=0, graph_max=2):\n \n lengths = []\n for atoms, bond_length in bond_list.items():\n lengths.append(bond_length)\n \n bins = np.linspace(graph_min, graph_max)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n plt.xlabel('Bond Length (angstrom)')\n plt.ylabel('Number of Bonds')\n \n \n ax.hist(lengths, bins=bins)\n \n # Save figure\n if save_location:\n plt.savefig(save_location, dpi=dpi)\n \n return ax", "def marginalize(self,iaxis,bin_range=None):\n\n h = Histogram(self._axes[(iaxis+1)%2],style=self._style) \n\n if iaxis == 1:\n\n if bin_range is None: \n h._counts = np.apply_over_axes(np.sum,self._counts,[1]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,self._var,[1]).reshape(h._counts.shape)\n else:\n c = self._counts[:,bin_range[0]:bin_range[1]]\n v = self._var[:,bin_range[0]:bin_range[1]]\n\n h._counts = np.apply_over_axes(np.sum,c,[1]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,v,[1]).reshape(h._counts.shape)\n else:\n\n if bin_range is None: \n h._counts = 
np.apply_over_axes(np.sum,self._counts,[0]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,self._var,[0]).reshape(h._counts.shape)\n else:\n c = self._counts[bin_range[0]:bin_range[1],:]\n v = self._var[bin_range[0]:bin_range[1],:]\n\n h._counts = np.apply_over_axes(np.sum,c,[0]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,v,[0]).reshape(h._counts.shape)\n\n return h", "def histogram_distances(self, ax=None, **kwargs):\n _ = ax.hist(self.d_ij, **kwargs)\n ax.set_ylabel('Num. pairs')\n ax.set_xlabel('Separation distance')", "def easy_hist(list_,distribution_is_continuous=True,normalization='over total',bins=None,label='',opacity=1):\n array=np.array(list_)\n mean=array.mean()\n std=array.std()\n mean_std_string=' (mean: %.2f, std: %.2f)'%(mean,std)\n \n plot_label=label+mean_std_string\n if distribution_is_continuous:\n normed_hist(list_,bins=bins,normalization=normalization,plot_label=plot_label,opacity=opacity)\n else:\n discrete_hist(list_,normalization=normalization,plot_label=plot_label,opacity=opacity)\n plt.xlabel(label)\n plt.title(label+' distribution\\n'+mean_std_string)\n plt.grid(True)\n return mean,std", "def histogram2d(\n x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10\n) -> torch.Tensor:\n\n _, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)\n _, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)\n\n pdf = joint_pdf(kernel_values1, kernel_values2)\n\n return pdf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the Fleetspeak connector.
def Init(service_client=None):
    global CONN
    global label_map

    if service_client is None:
        service_client_cls = fs_client.InsecureGRPCServiceClient

        fleetspeak_message_listen_address = (
            config.CONFIG["Server.fleetspeak_message_listen_address"] or None)
        fleetspeak_server = config.CONFIG["Server.fleetspeak_server"] or None

        if fleetspeak_message_listen_address is None and fleetspeak_server is None:
            logging.warning(
                "Missing config options `Server.fleetspeak_message_listen_address', "
                "`Server.fleetspeak_server', at least one of which is required to "
                "initialize a connection to Fleetspeak; Not using Fleetspeak.")
            return

        service_client = service_client_cls(
            "GRR",
            fleetspeak_message_listen_address=fleetspeak_message_listen_address,
            fleetspeak_server=fleetspeak_server,
            threadpool_size=50)

    label_map = {}
    for entry in config.CONFIG["Server.fleetspeak_label_map"]:
        key, value = entry.split(":")
        label_map[key.strip()] = value.strip()

    CONN = service_client
    logging.info("Fleetspeak connector initialized.")
[ "def setup_connector():\n connector = ConnectorMatrix(\n {\n \"room\": \"#test:localhost\",\n \"mxid\": \"@opsdroid:localhost\",\n \"password\": \"hello\",\n \"homeserver\": \"http://localhost:8008\",\n }\n )\n return connector", "def initialize_framing(self):\n if self.connection_mode == \"ETHERNET\":\n self.sut_adapter = EthernetAdapter.EthernetAdapter()\n\n if self.sut_mac != \"\":\n self.sut_adapter.dut_mac = self.sut_mac\n else:\n self.sut_adapter.sut_ip = self.sut_ip\n\n self.sut_adapter.sut_interface = self.sut_interface\n\n self.sut_adapter.start()\n\n self.seq_nr = 1\n \n self.initialized = True", "def test_init(self):\n connector = RocketChat({\n 'name': 'rocket.chat',\n 'access-token': 'test',\n 'user-id': 'userID'\n }, opsdroid=OpsDroid())\n self.assertEqual(\"general\", connector.default_target)\n self.assertEqual(\"rocket.chat\", connector.name)", "def __init__(self, link_uri):\n\n self._cf = Crazyflie()\n\n self._cf.connected.add_callback(self.connected)\n self._cf.disconnected.add_callback(self.disconnected)\n self._cf.connection_failed.add_callback(self.connectionFailed)\n self._cf.connection_lost.add_callback(self.connectionLost)\n\n self._cf.appchannel.packet_received.add_callback(self.appPacketReceived)\n\n self._cf.open_link(link_uri)\n\n print('Connecting to %s' % link_uri)", "def __init__(self):\n socketIO.emit(\"status\", \"Walabot initializing\")\n self.wlbt = Walabot()", "def setup_twitter(self):\n # Setup Twitter connection.\n #self.logprint(\"consumer key/secret:\", self.cfg.get('twitter_consumer_key'), self.cfg.get('twitter_consumer_secret'))\n #self.logprint(\"ouath token/secret:\", self.cfg.get('twitter_oauth_token'), self.cfg.get('twitter_oauth_token_secret'))\n try:\n self.auth = tweepy.OAuthHandler(self.cfg.get('twitter_consumer_key'), self.cfg.get('twitter_consumer_secret'))\n self.auth.set_access_token(self.cfg.get('twitter_oauth_token'), self.cfg.get('twitter_oauth_token_secret'))\n streamtwitter = self.cfg.get_bool('twitter_stream')\n #username = self.cfg.get('twitter_username')\n #password = self.cfg.get('twitter_password')\n except KeyError, ke:\n print \"Couldn't find twitter authentication information in config file:\", ke\n sys.exit(1)\n self.twit = tweepy.API(self.auth)\n\n # Listen to Twitter stream.\n try:\n if streamtwitter:\n self.stream_twitter()\n else:\n self.twitter_loop()\n except KeyboardInterrupt:\n print \"Quitting...\"\n sys.exit(0)", "def setup(self):\n self.set_stream_listener()\n self.setup_mq()\n self.start_listener()", "def __init__(self):\n self.wlbt = WalabotAPI\n self.wlbt.Init()\n self.wlbt.SetSettingsFolder()", "def _init_connectors(self, netcode, wallet_connector_type, wallet_credentials=None):\n\n if netcode == 'BTC':\n # configure mainnet providers\n provider_list = providers.providers_for_config_string(PYCOIN_BTC_PROVIDERS, 'BTC')\n\n blockio_index = -1\n for idx, val in enumerate(provider_list):\n print(idx, val)\n if isinstance(val, BlockrioProvider):\n blockio_index = idx\n\n if blockio_index > -1:\n provider_list[blockio_index] = BlockrBroadcaster('BTC')\n else:\n provider_list.append(BlockrBroadcaster('BTC'))\n\n provider_list.append(InsightBroadcaster('https://insight.bitpay.com/', 'BTC'))\n\n # initialize payment connectors based on config file\n if wallet_connector_type == 'blockchain.info':\n provider_list.append(LocalBlockchainInfoConnector(wallet_credentials))\n else:\n provider_list.append(BitcoindConnector('BTC'))\n\n providers.set_default_providers_for_netcode('BTC', provider_list)\n\n elif netcode == 
'XTN':\n # initialize testnet providers\n testnet_list = []\n testnet_list.append(BitcoindConnector('XTN'))\n providers.set_default_providers_for_netcode('XTN', testnet_list)\n\n else:\n logging.error('Unrecognized chain %s', netcode)\n raise UnrecognizedChainError('Unrecognized chain ' + netcode)", "def initialize(self):\n self.initialized = False\n self.initialize_cameras()\n self.initialize_electronics()\n self.initialized = True", "def connection_setup(self):\n\n self.logger.debug(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr,\n HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n\n # create the poller\n if self.poller is None:\n self.poller = self.dispatcher.get_poll()\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, self.dispatcher.EV_FD_IN, self.host_management_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, self.dispatcher.EV_FD_IN, self.host_management_cb)", "def _connect(self):\n\n wrapper_headers, wrapper_body = self._create_wrapper_request()\n\n self.wrapper_user = self._get_wrapper_user(wrapper_headers)\n self.wrapper_key = self._get_wrapper_key(wrapper_body)\n\n self.websocket = self._get_websocket()\n\n return self.init()", "def init_mlflow():\n if \"SPARK_HOME\" in os.environ:\n if \"databricks\" in os.environ[\"SPARK_HOME\"]:\n mlflow.set_tracking_uri(\"databricks\")", "def init(config=None):\n if config:\n Cuebot.Config = config\n Cuebot.Timeout = config.get('cuebot.timeout', Cuebot.Timeout)\n if os.getenv(\"CUEBOT_HOSTS\"):\n Cuebot.setHosts(os.getenv(\"CUEBOT_HOSTS\").split(\",\"))\n else:\n facility_default = Cuebot.Config.get(\"cuebot.facility_default\")\n Cuebot.setFacility(facility_default)\n if Cuebot.Hosts is None:\n raise CueException('Cuebot host not set. Please ensure CUEBOT_HOSTS is set ' +\n 'or a facility_default host is set in the yaml pycue config.')\n Cuebot.setChannel()", "def init_phonebook(self) -> None:\n self.fph = FritzPhonebook(\n address=self.host,\n user=self.username,\n password=self.password,\n )\n self.update_phonebook()", "def initialize(self):\n try:\n api_key = self._pomodoro_service.get_config(\"task.asana\", \"api_key\")\n self.asana_api = self._get_asana_api(api_key)\n except Exception as ex:\n logger.error(\"Error initializing plugin: {0}\".format(ex))", "def __init__(self):\n self.config = ConfigUtil.ConfigUtil('../../../data/ConnectedDevicesConfig.props')\n self.config.loadConfig()\n print('Configuration data...\\n' + str(self.config)) \n print('============= Setting Done! 
=============')\n self.host = self.config.getProperty(ConfigConst.COAP_GATEWAY_SECTION, ConfigConst.DEFAULT_HOST )\n self.port = int(self.config.getProperty(ConfigConst.COAP_GATEWAY_SECTION, ConfigConst.DEFAULT_COAP_PORT))\n self.serverAddr = (self.host, self.port)\n print('URL(IP): ' + str(self.serverAddr))\n self.url = \"coap://\" + self.host + \":\" + str(self.port) + \"/temp\"", "def __init__(self):\n self.updater = Updater(TELEGRAM_SECRET_TOKEN, use_context=True)\n\n # Get the dispatcher to register handlers\n dp = self.updater.dispatcher\n\n # log all errors\n dp.add_error_handler(self.error)\n\n for c in self.commands:\n dp.add_handler(CommandHandler(c['cmd'], getattr(self, c['func'])))\n\n # handle poll answers\n dp.add_handler(PollAnswerHandler(self.receive_poll_answer))\n\n # Button Handler\n # self.updater.dispatcher.add_handler(CallbackQueryHandler(self.button))\n\n # Start the Bot\n self.updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n # self.updater.idle()", "async def init(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The x, y, z acceleration values returned in a
def acceleration(self): # read the 6 bytes of acceleration data current_range = self.range scale = 1.0 if current_range == 3: scale = 512.0 if current_range == 2: scale = 1024.0 if current_range == 1: scale = 2048.0 if current_range == 0: scale = 4096.0 # shift down to the actual 14 bits and scale based on the range x, y, z = [((i >> 2) / scale) * _STANDARD_GRAVITY for i in self._xyz_raw] return (x, y, z)
[ "def get_vector(self):\n return self.x, self.y, self.z", "def acceleration(self) -> Tuple[float, float, float]:\n # pylint: disable=no-else-return\n # This needs to be refactored when it can be tested\n # Read 6 bytes for 16-bit X, Y, Z values.\n self._read_into(_MMA8451_REG_OUT_X_MSB, self._BUFFER, count=6)\n # Reconstruct signed 16-bit integers.\n x, y, z = struct.unpack(\">hhh\", self._BUFFER)\n x >>= 2\n y >>= 2\n z >>= 2\n # Scale values based on current sensor range to get proper units.\n _range = self.range\n if _range == RANGE_8G:\n return (\n x / 1024.0 * _SENSORS_GRAVITY_EARTH,\n y / 1024.0 * _SENSORS_GRAVITY_EARTH,\n z / 1024.0 * _SENSORS_GRAVITY_EARTH,\n )\n elif _range == RANGE_4G:\n return (\n x / 2048.0 * _SENSORS_GRAVITY_EARTH,\n y / 2048.0 * _SENSORS_GRAVITY_EARTH,\n z / 2048.0 * _SENSORS_GRAVITY_EARTH,\n )\n elif _range == RANGE_2G:\n return (\n x / 4096.0 * _SENSORS_GRAVITY_EARTH,\n y / 4096.0 * _SENSORS_GRAVITY_EARTH,\n z / 4096.0 * _SENSORS_GRAVITY_EARTH,\n )\n else:\n raise RuntimeError(\"Unexpected range!\")", "def accel_data(self):\n\n t, speed, speed_polyfit = self.speed_data()\n\n time_s = self.t / 1000\n\n accel = np.gradient(speed, time_s)\n accel_polyfit = np.gradient(speed_polyfit, time_s)\n\n return self.t, accel, accel_polyfit", "def accel():\n x,y,z = unpack('>hhh',i2c.readfrom_mem(0x68, 0x3B, 6)) \n\n x = x / 16384\n y = y / 16384\n z = z / 16384\n\n return x,y,z", "def get_accelerations(self):\n position = self.get_smooth_positions(\n self.motion_flagspace.flags.TELESCOPE)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n acceleration_coordinates = int_nf.calculate_2d_accelerations(\n position.coordinates.to('radian').value,\n self.info.instrument.sampling_interval.to('second').value\n ) * units.Unit('radian/second2')\n\n acceleration = Coordinate2D(unit=self.info.instrument.get_size_unit()\n / units.Unit('s2'))\n acceleration.set(acceleration_coordinates)\n return acceleration", "def get_accel_data(self, g=False):\n x = self.read_i2c_word(self.ACCEL_XOUT0) - self.sum_data[0]\n y = self.read_i2c_word(self.ACCEL_YOUT0) - self.sum_data[1]\n z = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n accel_scale_modifier = None\n accel_range = self.getRange() << 3 # self.read_accel_range(True)\n\n if accel_range == self.ACCEL_RANGE_2G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n elif accel_range == self.ACCEL_RANGE_4G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n elif accel_range == self.ACCEL_RANGE_8G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n elif accel_range == self.ACCEL_RANGE_16G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n else:\n print(\"Unkown range {} - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\".format(accel_range))\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n x = x / accel_scale_modifier\n y = y / accel_scale_modifier\n z = z / accel_scale_modifier\n\n if g is True:\n return {'x': x, 'y': y, 'z': z}\n elif g is False:\n x = x * self.GRAVITIY_MS2\n y = y * self.GRAVITIY_MS2\n z = z * self.GRAVITIY_MS2\n return {'x': x, 'y': y, 'z': z}", "def get_accel_z(self):\n return self.read_i2c_word(self.ACCEL_ZOUT0)", "def get_coords(self):\n\t\treturn self.x, self.y, self.z", "def read_accelerometer(self):\n x, y, z = self._read_vector(BNO055_ACCEL_DATA_X_LSB_ADDR)\n return (x/100.0, y/100.0, z/100.0)", "def acceleration(self):\n if self.state.lightning:\n return self.character.acceleration / 2\n else:\n return self.character.acceleration", "def 
acceleration(self, t):\n pass", "def acceleration(self, state, unused_t):\n ax = 0.0\n ay = 0.0\n for p in g_listOfPlanets:\n if p is self or p._merged:\n continue # ignore ourselves and merged planets\n dx = p._st._x - state._x\n dy = p._st._y - state._y\n dsq = dx*dx + dy*dy # distance squared\n dr = math.sqrt(dsq) # distance\n force = GRAVITYSTRENGTH*self._m*p._m/dsq if dsq > 1e-10 else 0.\n # Accumulate acceleration...\n ax += force*dx/dr\n ay += force*dy/dr\n return (ax, ay)", "def z0(self):\n\t\treturn self.__xyz0[2]", "def axial_motion(self) -> Tuple[float, float]:\n velocity_x = math.cos(math.radians(self.angle)) * self.coefficent\n velocity_y = math.sin(math.radians(self.angle)) * self.coefficent\n \n return velocity_x, velocity_y", "def velocity_z(x):\n return 3 * np.cos(8 * x[0]) * np.sin(2 * x[1]) * np.cos(4 * x[2])", "def coeffs(self):\n\t\treturn [self.a,self.b,self.c,self.d]", "def accel(self):\n return self.force()/self.mass", "def xz(self):\n return -self._3d_rotational_inertia[0, 2]", "def get_accelerations(states):\n\n def get_accel_helper(n):\n \"\"\"Return the x and y acceleration for the nth planet from the Sun.\"\"\"\n global G\n mi = m[[k for k in range(9) if k != n]]\n xi = states[[k for k in range(36) if k % 4 == 0 and k != 4*n]]\n yi = states[[k for k in range(36) if (k-1) % 4 == 0 and k != 4*n+1]]\n xx = states[4*n]\n yy = states[4*n+1]\n ri = np.vstack((xi-xx, yi-yy))\n wi = G*mi/np.sqrt((xi-xx)**2+(yi-yy)**2)**3\n\n return np.sum(np.vstack((wi, wi)) * ri, axis=1)\n\n accels = np.zeros(18)\n for n in range(9):\n a = get_accel_helper(n)\n accels[2*n] = a[0]\n accels[2*n+1] = a[1]\n\n return accels", "def get_3d_position(self):\r\n answer = dict(\r\n position=BASS_3DVECTOR(),\r\n orientation=BASS_3DVECTOR(),\r\n velocity=BASS_3DVECTOR(),\r\n )\r\n bass_call(\r\n BASS_ChannelGet3DPosition,\r\n self.handle,\r\n pointer(answer[\"position\"]),\r\n pointer(answer[\"orientation\"]),\r\n pointer(answer[\"velocity\"]),\r\n )\r\n return answer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables tap detection with configurable parameters.
def enable_tap_detection( self, *, tap_count=1, threshold=25, long_initial_window=True, long_quiet_window=True, double_tap_window=TapDuration.DURATION_250_MS ): self._tap_shock = not long_initial_window self._tap_quiet = long_quiet_window self._tap_threshold = threshold self._tap_count = tap_count if double_tap_window > 7 or double_tap_window < 0: raise ValueError("double_tap_window must be a TapDuration") if tap_count == 1: self._single_tap_int_en = True elif tap_count == 2: self._tap_duration = double_tap_window self._double_tap_int_en = True else: raise ValueError("tap must be 1 for single tap, or 2 for double tap")
[ "def addTap(id):\n logging.debugv(\"functions/linux.py->addTap(id)\", [id])\n dev = 'tap' + str(id)\n if chkIf(dev):\n if ifUp(dev):\n return dev\n else:\n return False\n cmd = [locations.OPENVPN, '--mktun', '--dev', dev]\n if runWrapper(cmd):\n if ifUp(dev):\n return dev\n else:\n return False\n else:\n return False", "def SetShouldVerifyAp(self, value):\n self.config.set(\"Settings\", \"should_verify_ap\", int(value), write=True)\n self.wifi.should_verify_ap = misc.to_bool(value)", "def _configure_aparams(self, aparams=None):\n\n log.debug(\"%r: _configure_aparams: aparams=%s\" % (self._platform_id, aparams))\n\n self._configure_aparams_arg = None\n\n if self._aam:\n # if we already have an alert manager, do the thing right away (but\n # this is not the case at time of writing).\n self._do_configure_aparams(aparams)\n else:\n # save the argument for subsequent processing in on_start:\n self._configure_aparams_arg = aparams", "def handle_on(self, args: \"List[str]\") -> None:\n if len(args) == 0:\n print(\"on takes one or more arguments.\")\n return\n\n try:\n flags = [expand_flag(arg) for arg in args]\n except HERAError as e:\n print(e)\n return\n\n for flag in flags:\n setattr(self.debugger.vm, flag, True)", "def enable_breakpoint(self, args=None):\n self.session_handler.dispatch_event(\"enable_breakpoint\", args)", "def setup_tap(homebrew_dir, tap_repository):\n logging.info(\"Tapping {}\".format(tap_repository))\n brew(homebrew_dir, [\"tap\", tap_repository])\n logging.info(\"Tapped {}\".format(tap_repository))", "def add_enabled_tests(self, *enabled_tests):\n # Disallow setting both enabled_tests and *disabled_tests.\n assert not self._disabled_tests\n # Only '*.QEMU_DISABLED_*' is allowed.\n assert len(self._qemu_disabled_tests) < 2\n self._enabled_tests += list(enabled_tests)", "def _install_tap_linux(tap_name):\n global tapdev_fd\n\n IFF_TAP = 0x0002\n IFF_NO_PI = 0x1000\n TUNSETIFF = 0x400454CA # This is derived by printf() of TUNSETIFF\n TUNSETOWNER = TUNSETIFF + 2\n\n tapdev_fd = open('/dev/net/tun', 'rw')\n ifr = struct.pack('16sH', tap_name, IFF_TAP | IFF_NO_PI)\n fcntl.ioctl(tapdev_fd, TUNSETIFF, ifr)\n fcntl.ioctl(tapdev_fd, TUNSETOWNER, os.getegid())\n\n time.sleep(1) # required to give the new device settling time\n pipe = _doexec(\n *(['ip', 'link', 'set', 'dev', str(tap_name), 'up']))\n pipe.wait()", "def _setupAntialiasing(self):\n technique = self.settings.antialiasingTechnique\n self.debug(\"Creating antialiasing handler for\", technique)\n\n if technique == \"None\":\n self.antialias = AntialiasingTechniqueNone()\n elif technique == \"SMAA\":\n self.antialias = AntialiasingTechniqueSMAA()\n elif technique == \"FXAA\":\n self.antialias = AntialiasingTechniqueFXAA()\n else:\n self.error(\n \"Unkown antialiasing technique\", technique, \"-> using None:\")\n self.antialias = AntialiasingTechniqueNone()\n\n if self.occlusion.requiresBlurring():\n self.antialias.setColorTexture(\n self.blurOcclusionH.getColorTexture())\n else:\n if self.haveCombiner and self.settings.enableTemporalReprojection:\n self.antialias.setColorTexture(self.combiner.getColorTexture())\n else:\n self.antialias.setColorTexture(\n self.lightingComputeContainer.getColorTexture())\n\n self.antialias.setDepthTexture(self.deferredTarget.getDepthTexture())\n self.antialias.setVelocityTexture(self.deferredTarget.getAuxTexture(1))\n self.antialias.setup()", "def enable_hw_switches(self):\n self.log.debug(\"Enabling Diverter for hw switch: %s\",\n self.config['activation_switches'])\n if 
self.config['type'] == 'hold':\n\n for switch in self.config['activation_switches']:\n\n self.platform.set_hw_rule(\n sw_name=switch.name,\n sw_activity=1,\n driver_name=self.config['activation_coil'].name,\n driver_action='hold',\n disable_on_release=False,\n **self.config)\n\n # If there's a activation_time then we need to watch for the hw\n # switch to be activated so we can disable the diverter\n\n if self.config['activation_time']:\n self.machine.switch_controller.add_switch_handler(\n switch.name,\n self.schedule_deactivation)\n\n elif self.config['type'] == 'pulse':\n\n for switch in self.config['activation_switches']:\n\n self.platform.set_hw_rule(\n sw_name=switch.name,\n sw_activity=1,\n driver_name=self.config['activation_coil'].name,\n driver_action='pulse',\n disable_on_release=False,\n **self.config)", "def enable(self, timeout):", "def set_motion_detection(self, enabled=1):\n result, current_config = self.get_motion_detect_config()\n current_config[\"isEnable\"] = enabled\n self.set_motion_detect_config(current_config)", "def enable_throttling(self, enable_throttling):\n\n self._enable_throttling = enable_throttling", "def setDecisionThreshold(self, thresh) -> None:\n ...", "def __configure_scanner(\n self, zap_scanner: ascan, scanner_config: collections.OrderedDict\n ):\n\n logging.debug(\"Trying to configure the ActiveScan\")\n self.configure_scripts(config=scanner_config)\n\n if self._is_not_empty_integer(\"maxRuleDurationInMins\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_max_rule_duration_in_mins(\n integer=str(scanner_config[\"maxRuleDurationInMins\"])\n ),\n method_name=\"set_option_max_rule_duration_in_mins\",\n )\n if self._is_not_empty_integer(\"maxScanDurationInMins\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_max_scan_duration_in_mins(\n integer=str(scanner_config[\"maxScanDurationInMins\"])\n ),\n method_name=\"set_option_max_scan_duration_in_mins\",\n )\n if self._is_not_empty_integer(\"threadPerHost\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_thread_per_host(\n integer=str(scanner_config[\"threadPerHost\"])\n ),\n method_name=\"set_option_thread_per_host\",\n )\n if self._is_not_empty_integer(\"delayInMs\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_delay_in_ms(\n integer=str(scanner_config[\"delayInMs\"])\n ),\n method_name=\"set_option_delay_in_ms\",\n )\n\n if self._is_not_empty_bool(\"addQueryParam\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_add_query_param(\n boolean=str(scanner_config[\"addQueryParam\"])\n ),\n method_name=\"set_option_add_query_param\",\n )\n if self._is_not_empty_bool(\"handleAntiCSRFTokens\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_handle_anti_csrf_tokens(\n boolean=str(scanner_config[\"handleAntiCSRFTokens\"])\n ),\n method_name=\"set_option_handle_anti_csrf_tokens\",\n )\n if self._is_not_empty_bool(\"injectPluginIdInHeader\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_inject_plugin_id_in_header(\n boolean=str(scanner_config[\"injectPluginIdInHeader\"])\n ),\n method_name=\"set_option_inject_plugin_id_in_header\",\n )\n if self._is_not_empty_bool(\"scanHeadersAllRequests\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_scan_headers_all_requests(\n boolean=str(scanner_config[\"scanHeadersAllRequests\"])\n ),\n method_name=\"set_option_scan_headers_all_requests\",\n )\n if 
self._is_not_empty_string(\"defaultPolicy\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_default_policy(\n string=str(scanner_config[\"defaultPolicy\"])\n ),\n method_name=\"set_option_default_policy\",\n )\n else:\n # Ensure a default value even if nothing is defined\n scanner_config[\"defaultPolicy\"] = \"Default Policy\"", "def configure(self):\n super(DPSClientAugmentedAggregateDirectoryTest, self).configure()\n self.patch(config.Authentication.Wiki, \"Enabled\", True)", "def animal_enable(flag=True):\n global animal_enabled\n animal_enabled = flag", "def _setup_detection_callback(self) -> None:\n if self._adv_data_callback is None:\n return\n self._cancel_callback()\n super().register_detection_callback(self._adv_data_callback)\n assert HA_BLEAK_SCANNER is not None\n self._detection_cancel = HA_BLEAK_SCANNER.async_register_callback(\n self._callback, self._mapped_filters\n )", "def set_identify(self, on, duration):\n raise exc.UnsupportedFunctionality()", "def set_press_listener(self, listener):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`True` if a single or double tap was detected, depending on the value of the ``tap_count`` argument passed to ``enable_tap_detection``
def tapped(self): if self._tap_count == 0: return False motion_int_status = self._motion_int_status if motion_int_status == 0: # no interrupts triggered return False if self._tap_count == 1 and motion_int_status & 1 << 5: return True if self._tap_count == 2 and motion_int_status & 1 << 4: return True return False
[ "def supports_multi_touch(self):\n ret = self._get_attr(\"supportsMultiTouch\")\n return ret", "def multi_touch_supported(self):\n ret = self._get_attr(\"multiTouchSupported\")\n return ret", "def was_touched(self,touch):\n if \"on_touch\" in self.__dict__:\n if touch.button in self.on_touch:\n if str(type(self.object_class))==\"<class 'kivy.graphics.vertex_instructions.Ellipse'>\":\n #FIXME: not working with angled shapes\n this_center=(self.object_class.pos[0]+(self.object_class.size[0]/2),self.object_class.pos[1]+(self.object_class.size[1]/2))\n if ((touch.pos[0]-this_center[0])**2)/((self.object_class.size[0]/2)**2)+((touch.pos[1]-this_center[1])**2)/((self.object_class.size[1]/2)**2) <= 1: #Reference matematica: https://math.stackexchange.com/questions/76457/check-if-a-point-is-within-an-ellipse\n return True\n else:\n return False\n elif str(type(self.object_class))==\"<class 'kivy.graphics.vertex_instructions.Rectangle'>\":\n #FIXME: not working with angled shapes\n if (self.object_class.pos[0]<=touch.pos[0] and self.object_class.pos[1]<=touch.pos[1]) and (self.object_class.size[0]+self.object_class.pos[0]>=touch.pos[0] and self.object_class.size[1]+self.object_class.pos[1]>=touch.pos[1]):\n return True\n else:\n return False\n else:\n return False\n else:\n return False", "def test_tap(self):\n self.notify('Press and release \"a\"')\n self.assert_keys(\n 'Failed to register event',\n ('a', True), ('a', False))", "def was_gesture(self, gesture):\n \n check = gesture in self.__observed_gestures\n self.__observed_gestures = []\n return check", "def _is_vice_playback_occurring(self):\n\n point_to_grab = self.window_vice_bottom_right - self.size_vice_playback_area\n\n lower_bottom_rect = (point_to_grab.x, point_to_grab.y, self.window_vice_bottom_right.x, self.window_vice_bottom_right.y)\n\n # The argument 'all_screens=True' is required in order for PIL to\n # properly perform a .grab() outside of the primary monitor.\n image_pillow = ImageGrab.grab(lower_bottom_rect, all_screens=True)\n #image.show()\n\n image_opencv = pillow_to_opencv_image(image_pillow)\n\n result = cv2.matchTemplate(image_opencv, self.image_vice_playback_template, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n\n if max_val > self.detection_threshold_playback_tile:\n #print(\"playback detected\")\n return True\n else:\n #print(\"NO playback detected\")\n return False", "def is_touched(self, release_timeout = 2):\n if not self.shm.touch_p > 0: return False\n self.wait_for_release(release_timeout)\n return True", "def isTouching(pixel1,pixel2):\n return isDiagonalTo(pixel1,pixel2) or isLine(pixel1,pixel2)", "def hasA(self, dtype):\n if dtype in self.devices:\n return 1\n else:\n return 0", "def is_checked(self, num):\n if not self.detectors is None:\n return self.detectors[num]\n else:\n return False", "def animal_detected(self, cid, offset=0):\n if self.outdoor_lastEvent[cid][\"video_status\"] == \"recording\":\n for e in self.outdoor_lastEvent[cid][\"event_list\"]:\n if e[\"type\"] == \"animal\" and e[\"time\"] + offset > int(time.time()):\n return True\n return False", "def light_detection(self) -> bool:\n return self.details['light_detection_switch']", "def _should_handle_mouse_press(self, buttons: int) -> bool:\n return (\n self.definition.on_press is not None\n # Also handle if on_release is defined so we can record which mouse button was used.\n or self.definition.on_release is not None\n or self.definition.depressed_color is not None\n )", "def capture_gesture(self, 
label):\n self.exp_ges.label = ord(label.upper()) - 64\n print \"LABEL\", label\n # self.exp_ges.label = label\n if not self.exp_ges.wait_for_connection():\n return False\n if not self.exp_ges.is_calibrated():\n self.exp_ges.calibration.calibrate()\n self.data_collected.extend(self.exp_ges.extract_features())\n return True", "def IsDetected(self):\n # We need some time for system to detect the device.\n for i in xrange(self._DETECT_RETRY, 0, -1):\n try:\n self._av_sync_probe = AVSyncProbeSerial()\n except IOError:\n logging.info('AVSyncProbe: Retry detecting device (%d left).', i)\n time.sleep(1)\n else:\n return True\n return False", "def starts_with_thumb(self):\n return self.fingers[0] == 1", "def should_hit(self):\n \n return self.hand.compute_bj_count() < 17", "def _should_arm_activate(self):\n current = self._get_time()\n timed_out_at = self.last_detected + self.arm_activity_timeout\n return current > timed_out_at", "def check_devices(self) -> bool:\n\t\tpass", "def _is_device_active(self):\n return switch.is_on(self.hass, self.heater_entity_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an array of integer values, identify the three values that can create the triangle with the largest perimeter. In the case of multiple triangles with the same perimeter, choose the one with the longest side. In the case of a tie, choose one with the longest minimum side.
def identify_maximum_perimeter_triangle(values): if len(values) < 3: raise ValueError('Unable to identify triangle with less than three values') sorted_values = sorted(values) for index in range(len(sorted_values) - 1, 1, -1): if sorted_values[index] < sorted_values[index - 1] + sorted_values[index - 2]: return Triangle(longest_side=sorted_values[index], middle_side=sorted_values[index - 1], shortest_side=sorted_values[index - 2]) return None
[ "def find_largest_block(array):\n best_size = 0\n best_index = 0\n for i in range(len(array)):\n size = array[i][2] * array[i][3]\n if size > best_size:\n best_size = size\n best_index = i\n return array[best_index]", "def longest_side(a,b):\n\treturn math.sqrt(a*a+b*b)", "def largest_polygon(i):\n x = [p.area for p in list(i)]\n return(x.index(max(x)))", "def findLargest(shapes):\n ## TO DO\n largestArea = 0\n largestShapeList = []\n for shape in shapes:\n if largestArea < shape.area():\n largestArea = shape.area()\n largestShapeList = [shape,]\n elif largestArea == shape.area():\n largestShapeList.append(shape)\n largestShapeTuple = tuple(largestShapeList)\n return largestShapeTuple", "def best_polygon_for_pivot(p1):\n\tbest = 0\n\tfor p2 in historical_points:\n\t\tfor p3 in historical_points:\n\t\t\tif p1<p2 and p2<p3:\n\t\t\t\tt = Triangle(p1,p2,p3)\n\t\t\t\tb = best_polygon_for_first_triangle(t)\n\t\t\t\tbest = b if b > best else best\n\treturn best", "def best_polygon_for_first_triangle(t):\n\tif _calculated_best_polygon_for_first_triangle[t]:\n\t\treturn _best_polygon_for_first_triangle[t]\n\n\tif _points_inside_triangle[t] == -1:\n\t\t_best_polygon_for_first_triangle[t] = -1\n\t\t_calculated_best_polygon_for_first_triangle[t] = True\n\t\treturn _best_polygon_for_first_triangle[t]\n\t\n\tp1,p2,p3 = t.p1,t.p2,t.p3\n\tbest_recursive = 3\n\tfor recursive_p in historical_points:\n\t\tif recursive_p > p3:\n\t\t\tif check_internal_angle(p1,p2,recursive_p) and check_internal_angle(p1,p3,recursive_p):\n\t\t\t\trecursive_t = Triangle(p2,p3,recursive_p)\n\t\t\t\tbest_recursive = max(best_polygon_for_first_triangle(recursive_t)+1, best_recursive)\n\n\t_best_polygon_for_first_triangle[t] = best_recursive + _points_inside_triangle[t]\n\t_calculated_best_polygon_for_first_triangle[t] = True\n\treturn _best_polygon_for_first_triangle[t]", "def findMaxPathDownTriangle(triangle):\n dp_table = TriangleOfNumbers()\n n_rows = len(triangle.data)\n\n for irow in xrange(0,n_rows):\n dp_table.add_row([0]*(irow+1))\n\n irow = 0\n icol = 0\n\n for irow in xrange(0, n_rows):\n n_cols = len(triangle.data[irow])\n\n for icol in xrange(0, n_cols):\n current_value = triangle.data[irow][icol]\n max_value_so_far = dp_table.get_max_value_going_into_cell(irow, icol)\n print current_value, max_value_so_far, current_value + max_value_so_far\n dp_table.data[irow][icol] = current_value + max_value_so_far\n\n return dp_table.get_max_value_in_row(n_rows-1)", "def generate_triangles(callback, max_side=default_max_side):\n\n # Loop through a>b>c where:\n # 0 < a < max_side\n # a/2 < b ≤ a\n # [if b ≤ a/2, then 2b≤a. But then b+c≤2b≤a, contradiction triangle ineq]\n # a-b < c ≤ b\n # [if c ≤ a-b, then b+c≤a, contradicting triangle inequality]\n # [nb - using triangle inquality for 'real' triangles: b+c > a. b+c=a results in\n # a degenerate triangle]\n for a in range (1,max_side):\n for b in range (a/2,a+1):\n for c in range (a-b+1,b+1):\n sides = (a,b,c) # sides of the triangle\n area = hero_area(*sides) # area, from Heron's formula\n heights = (2*area/a,2*area/b,2*area/c) # Vertical heights from corresponding side\n \n #First check for integer area. 
NB does not guarantee integer height\n if area == 0 or not isinteger(area): continue\n\n #Cycle through potential bases, checking if we have an integer height\n for i, h in enumerate(heights):\n if isinteger(h): # woo, we have one\n triangle = Triangle(\n base = sides[i],\n side1 = sides[(i+1)%3],\n side2 = sides[(i+2)%3],\n height = h)\n\n callback(triangle)", "def smallest_perimeter(length: int, width: int, height: int) -> int:\n\n return (\n sorted([length, width, height])[0] * 2\n + sorted([length, width, height])[1] * 2\n )", "def best_hand(hand):\n return max(itertools.combinations(hand, 5), key=hand_rank)", "def minMaxRiddle(arr):\n # the most brutal brute force can get, i think cubic time:\n # n = len(arr)\n # window_maxs = []\n # for w in range(1, n + 1):\n # window_max = 0\n # for i in range(n - w + 1):\n # window = arr[i : i + w]\n # window_min = min(window)\n # if window_min > window_max:\n # window_max = window_min\n # window_maxs.append(window_max)\n # return window_maxs\n\n # little better, quyadratic time\n n = len(arr)\n mins = [[0 for _ in range(n)] for _ in range(n)]\n mins[0] = arr\n maxes = [max(mins[0])]\n for i in range(1, n):\n curr_max = 0\n for j in range(n - i):\n mins[i][j] = min(mins[i - 1][j], mins[i - 1][j + 1])\n if mins[i][j] > curr_max:\n curr_max = mins[i][j]\n maxes.append(curr_max)\n return maxes", "def best_hand(hand):\n\treturn max(itertools.combinations(hand, 5), key=hand_rank)", "def largest_of_three(a: int, b: int, c: int) -> int:\n return(max(a,b,c))", "def smallest_side(length: int, width: int, height: int) -> int:\n\n return (\n sorted([length, width, height])[0] * sorted([length, width, height])[1]\n )", "def num_triangles(p):\n num = 0\n for a in range(1, p):\n numer = p*p - 2*p*a\n denom = 2*(p-a)\n b = numer // denom\n if a > b or b > (p - a - b):\n break\n elif b * denom == numer:\n num += 1\n return num", "def find_max_crossing_subarray(arr,low,mid,high):\n \n # Look at the left \n left_sum = -Inf\n sm = 0 # sm for sum\n for i in range(mid,low-1,-1):\n sm += arr[i]\n if sm >= left_sum:\n left_sum = sm\n max_left = i\n \n # Look at the right\n right_sum = -Inf\n sm = 0\n for j in range(mid+1,high+1):\n sm += arr[j]\n if sm >= right_sum:\n right_sum = sm\n max_right = j\n\n return (max_left,max_right,left_sum+right_sum)", "def majority_element(array: List[int]):\n return recursive_majority_element(array, 0, len(array) - 1)", "def MaximalSquare(strArr):\n # code goes here\n # opt 1\n # rows = len(strArr)\n # columns = len(strArr[0]) if rows > 0 else 0\n\n # dp = [[0 for j in range(columns)] for i in range(rows)]\n # maxlen = 0\n\n # for i in range(rows):\n # for j in range(columns):\n # if i == 0 or j == 0:\n # dp[i][j] = int(strArr[i][j])\n # if i > 0 and j > 0 and int(strArr[i][j]) == 1:\n # dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1\n # maxlen = max(dp[i][j], maxlen)\n\n # return maxlen * maxlen\n # opt 2\n rows = len(strArr)\n columns = len(strArr[0]) if rows > 0 else 0\n dp = [0 for j in range(columns)]\n maxlen = 0\n prev = 0\n\n for i in range(rows):\n for j in range(columns):\n temp = dp[j]\n if i > 0 and j > 0 and int(strArr[i][j]) == 1:\n dp[j] = min(dp[j], dp[j-1], prev) + 1\n maxlen = max(dp[j], maxlen)\n else:\n dp[j] = int(strArr[i][j])\n prev = temp\n\n return maxlen * maxlen", "def problem086():\n\n # solutions[k] is the set of all solutions where the largest side has length k.\n # A solution is a triple (x, y, z) such that 0 < x <= y <= z, and in the rectangular prism with dimensions x * y * z,\n # the shortest 
surface path from one vertex to the opposite vertex has an integral length.\n solutions = []\n\n # Generates all solutions where the largest side has length less than 'limit'.\n def generate_solutions():\n # Pythagorean triples theorem:\n # Every primitive Pythagorean triple with a odd and b even can be expressed as\n # a = st, b = (s^2-t^2)/2, c = (s^2+t^2)/2, where s > t > 0 are coprime odd integers.\n # Now generate all Pythagorean triples, including non-primitive ones.\n for s in itertools.count(3, 2):\n for t in range(s - 2, 0, -2):\n if s * s // 2 >= limit * 3:\n return\n\n if math.gcd(s, t) == 1:\n for k in itertools.count(1):\n a = s * t * k\n b = (s * s - t * t) // 2 * k\n c = (s * s + t * t) // 2 * k\n if a >= limit and b >= limit:\n break\n find_splits(a, b, c)\n find_splits(b, a, c)\n\n # Assumes that a^2 + b^2 = c^2.\n def find_splits(a, b, c):\n z = b\n for x in range(1, a):\n y = a - x\n if y < x:\n break\n if c * c == min(\n (x + y) * (x + y) + z * z,\n (y + z) * (y + z) + x * x,\n (z + x) * (z + x) + y * y,\n ):\n temp = max(x, y, z)\n if temp < limit:\n # Add canonical solution\n item = tuple(sorted((x, y, z)))\n solutions[temp].add(item)\n\n # cumulativesolutions[m] = len(solutions[0]) + len(solutions[1]) + ... + len(solutions[m]).\n cumulativesolutions = [0]\n\n limit = 1\n while True:\n # Extend the solutions list with blank sets\n while len(solutions) < limit:\n solutions.append(set())\n\n generate_solutions()\n\n # Compute the number of cumulative solutions up to and including a certain maximum size\n for i in range(len(cumulativesolutions), limit):\n sum = cumulativesolutions[i - 1] + len(solutions[i])\n cumulativesolutions.append(sum)\n if sum > 1000000:\n return i\n\n # Raise the limit and keep searching\n limit *= 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is it a runfolder? >>> print is_runfolder('090630_HWUSIEAS999_0006_30LNFAAXX') True >>> print is_runfolder('hello') False
def is_runfolder(name): if re.match("^[0-9]{6}_[-A-Za-z0-9_]*$", name): return True else: return False
[ "def is_folder(path: str) -> bool:\n pass", "def is_folder(file):\r\n return file.rfind('.') < 0", "def isfolder(self):\n return False", "def is_folder(inputfolder, boolean=False):\n if not inputfolder or not isdir(inputfolder):\n if boolean:\n return False\n logger.error('Argument %s is not a folder!' % inputfolder)\n sys.exit(0)\n inputfolder = realpath(inputfolder)\n return inputfolder", "def is_folder(self, name):\n return self.clear_name(name) in self.folders() or name == \"..\"", "def isFolder(path):\n try:\n if os.path.isdir(path):\n return True\n else:\n return False\n except OSError:\n print(\"Sequestum: Folder checking failed\")", "def supports_folder() -> bool:\n return False", "def is_folder( self, url ):\n path = self.get_url_path( url )\n path = [ p for p in path.split('/') if len( p ) > 0 ]\n return len( path ) <= 2 or path[2] == 'tree'", "def is_valid_folder(self):\n if not os.path.isdir(self.path):\n return False\n if self.is_hidden_file():\n return False\n if self.name in INVALID_DIR:\n return False\n return True", "def is_folder(self, path):\n try:\n return stat.S_ISDIR(self.ftp_client.stat(path).st_mode)\n except IOError:\n return False", "def exists_folder(route):\n if os.path.exists(route):\n return True\n else:\n return False", "def check_if_folder_exists(folder_name, root_input=\"\"):\n result = os.path.isdir(Path(root_input) / folder_name)\n\n return result", "def belongs_to_folder(path, fileName):\r\n if not path.endswith(os.path.sep):\r\n path += os.path.sep\r\n return fileName.startswith(path)", "def _parse_folder_name(fn):\n stuff = osp.normpath(fn).split(os.sep)[-1].split('-')\n if len(stuff) != 3:\n # This is not a folder with strax data\n raise InvalidFolderNameFormat(fn)\n return stuff", "def is_camus_dir(dirname):\n return bool(re.search(\"d_[0-9]{8}\\-[0-9]{4}\", dirname))", "def is_a_spark_directory(filename):\n if (filename.find(\"/_SUCCESS\") > 0) \\\n or (filename.find(\"/part-0\") > 0) \\\n or (filename.find(\"/.part-0\") > 0) \\\n or (filename.find(\"/._\") > 0):\n return True\n return False", "def test_valid_pid_one_folder(self):\n self.assertEqual(len(get_folders_in_project(self.pid)), 1)", "def __validateStreamStatsDataFolder__(self, folder=None,subfolder=None):\n state = os.path.basename(folder).lower()\n\n #validate state\n if state.upper() not in self.states:\n self.__sm__('You did not select a valid state folder: ' + folder, 'ERROR')\n arcpy.AddError('You did not select a valid state folder: ' + folder)\n sys.exit()\n if os.path.isdir(folder + '/' + subfolder):\n return True\n else:\n self.__sm__('Subfolder does not exist: ' + subfolder, 'ERROR')\n arcpy.AddError('Subfolder does not exist: ' + subfolder)", "def get_runfolder(self, runfolder):\n runfolders = self.get_runfolders()\n matching_name = list([r for r in runfolders if r.name == runfolder])\n if len(matching_name) > 1:\n raise AssertionError(\"Found more than 1 runfolder matching: \".format(r))\n if len(matching_name) > 0 and matching_name[0]:\n return matching_name[0]\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the directory in path that is a subdirectory of root. e.g. >>> print get_top_dir('/a/b/c', '/a/b/c/d/e/f') d >>> print get_top_dir('/a/b/c/', '/a/b/c/d/e/f') d >>> print get_top_dir('/a/b/c', '/g/e/f') None >>> print get_top_dir('/a/b/c', '/a/b/c')
def get_top_dir(root, path): if path.startswith(root): subpath = path[len(root):] if subpath.startswith('/'): subpath = subpath[1:] return subpath.split(os.path.sep)[0] else: return None
[ "def get_topdir():\n path = Path(os.path.dirname(__file__))\n while True:\n if (path / \".top\").exists():\n return path\n if path.parent == path:\n # Seems like we reached the home /\n raise ValueError(\"Couldn't determine root directory.\")\n path = path.parent", "def get_the_only_directory_under(dirpath):\n dirs = [name for name in os.listdir(dirpath) if isdir(join(dirpath, name))]\n if len(dirs) != 1:\n raise ValueError(\"In 'get_the_only_directory_under' call, \"\n \"found more than 1 directory under: %s\" % dirpath)\n return dirs[0]", "def find_dir(path, curr_path):\n # type: (str, Optional[Union[str, Path]]) -> Optional[Path]\n path = Path(path)\n if path.is_absolute():\n return path\n\n curr_path = Path(curr_path).parent if curr_path else Path('.')\n\n ret = curr_path / path\n return ret if ret.is_dir() else None", "def get_dir_without_last_slash(path):\n return \"/\".join(path.split(\"/\")[:-1])", "def get_path(root, path):\n\n return join(dirname(root), path)", "def get_parent_dir(path):\n\treturn os.path.dirname(os.path.abspath(path))", "def first_dir(path_string):\n parts = path_string.split(os.path.sep)\n return parts[0]", "def getDirName(self, path, lastOnly=False, levelsUp=None):\n # self.log('Get directory name of path: %s' % path,9)\n if path is None:\n raise TypeError('Path is not passed in system.fs.getDirName')\n dname = os.path.dirname(path)\n dname = dname.replace(\"/\", os.sep)\n dname = dname.replace(\"//\", os.sep)\n dname = dname.replace(\"\\\\\", os.sep)\n if lastOnly:\n dname = dname.split(os.sep)[-1]\n return dname\n if levelsUp is not None:\n parts = dname.split(os.sep)\n if len(parts) - levelsUp > 0:\n return parts[len(parts) - levelsUp - 1]\n else:\n raise RuntimeError(\"Cannot find part of dir %s levels up, path %s is not long enough\" %\n (levelsUp, path))\n return dname + os.sep", "def DirUnder( parentDir, subDir ):\n\n return subDir if subDir.startswith( '/' ) or subDir.startswith( '.' 
) else os.path.join( parentDir, subDir )", "def getDirName(self, path,lastOnly=False,levelsUp=None):\n pylabs.q.logger.log('Get directory name of path: %s' % path,9)\n if path is None:\n raise TypeError('Path is not passed in system.fs.getDirName')\n #try:\n dname=os.path.dirname(path)\n dname=dname.replace(\"/\",os.sep)\n dname=dname.replace(\"//\",os.sep)\n dname=dname.replace(\"\\\\\",os.sep)\n if lastOnly:\n dname=dname.split(os.sep)[-1]\n return dname\n if levelsUp<>None:\n parts=dname.split(os.sep)\n if len(parts)-levelsUp>0:\n return parts[len(parts)-levelsUp-1]\n else:\n raise RuntimeError (\"Cannot find part of dir %s levels up, path %s is not long enough\" % (levelsUp,path))\n return dname+os.sep\n #except:\n #raise RuntimeError('Failed to get directory name of the given path: %s'% path)", "def get_working_directory(self, root_path):\n\n roots = tuple(item.replace('.', os.path.sep) for item in self._test_roots)\n if len(roots) > 0:\n for item in roots:\n if root_path.endswith(item):\n return root_path.replace(item, '').rstrip(os.path.sep)\n\n return root_path", "def get_parent_directory(path, levels_count=1):\n\n if not levels_count:\n return path\n\n return get_parent_directory(os.path.dirname(path), levels_count=levels_count - 1)", "def get_subsubdirs(path):\n leveltwo_subdirs = []\n immediate_subdirs = [os.scandir(subdir) for subdir in os.scandir(path) if Path(subdir).is_dir()]\n\n for scan in immediate_subdirs:\n for subdir in scan:\n leveltwo_subdirs.append(Path(subdir)) if Path(subdir).is_dir() else None\n\n return leveltwo_subdirs", "def path2ParentDirectory(cls, path):\n return '/'.join(path.split('/')[:-1])", "def find_path(path, topdir):\n paths = list(base_to_top_paths(topdir, path))\n if paths:\n return paths[-1]\n else:\n raise IOError(\"Unable to find the relative path '{}' in the repository hierarchy\".format(path))", "def get_parent_path(base, directory_name):\n done = False\n while not done:\n base = os.path.dirname(base)\n if base == \"/\":\n return None\n if os.path.split(base)[-1] == directory_name:\n done = True\n else:\n done = False\n return base", "def path_to_sub_directory(path, name):\n result = os.path.join(path, name)\n if os.path.isdir(result):\n return result", "def is_rootDir(path):\n if path == '':\n return False\n\n return os.path.dirname(path) == path", "def _search_new_path(self, path):\n if not path.startswith(os.sep):\n return None\n path = path.split(os.sep)[1:]\n # First get name and remove it from path\n name = None\n for i in range(len(path)-1, -1, -1):\n if path[i] != \"\":\n name = path[i]\n path = path[:i]\n break\n if name is None:\n return None\n\n # Walk the directory hierarchy\n cur_dir = self.root_dir\n for node in path:\n if node == \"\":\n continue\n if not isinstance(cur_dir, Dir):\n # A file - doesn't have children\n return None\n try:\n cur_dir = cur_dir.files[node]\n except KeyError:\n return None\n return cur_dir, name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the json file containing holdings info. See sample for more information on formatting
def read_data(data_file): with open(data_file) as json_data: all_data = json.load(json_data) urls = all_data["urls"] holdings = all_data["holdings"] return urls, holdings
[ "def get_standings_raw(self, league_id):\n with open(self.dir_path + \"/sample.standings.json\", \"r\") as f:\n return json.load(f)", "def read_data():\n with open(\"stagnationPointNu.json\", \"r\") as ifile:\n data = json.load(ifile)\n return data", "def _read_json(self,fname):\n\n with open(fname) as f:\n data = json.load(f)\n\n return data", "def load_rentals_file(filename):\n logging.debug(\"Loading input file %s...\", filename)\n\n try:\n with open(filename) as file:\n try:\n data = json.load(file)\n except ValueError:\n logging.error(\"Could not locate input file (value error)\")\n sys.exit()\n except FileNotFoundError:\n logging.error(\"Could not locate input file (file did not exist)\")\n sys.exit()\n\n return data", "def parseJson(json_file):\n with open(json_file, \"r\") as f:\n items = loads(f.read())[\n \"Items\"\n ] # creates a Python dictionary of Items for the supplied json file\n \n # Open all data files\n with (\n open(\"items.dat\", \"a\") as item_file,\n open(\"category.dat\", \"a\") as category_file,\n open(\"bids.dat\", \"a\") as bid_file,\n open(\"users.dat\", \"a\") as user_file,\n ):\n\n times1 = \"\"\n for item in items:\n ID = item[\"ItemID\"]\n seller = item[\"Seller\"]\n sellerID = seller[\"UserID\"]\n bids = item[\"Bids\"]\n category = item['Category'] # array of categories\n \n\n if ID is None:\n item_file.write('NULL|')\n else:\n item_file.write(f\"{ID}|\")\n\n if 'Name' not in item or item['Name'] is None:\n item_file.write('NULL|')\n else:\n item_file.write(f\"\\\"{formatStr(item['Name'])}\\\"|\")\n \n if 'Currently' not in item or item['Currently'] is None:\n item_file.write('NULL')\n else:\n item_file.write(f\"{transformDollar(item['Currently'])}|\")\n \n if 'First_Bid' not in item or item['First_Bid'] is None:\n item_file.write('NULL')\n else:\n item_file.write(f\"{transformDollar(item['First_Bid'])}|\")\n \n if 'Number_of_Bids' not in item or item['Number_of_Bids'] is None:\n item_file.write('NULL')\n else:\n item_file.write(f\"{item['Number_of_Bids']}|\")\n \n if 'Started' not in item or item['Started'] is None:\n item_file.write('NULL|')\n else:\n item_file.write(f\"{transformDttm(item['Started'])}|\")\n \n if 'Ends' not in item or item['Ends'] is None:\n item_file.write('NULL|')\n else:\n item_file.write(f\"{transformDttm(item['Ends'])}|\")\n \n item_file.write(f\"\\\"{formatStr(sellerID)}\\\"|\")\n \n if 'Description' not in item or item['Description'] is None:\n item_file.write('NULL')\n else:\n item_file.write(f\"\\\"{formatStr(item['Description'])}\\\"\")\n \n item_file.write(\"\\n\")\n\n\n ## For BIDS table\n if item['Bids'] is not None:\n bids = item['Bids']\n \n # For ecah palced Bid/Row in bid table\n for b in bids:\n bid = b['Bid']\n bidder = bid['Bidder']\n \n bid_file.write(f\"{ID}|\")\n \n if 'UserID' not in bidder or bidder['UserID'] is None:\n bid_file.write(\"NULL|\")\n else:\n bid_file.write(f\"\\\"{formatStr(bidder['UserID'])}\\\"|\")\n user_file.write(f\"\\\"{formatStr(bidder['UserID'])}\\\"|\")\n \n if 'Time' not in bid or bid['Time'] is None:\n bid_file.write(\"NULL|\")\n else:\n bid_file.write(f\"{transformDttm(bid['Time'])}|\")\n \n if 'Amount' not in bid or bid['Amount'] is None:\n bid_file.write(\"NULL\")\n else:\n bid_file.write(f\"{transformDollar(bid['Amount'])}\")\n \n \n ## User table, Bidder\n if 'Rating' not in bidder or bidder['Rating'] is None:\n user_file.write(\"NULL|\")\n else:\n user_file.write(f\"{bidder['Rating']}|\")\n \n if 'Location' not in bidder or bidder['Location'] is None:\n 
user_file.write(\"NULL|\")\n else:\n user_file.write(f\"\\\"{formatStr(bidder['Location'])}\\\"|\")\n \n if 'Country' not in bidder or bidder['Country'] is None:\n user_file.write(\"NULL\")\n else:\n user_file.write(f\"\\\"{formatStr(bidder['Country'])}\\\"\")\n \n bid_file.write('\\n')\n user_file.write('\\n')\n \n ## For User table, sellers\n user_file.write(f\"\\\"{formatStr(sellerID)}\\\"|\")\n \n if 'Rating' not in seller or seller['Rating'] is None:\n user_file.write(\"NULL|\")\n else:\n user_file.write(f\"{seller['Rating']}|\")\n \n if 'Location' not in item or item['Location'] is None:\n user_file.write(\"NULL|\")\n else:\n user_file.write(f\"\\\"{formatStr(item['Location'])}\\\"|\")\n \n if 'Country' not in item or item['Country'] is None:\n user_file.write(\"NULL\")\n else:\n user_file.write(f\"\\\"{formatStr(item['Country'])}\\\"\")\n \n user_file.write('\\n')\n \n ## for Category table\n # Remove duplicate categories for a given item\n for c in list(set(category)):\n category_file.write(f\"{ID}|\")\n category_file.write(f\"\\\"{formatStr(c)}\\\"\")\n category_file.write(\"\\n\")", "def liberapay_json_read(name):\n url = 'https://liberapay.com/{}/public.json'.format(name)\n content = requests.get(url)\n return content.json()", "def read_pilot_timing():\n\n pilot_timing_dictionary = {}\n\n path = os.path.join(os.environ.get('PILOT_HOME', ''), config.Pilot.timing_file)\n if os.path.exists(path):\n pilot_timing_dictionary = read_json(path)\n\n return pilot_timing_dictionary", "def RAW_STRUCTURES() -> List[dict]:\n with open(\n Path(__file__).parent.joinpath(\"raw_test_structures.json\"), \"r\"\n ) as raw_data:\n return json.load(raw_data)", "def _parseJSON(self):\r\n #print self.filename + \"\\n\"\r\n tempMap = json.load(open(self.filename))\r\n self.version = tempMap[\"version\"]\r\n self.data = tempMap[\"Material library\"]", "def _get_bank_info():\n with open(BANK_INFO) as file:\n return json.load(file)", "def jsonread(filename): \n res = None", "def __read(path):\n with open(path, \"r\") as f:\n data = json.load(f)\n logger = __logger()\n logger.info(\"Read the whole json, from path: {}.\".format(path))\n return data", "def read_level_data(filename):\n with open(filename, 'r') as f:\n return json.loads(f.read())", "def read_timetolive(filename):\n if not os.path.isfile(filename):\n return {}\n ttl_file = open(filename, 'r')\n data = json.load(ttl_file)\n ttl_file.close()\n return data", "def read_data(filename):\n\n logger.info(\"Reading stored viewer data from %s\", filename)\n\n with open(filename) as f:\n return json.load(f)", "def load_guests_data():\n data = None\n try:\n with open('guests.json') as datafile:\n data = json.load(datafile)\n except:\n data = None\n\n if data == None:\n data = json.loads('{}')\n \n return data", "def read(self):\n with open(self.filename) as json_file:\n data = json.load(json_file)\n if 'nodes' in data:\n nodes = data['nodes']\n for n in nodes:\n if 'host' in n:\n node = self.add_node(n['host'])\n if 'goal' in n:\n node.set_goal(n['goal'])\n if 'authentication' in data:\n auth = data['authentication']\n if 'username' in auth:\n self.usernm = auth['username']\n if 'password' in auth:\n self.passwd = auth['password']", "def read_json_files():\n\n jsons = dict()\n with open('json_files/config.json') as file:\n data_conf = json.load(file)\n jsons['base_url'] = data_conf['base_url']\n jsons['implicit_wait'] = data_conf['implicit_wait']\n jsons['os'] = data_conf['os']\n jsons['is_headless'] = (data_conf['headless'] == 'True')\n\n with 
open('json_files/state.json') as file:\n data_states = json.load(file)\n jsons['list_states'] = data_states['states']\n\n with open('json_files/district.json') as file:\n jsons['dict_districts'] = json.load(file)\n\n with open('json_files/sub_district.json') as file:\n jsons['dict_sub_districts'] = json.load(file)\n\n with open('json_files/gram_panchayat.json') as file:\n jsons['dict_gram_panchayats'] = json.load(file)\n\n with open('json_files/village.json') as file:\n jsons['dict_villages'] = json.load(file)\n\n return jsons", "def __read_file(self):\r\n \r\n try:\r\n \r\n return gpd.read_file(self.path,encoding='utf-8')\r\n \r\n \r\n except FileNotFoundError as err:\r\n \r\n print(\"File could not be found,ensure you enter a valid geojson file\")\r\n \r\n raise err" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the header for the table
def print_headers(): print("symbol\t count\t price\t\t total") print("-" * 71)
[ "def print_header():\n header = \"| {:<18} | {:<18} | {:<21} | {:<21} |\".format(\"ROLL_NUMBER\",\n \"NAME\",\n \"DATE-OF-BIRTH\",\n \"REGISTRATION_DATE\")\n print(header, '\\n', \"_\"*(len(header)), \"\\n\")", "def print_header():\r\n\tprint('\\n')\r\n\tprint('======================================================================')\r\n\tprint('######## ## ## ######## ## ####### ## ## #### ######## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ##### ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ####### ## ######## ####### ## ## #### ## ')\r\n\tprint('======================================================================')\r\n\tprint('\\n')", "def _Header(numCols):\n return \"\\\\begin{center}\\n\\\\begin{tabular}{\" + \"|c\" * numCols + \"|}\\n\"", "def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")", "def printTableTitle(self):\n print(\"%12s %8s %8s %12s %12s %8s %8s %12s %12s %8s %12s\" %('Time[Myr]','id1','id2','semi[R*]','ecc','kw1(i)','kw2(i)','m1[M*](i)','m2[M*](i)','kw(f)','m[M*](f)'))", "def print_header():\n\n print('------------------------------------')\n print(' CAT FACTORY')\n print('------------------------------------')", "def print_latex_header():\n print(\n \"\"\"\n\\\\documentclass[11pt]{article}\n\\\\usepackage{amsmath}\n\\\\usepackage{fullpage}\n\\\\usepackage{booktabs}\n\\\\begin{document}\n\\\\begin{Large}\n\\\\thispagestyle{empty}\n\\\\sffamily\n\\\\begin{center}\n\\\\begin{tabular}{rrrrrr}\n\\\\toprule\n\\\\multicolumn{1}{c}{mph} & \\\\multicolumn{1}{c}{1mi} & \\\\multicolumn{1}{c}{5k} & \\\\multicolumn{1}{c}{10k} & \\\\multicolumn{1}{c}{half} & \\\\multicolumn{1}{c}{full} \\\\\\\\ \\\\midrule\"\"\"\n )", "def tbl_header():\n header = ['REGION', 'DEL/DUP', 'CNV LENGTH', 'ZSCORE', 'MEAN DEPTH', 'NUMBER OF PROBES', 'TOTAL ALLELES',\n 'POP DEL COUNT', 'POP DEL AF', 'POP DUP COUNT', 'POP DUP AF', 'GENES']\n return header", "def print_data_headers(self):\n for header in self.header_dict.keys():\n print header", "def print_header():\n txt = 'breadth of dirs examined longest pathname deepest directory'\n sys.stdout.write(txt + linesep)", "def draw_header(self, stream, header):\n stream.writeln(header)\n stream.writeln('~' * len(header))\n stream.writeln()", "def print_infoheader():\n\tprint(\" _______.__ _______.\")\n\tprint(\"|_ _|__|.-----.--.--.| __|.----.----.-----.-----.-----.\")\n\tprint(\" | | | || | | ||__ || __| _| -__| -__| |\")\n\tprint(\" |___| |__||__|__|___ ||_______||____|__| |_____|_____|__|__|\")\n\tprint(\" |_____| © P.Bartels - https://www.kangafoo.de\\n\")", "def _generate_header(self):\n margin_str = ' ' * self.column_margin\n top = '┌'\n headings = '│'\n heading_sep = '╞'\n row_sep = '├'\n self._bottom = '└'\n for i, col in enumerate(self.columns, start=1):\n top += ('─' * (col.width + 2 * self.column_margin)\n + ('┐' if i == len(self.columns) else '┬'))\n headings += margin_str + col.get_header_cell() + margin_str + '│'\n heading_sep += ('═' * (col.width + 2 * self.column_margin)\n + ('╡' if i == len(self.columns) else '╪'))\n row_sep += ('─' * (col.width + 2 * self.column_margin)\n + ('┤' if i == len(self.columns) else '┼'))\n self._bottom += ('─' * (col.width + 2 * self.column_margin)\n + ('┘' if i == len(self.columns) else '┴'))\n if self.title:\n self._text_lines.append(self.title)\n self._text_lines.append(top)\n if self.include_headings:\n 
self._text_lines.append(headings)\n self._text_lines.append(heading_sep)\n self._row_separator = row_sep if self.use_row_separators else None", "def format_medical_table_headers(self):\n med_cols = ['B', 'C', 'D', 'E']\n for col in med_cols:\n cell = f'{col}{self.title_final_row + 1}'\n self.format_cell_as_header(cell)", "def printTable(self):\n tableParser = TableParser(DBManOld=self.dbMan,\n tableWidget=self.tableWidget)\n tableParser.writeTable(self.tableNameLabel)", "def report_header(col_names, col_widths):\n s = \"\\nDonor Report\"\n s = \"{}\\n{}\".format(s, report_row_separator(\"=\", col_widths))\n s = \"{}\\n{}\".format(s, format_str(col_names[0], col_widths[0]))\n for n in range(1, len(col_names)):\n s = \"{} | {}\".format(s, format_str(col_names[n], col_widths[n]))\n s = \"{}\\n{}\".format(s, report_row_separator(\"-\", col_widths))\n return s", "def synth_header(self):\n\n header = \"n,imbalanced,num_c,internoiselvl,intranoiselvl,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,kvs_sze,kvs_fsze,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_sze_GT,l2_fsze_GT,l1_sze_GT,l1_fsze_GT,l2_usze_G, th_usze_G,l2_ufsze_G, th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)", "def tableHead(self,page,y):\n ## Now there's the real Table-Stuff\n y = y + self.ymm(1) # first create some distance to top\n\n headlines = ['Lfd','Mitgl.Nr.','Name','Vorname','Aufnahme','Bezahlt','Beitrag','Bezahlt','USt']\n col = 0\n for headline in headlines:\n self.tableCol(page,col,y,headline)\n col = col + 1\n\n y = y + self.ymm(1) # create some distance to the line\n \n for col in range(0,6):\n self.tableCol(page,col,y,'')\n\n page.drawLine(self.xmm(0),y,self.xmm(170),y)\n\n return y", "def table_header(request):\n html = ''\n for field_name in settings.TABLE_COLUMNS:\n col_name = COLUMN_NAMES.get(field_name, None)\n if col_name is None:\n continue\n col_style = settings.COLUMN_STYLES.get(field_name, DEFAULT_CULUMN_STYLE)\n html += '<th data-width=\"{width}\" data-ds=\"{defaultstate}\" id=\"id-col-{col_name}\">{link}</th>'.format(\n width=col_style['width'],\n defaultstate=col_style['default_state'],\n col_name=col_name,\n link=sort_link(request, col_name, field_name))\n return html" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the price from a given URL
def get_price(url): global ALLOWANCE source = "" try: source = requests.get(url).text source = json.loads(source) ALLOWANCE = source["allowance"]["remaining"] except: print("\nError loading {}:\n{}".format(url, source)) return "0" return source["result"]["price"]
[ "def get_price(url, retries=0):\n findprice = '<span id=\"totalPrice\">(.*)</span>'\n\n if retries > 10: return\n try:\n htmlpage = urllib2.urlopen(url).read()\n price = re.findall(findprice, htmlpage)[0]\n int_price = float(price.replace('$','').replace(',',''))\n return int_price\n except IndexError:\n retries += 1\n return get_price(url, retries)", "def get_price(url):\r\n price_list = []\r\n my_soup = make_soup(url)\r\n\r\n try:\r\n this_item_price = my_soup.findAll(class_=\"option-board cp-col-xs-24 cp-col-sm-24 cp-col-md-10\")\r\n discounted_price = this_item_price[0]\r\n discounted = discounted_price.find(class_=\"money-amount sale-price\").text\r\n\r\n original_price = this_item_price[0].find(class_=\"money-amount list-price price-off\")\r\n if original_price is not None:\r\n original = original_price.text\r\n else: # The original price is the discounted price if there is no original price\r\n original = discounted\r\n\r\n currency_regex = re.compile(r'\\D+')\r\n price_regex = re.compile(r'\\d+\\D+\\d+')\r\n\r\n # Find the price and currency using regex\r\n # A list is returned by findall function hence the first item from the list is retrieved\r\n currency = currency_regex.findall(original)[0]\r\n ori_price = price_regex.findall(original)[0]\r\n disc_price = price_regex.findall(discounted)[0]\r\n\r\n price_list.append(currency)\r\n price_list.append(ori_price)\r\n price_list.append(disc_price)\r\n\r\n return price_list\r\n\r\n except:\r\n print(\"PRICE RETRIEVAL FAILED: \", url)", "def get_quote(symbol): \n url = 'http://www.nasdaq.com/symbol/' + symbol\n i = 0\n while True:\n try:\n page = req.urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n price_box = soup.find('div', attrs={'class':'qwidget-dollar'})\n price = float(re.findall(\"\\d+\\.\\d+\",price_box.text)[0])\n break\n except:\n if i == 1: \n price = None\n break\n i += 1\n return price", "def get_investing_data(name, url):\n\n with requests.get(url, headers=HEADERS) as response:\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n latest_data = soup.find(\n \"div\", {\"class\": \"top bold inlineblock\"}).text.strip().split()\n\n price = latest_data[0]\n percentage = latest_data[2]\n\n return (name, price, percentage)", "def ex_get_pricing(self):\r\n action = '/pricing/'\r\n response = self.connection.request(action=action, method='GET')\r\n return response.object", "def get_btcprice():\n bitcoin_api_url = \"https://api.alternative.me/v2/ticker/bitcoin/?convert=CAD\"\n response = requests.get(bitcoin_api_url)\n response_json = response.json()\n price_cad = parse_float(response_json[\"data\"][\"1\"][\"quotes\"][\"CAD\"][\"price\"])\n return price_cad", "def _parse_price(html_chunk):\n price = get_first_content(\n html_chunk.find(\"div\", {\"class\": \"prices\"})\n )\n\n if not price:\n return None\n\n # it is always in format Cena:\\n150kč\n price = dhtmlparser.removeTags(price)\n price = price.split(\"\\n\")[-1]\n\n return price", "def get_price(client, pair:str):\n return float(client.get_recent_trades(symbol=pair, limit=1)[0][\"price\"])", "def get_price(coin, base_currency):\n\n try:\n url = \"https://api.cryptonator.com/api/ticker/{}-{}\".format(\n coin, base_currency)\n request = requests.get(url)\n if request.status_code == 200:\n data = request.json()\n except requests.exceptions.RequestException:\n return \"Coin not found\"\n\n if not data['success']:\n raise Exception(\"Coin not found\")\n else:\n return data['ticker']['price']", "def _extract_sale_price(self, response):\n query = 
queries[\"extract_sale_price\"]\n \n try:\n price = response.css(query).extract()[0]\n except IndexError:\n price = None\n\n return price", "def get_the_price(self, t):\r\n try:\r\n return float(self.price.loc[t])\r\n except:\r\n print(\"couldn't find the price at time of \" + self.ticker + \" \" + t)\r\n return", "def parse_cost_from_html(html):\n try:\n soup = BS4(html)\n article = soup.find(\"article\", class_=\"sub_details\")\n h3 = article.find(\"h3\")\n strong = h3.find(\"strong\")\n text = strong.text.strip()\n text = text.replace(\"$\", \"\")\n text = text[:-1]\n cost = float(text)\n return cost\n except:\n return None", "def getbalance(url):\n return Channel.get(url).our.nValue", "def get_price(search_term):\n try:\n match = process.extractOne(search_term, ITEM_NAMES)[0]\n except TypeError:\n return \"no result found for \" + search_term\n\n match_id = next(id for id\n in OSRS_ITEMS.keys()\n if OSRS_ITEMS[id][\"name\"] == match)\n\n params = {\"i\": match_id, \"a\": \"guidePrice\"}\n result = requests.get(RSBUDDY_API, params=params).json()\n\n price = result[\"overall\"]\n\n # sometimes rsbuddy returns price of 0 if there is not enough traded items.\n # try old price search for those\n if price == 0:\n try:\n match, price = legacy_osrs_price(match)\n except Exception as e:\n print \"grand exchange error:\", e\n return \"no result found for \" + search_term\n else:\n price = \"{:,}\".format(int(price))\n return \"{}: {} gp\".format(match, price)", "def price_oz():\n rs = grequests.get(Gold.URL, timeout=2)\n response = grequests.map([rs], exception_handler=lambda x, y: \"\")[0]\n if hasattr(response, \"status_code\") and response.status_code == 200:\n return float(response.json()[0].split(\",\")[1])\n return 0.0", "def get_price(price_id):\n price = storage.get(Price, price_id)\n if not price:\n abort(404)\n return jsonify(price.to_dict())", "def test_get_price(self):\n self.assertEqual(get_price('unused_string'), 24)", "def get_price(self):\n\t\treturn self._price_p_night", "def price_on_amazon(self):\n #Getting html page data from amazon url\n res = requests.get(self.url_data[1], headers=self.headers)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, 'lxml')\n #Filtering the data\n price = soup.find(\"span\", {\"id\": \"priceblock_dealprice\"})\n if price == None:\n price = soup.find(\"span\", {\"id\": \"priceblock_ourprice\"})\n if price == None:\n return ['Not Found', 0]\n product_name = soup.find(\"span\", \n {\"id\": \"productTitle\"}).text.replace('\\n', '')\n #Purifying filtered data and converting into desired format\n price = price.text\n price = price.split('.')[0]\n price = price.replace(',', '')\n current_price = ''\n if not price.isnumeric():\n price = price[1:]\n for txt in list(price):\n if txt in [str(i) for i in range(10)]:\n current_price += txt\n \n data = [f\"{product_name[:35]}...\", int(current_price)]\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic that sets keys from state definition that are used to uniquely identify IAM Roles
def _set_unique_keys(self): self.unique_keys = IAMRole.UNIQUE_KEYS
[ "def generate_state_key(self, state, role):\n\n pass", "def id_replace(self):\n aws_lookup = self.lookup()\n var_lookup_list = pcf_util.find_nested_vars(self.desired_state_definition, var_list=[])\n for (nested_key, id_var) in var_lookup_list:\n if id_var[0] == \"lookup\":\n resource = id_var[1]\n names = id_var[2].split(':')\n var = aws_lookup.get_id(resource, names)\n pcf_util.replace_value_nested_dict(curr_dict=self.desired_state_definition,\n list_nested_keys=nested_key.split('.'), new_value=var)", "def setAttributePermission( self, state_id, attr_id, permission, acquired, roles ):\n # in this context acquired means 'take all from state or superCategory`s attribute'\n # so if acquired - drop all roles for permission\n \n sapr = self.state_attr_permission_roles\n if sapr is None:\n self.state_attr_permission_roles = sapr = PersistentMapping()\n if acquired:\n roles = [] #list(roles)\n else:\n roles = tuple(roles)\n \n pr = sapr.get( (state_id, attr_id), {})\n pr[permission] = roles\n sapr[ (state_id, attr_id) ] = pr", "def test_putroles_item(self):\n pass", "def _update_state_key(self, old_state_key, action, elapsed_time):\n pass", "def _parse_roles(self):\n roles = {}\n for keystone_role, flask_role in self.config.roles.items():\n roles.setdefault(flask_role, set()).add(keystone_role)\n return roles", "def update_roles():\n sub_id = kv().get('charm.azure.sub-id')\n known_roles = {}\n for role_file in Path('files/roles/').glob('*.json'):\n role_name = role_file.stem\n role_data = json.loads(role_file.read_text())\n role_fullname = role_data['Name'].format(sub_id)\n scope = role_data['AssignableScopes'][0].format(sub_id)\n role_data['Name'] = role_fullname\n role_data['AssignableScopes'][0] = scope\n try:\n # assume already exists, so try updating first\n _azure('role', 'definition', 'update',\n '--role-definition', json.dumps(role_data))\n log('Updated existing role {}', role_fullname)\n except DoesNotExistAzureError:\n # doesn't exist, so create\n _azure('role', 'definition', 'create',\n '--role-definition', json.dumps(role_data))\n log('Created new role {}', role_fullname)\n known_roles[role_name] = role_fullname\n kv().set('charm.azure.roles', known_roles)", "def Update_AccessKey(iam,username: str,status: str,key_ind=0):\n\t\t\t\taccesskeyid = AWS.IAM.User.List_AccessKeys(iam,username)[key_ind].id\n\t\t\t\treturn iam.resource.meta.client.update_access_key(UserName=username,AccessKeyId=accesskeyid,Status=status)", "def __init__(self,RoleName=\"\"):\n self.RoleName=RoleName\n self.client = boto3.client(\"iam\")\n self.RoleArn=self.get_rolearn()", "def init_role(role): # -> None:\n ...", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ALTERNATE': {'@module': 
'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}},), is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-spanning-tree:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}},), is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "def test_roles_update(self):\n pass", "def createNewState(self,name):\n self.state[\"name\"]=name\n self.state[\"version\"]=1\n self.state[\"asics\"]=[]", "def generate_state():\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in range(32))\r\n login_session['state'] = state", "def _init_role(self):\n assume_role_policy_document = _json_dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": {\n \"Effect\": \"Allow\",\n \"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\n \"Action\": \"sts:AssumeRole\"}})\n\n iam_resource = self._session.resource('iam')\n with _ExceptionHandler.catch(filter_error_codes='EntityAlreadyExists'):\n role = iam_resource.create_role(\n RoleName=self._role,\n AssumeRolePolicyDocument=assume_role_policy_document,\n Description=_utl.gen_msg('accelize_generated'))\n\n _get_logger().info(\n _utl.gen_msg('created_named', 'IAM role', role))\n\n iam_client = self._session.client('iam')\n arn = iam_client.get_role(RoleName=self._role)['Role']['Arn']\n\n return arn", "def invalidate_role_names_cache(mapper, content, target):\n # pylint: disable=unused-argument\n if hasattr(flask.g, \"global_role_names\"):\n del flask.g.global_role_names", "def 
create_iam_role(self):\n # 1.1 Create the role,\n try:\n print(\"1.1 Creating a new IAM Role\")\n self.iam_client.create_role(\n Path='/',\n RoleName=self.dwh_iam_role_name,\n Description=\"Allows Redshift clusters to call AWS services on your behalf.\",\n AssumeRolePolicyDocument=json.dumps(\n {'Statement': [{'Action': 'sts:AssumeRole',\n 'Effect': 'Allow',\n 'Principal': {'Service': 'redshift.amazonaws.com'}}],\n 'Version': '2012-10-17'})\n )\n except Exception as e:\n print(e)\n print(\"1.2 Attaching Policy\")\n # Attaching s3 ReadOnly Policy\n self.iam_client.attach_role_policy(RoleName=self.dwh_iam_role_name,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\")\n print(\"1.3 Get the IAM role ARN\")\n role_arn = self.iam_client.get_role(RoleName=self.dwh_iam_role_name)['Role']['Arn']\n self.role_arn = role_arn\n # self.config[\"IAM_ROLE\"] = {\"ARN\": role_arn}\n self.config.set(\"IAM_ROLE\", \"arn\", role_arn)\n with open(\"dwh.cfg\", \"w+\") as config_file:\n self.config.write(config_file)", "def _get_role_arn(self):\n if self.stack.cloudformation_service_role:\n return {\"RoleARN\": self.stack.cloudformation_service_role}\n else:\n return {}", "def __configureAWSKeyID__(self, AWSKeyID):\n #create AWS CLI command\n cmd=\"aws configure set aws_access_key_id \" + AWSKeyID\n\n try:\n output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n arcpy.addError('Configure not successful. Please make sure you have installed the AWS CLI.')\n self.__sm__(e.output, 'ERROR')\n arcpy.AddError(e.output)\n tb = traceback.format_exc()\n self.__sm__(tb, 'ERROR')\n arcpy.AddError(tb)\n sys.exit()\n else:\n self.__sm__('Finished configuring AWS Key ID')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for IAM Policy parents and sets the IAM Policy IDs
def get_iam_policies(self):
    desired_policy = self.custom_config.get('policy_arns', [])
    if len(self.parents) > 0:
        iam_policy_parents = list(filter(lambda x: x.flavor == IAMPolicy.flavor, self.parents))
        if iam_policy_parents:
            for policy_parent in iam_policy_parents:
                policy_parent.sync_state()
                if policy_parent.policy_arn not in desired_policy:
                    self.custom_config['policy_arns'].append(policy_parent.policy_arn)
[ "def process_parents(self):\n\t\tfor student in self.students.get_objects():\n\t\t\tparent = self.parents.make_parent(student)\n\t\t\tstudent.add_parent(parent)", "def setParents(self,parents):\n if not isinstance(parents, list):\n raise TypeError, utils.mapping(_(\"Parents ($1) must be a list: $2\"),\n (str(parents), self.__code))\n for parent in parents:\n if not utils.is_valid_code(parent)[0]:\n raise ValueError, utils.mapping(_(\"Invalid parent code ($1) \" \\\n \"in the record: $2\"), (str(padre), self.__code))\n self.__parents = parents", "def set_parent_exercises(self, parents):\n # Set the new list of parents\n self.parent_exercises = parents\n\n # Apply the changes to parents and subtypes\n self.data['p1'] = self.data['ex1'].apply(self.get_parent)\n self.data['p2'] = self.data['ex2'].apply(self.get_parent)\n self.data['s1'] = self.data['ex1'].apply(self.get_parent)\n self.data['s2'] = self.data['ex2'].apply(self.get_parent)", "def parents(self, request, pk=None):\n if(request.method != 'GET'):\n new_parents = [get_object_or_404(models.Object, id=i) for i in request.data]\n\n obj = self.get_object()\n serializer = self.serializer_class(obj, context=dict(request=request))\n \n if(request.method == 'GET'):\n parents = obj.parents.all()\n else:\n new_parents = serializer.validate_parents(new_parents)\n if request.method in ('POST', 'PUT'):\n models.Relationship.objects.filter(child=obj).delete()\n models.Relationship.objects.bulk_create([\n models.Relationship(child=obj, parent=p) for p in new_parents\n ])\n parents = new_parents\n elif request.method == 'PATCH':\n models.Relationship.objects.bulk_create([\n models.Relationship(child=obj, parent=p) for p in new_parents\n ])\n parents = obj.parents.all()\n \n return response.Response([p.id for p in parents])", "def set_parent_id(self, parent_id):\n pass", "def test__put_parent_id_into():\n for input_, defaults, expected_output in (\n (0, False, {}),\n (0, True, {'parent_id': None}),\n (1, False, {'parent_id': '1'}),\n ):\n data = put_parent_id_into(input_, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _check_priorities(self) -> None:\n\n priority_dict = defaultdict(list)\n for p in self.policies:\n priority_dict[p.priority].append(type(p).__name__)\n\n for k, v in priority_dict.items():\n if len(v) > 1:\n logger.warning(\n (\n \"Found policies {} with same priority {} \"\n \"in PolicyEnsemble. When personalizing \"\n \"priorities, be sure to give all policies \"\n \"different priorities. 
More information: \"\n \"{}/core/policies/\"\n ).format(v, k, DOCS_BASE_URL)\n )", "def process_parent_links(self):\n\t\tfor student in self.students.get_objects():\n\t\t\tfor parent in student.parents:\n\t\t\t\tself.parent_links.make_parent_link(parent.idnumber, student.idnumber)", "def _make_policies(self):\r\n self.policies = [AutoScalePolicy(self.manager, dct, self)\r\n for dct in self.scalingPolicies]", "def walk_parents(self):\n active = self.parent_datasets[:]\n while active:\n d = active.pop()\n yield d\n active += d.parent_datasets", "def build_policy(bucket, src_policy, ids):\n if not src_policy:\n src_policy = '{ \"Version\" : \"2012-10-17\", \"Statement\" : [] }'\n jpolicy = json.loads(src_policy)\n\n for aid in ids:\n stmt = {\n \"Sid\" : aid,\n \"Action\" : \"s3:ListBucket\",\n \"Effect\" : \"Deny\",\n \"Resource\" : \"arn:aws:s3:::\" + bucket,\n \"Principal\" : { \"AWS\" : [ aid ] }\n }\n jpolicy[\"Statement\"].append(stmt.copy())\n\n if DEBUG:\n print(\"--\", \"Constructed policy:\", jpolicy)\n\n return json.dumps(jpolicy)", "def set_parents(tower):\n\tfor key in tower:\n\t\tfor child in tower[key].children:\n\t\t\ttower[child].parent = tower[key].name\n\treturn tower", "def propagateParents(currentTerm, baseGOid, GOdict, parentSet):\n\n # If current term has no further parents the recursion will end and move back up the stack,\n # since there are no more parents left to iterate over (because looping through None does nothing).\n parents = GOdict.get(currentTerm).parents\n\n # For each parent of the current term under consideration\n for parent in parents:\n # Check if parent is present in GO dictionary\n # This is required because namespace filtering might lead to parent terms\n # that are no longer present as GOterm objects themselves.\n if parent in GOdict:\n # Add current term's parents to growing set\n parentSet.add(parent)\n # and recurse function for each parent\n propagateParents(parent, baseGOid, GOdict, parentSet)\n\n else:\n # Print a warning that a parent term was reported for the original base term,\n # yet the term is absent from the gene ontology file\n print('WARNING!\\n' + parent, 'was defined as a parent for',\n baseGOid, ', but was not found in the OBO file.\\n')\n\n return None", "def get_parents(self, id_):\n return # osid.id.IdList", "def __set_parent(self):\n\n guides = self.__validate()\n\n if len(guides) >= 2:\n\n children = guides[:-1]\n parent = guides[-1]\n for child in children:\n guide.set_parent(child, parent)\n\n cmds.select(parent.node, r=True)", "def do_parenting(self):\n self.ik_handle.setParent(self.ik_ctrl)\n self.pole_locator.setParent(self.pv_ctrl)\n self.fk_ctrls[0].set_parent(self.root_grp)\n self.ik_ctrl.set_parent(self.root_grp)\n self.ik_chain.set_parent(self.root_grp)\n self.fk_chain.set_parent(self.root_grp)\n self.sw_chain.set_parent(self.root_grp)\n self.pv_ctrl.set_parent(self.root_grp)\n self.switch.set_parent(self.sw_chain[-1])\n transform.lock(self.fk_ctrls, rotate=False)\n transform.lock(self.ik_ctrl, translate=False, rotate=False)\n transform.lock(self.switch)", "def iter_parents(content: IResource) -> typing.Iterator[IResource]:\n content = getattr(content, '__parent__', None)\n while content is not None:\n yield content\n content = getattr(content, '__parent__', None)", "def add_parents(go, identifier: str, graph: BELGraph, child: BaseEntity):\n for _, parent_identifier in go.out_edges(identifier):\n graph.add_is_a(child, gobp(go, identifier))", "def _read_parents(self):\n return set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if states are equivalent. Uses equivalent_states defined in the IAMRole class.
def is_state_equivalent(self, state1, state2): return IAMRole.equivalent_states.get(state1) == IAMRole.equivalent_states.get(state2)
[ "def _are_equal_states(\n self,\n state1: Dict[str, Any],\n state2: Dict[str, Any],\n ) -> bool:\n if set(state1.keys()) != set(state2.keys()):\n return False\n for state_name, value1 in state1.items():\n value2 = state2[state_name]\n if type(value1) != type(value2):\n return False\n if torch.is_tensor(value1): # tensor state\n assert torch.is_tensor(value2)\n # Check the values on CPU to be device-agnostic\n value1 = value1.cpu()\n value2 = value2.cpu()\n if value1.shape != value2.shape or not torch.all(\n torch.isclose(value1, value2)\n ):\n return False\n else: # non-tensor state\n if value1 != value2:\n return False\n return True", "def check_if_same_states(s1, s2):\n return np.any(np.isclose(np.mean(np.square(s1-s2), axis=(1, 2)), 0))", "def testSpecialEquivalence(self):\n\t\tself.assertTrue(electronStates['2'].equivalent(electronStates['2S']))\n\t\tself.assertTrue(electronStates['2'].equivalent(electronStates['2T']))\n\t\tself.assertTrue(electronStates['2S'].equivalent(electronStates['2']))\n\t\tself.assertTrue(electronStates['2T'].equivalent(electronStates['2']))", "def computeEquivalence(self):\n\t\tdone = {}\n\t\tself.groups = []\n\t\tfor s1 in self.states.values():\n\t\t\tif s1.name not in done:\n\t\t\t\tnewGroup = [s1]\n\t\t\t\tdone[s1.name] = True\n\t\t\t\tfor s2 in self.states.values():\n\t\t\t\t\tif s2.name not in done and s1.isEquivalent(s2):\n\t\t\t\t\t\tnewGroup.append(s2)\n\t\t\t\t\t\tdone[s2.name] = True\n\t\t\t\tself.groups.append(newGroup)\n\n\t\tfor i in range(len(self.groups)):\n\t\t\tself.groups[i] = sorted(self.groups[i],\n\t\t\t\t\t\t\t\t\tkey=operator.attrgetter(\"name\"))\n\n\t\tself.groups = sorted(self.groups,key=lambda x: x[0].name)", "def is_state_definition_equivalent(self):\n self.get_state()\n self.current_state_definition = pcf_util.param_filter(self.current_state_definition, NotebookInstance.START_PARAM_FILTER)\n desired_definition = pcf_util.param_filter(self.desired_state_definition, NotebookInstance.START_PARAM_FILTER)\n\n diff_dict = pcf_util.diff_dict(self.current_state_definition, desired_definition)\n return diff_dict == {}", "def compare_states(self, old_state, new_state):\n return old_state['color'] != new_state['color'] or \\\n old_state['position'] != new_state['position'] or \\\n old_state['load'] != new_state['load'] or \\\n (abs(new_state['depth'] - self._dali_depth) >= 0.02)", "def are_inverse(self,a,b):\n return self._inverse[a] == b", "def __eq__(self,other_state):\n \n if type(self) == type(other_state):\n return self.__members() == other_state.__members()\n else:\n return False", "def equal_position(self, other):\n assert isinstance(other, MStarState)\n for i, single_state in enumerate(self._single_agents_states):\n if not single_state.equal_position(other.get_single_agent_states()[i]):\n return False\n return True", "def affectsState(self) -> \"SbBool\":\n return _coin.SoMultipleCopy_affectsState(self)", "def test_equivalent(self):\n op1 = And(BoolVar(), PedestriansCrossingRoad())\n op2 = And(PedestriansCrossingRoad(), BoolVar())\n op3 = And(DriversAwaitingGreenLightVar(), BoolVar())\n\n op1.check_equivalence(op2)\n op2.check_equivalence(op1)\n\n assert_raises(AssertionError, op1.check_equivalence, op3)\n assert_raises(AssertionError, op2.check_equivalence, op3)\n assert_raises(AssertionError, op3.check_equivalence, op1)\n assert_raises(AssertionError, op3.check_equivalence, op2)\n\n ok_(op1 == op2)\n ok_(op2 == op1)\n ok_(op1 != op3)\n ok_(op2 != op3)\n ok_(op3 != op1)\n ok_(op3 != op2)", "def tibbles_are_equivalent(A, B):\n \n 
A_copy = A.copy()\n B_copy = B.copy()\n \n Atib = canonicalize_tibble(A_copy)\n Btib = canonicalize_tibble(B_copy)\n \n \n return Atib.equals(Btib)", "def check_state_change(self, pre_state):\n logger.debug(\"Checking to see if state has been positively changed\")\n logger.debug(\"preState: %s\", pre_state)\n logger.debug(\"postState: %s\", self.state)\n\n result = {}\n result[\"target_access\"] = self.check_target_access(pre_state)\n result[\"role_esc\"] = self.check_role_esc(pre_state)\n result[\"access_esc\"] = self.check_access_esc(pre_state)\n result[\"new_cred\"] = self.check_new_credential(pre_state)\n result[\"sensitive\"] = self.check_sensitive()\n result[\"goal\"] = self.check_goal_reached()\n\n return result", "def compare_featurized_states(\n states1: List[Dict[Text, List[Features]]], states2: List[Dict[Text, List[Features]]]\n) -> bool:\n\n if len(states1) != len(states2):\n return False\n\n for state1, state2 in zip(states1, states2):\n if state1.keys() != state2.keys():\n return False\n for key in state1.keys():\n for feature1, feature2 in zip(state1[key], state2[key]):\n if np.any((feature1.features != feature2.features).toarray()):\n return False\n if feature1.origin != feature2.origin:\n return False\n if feature1.attribute != feature2.attribute:\n return False\n if feature1.type != feature2.type:\n return False\n return True", "def test_state_distinguishability_two_states():\n e_0, e_1 = basis(2, 0), basis(2, 1)\n e_00 = e_0 * e_0.conj().T\n e_11 = e_1 * e_1.conj().T\n states = [e_00, e_11]\n probs = [1 / 2, 1 / 2]\n\n res = state_distinguishability(states, probs)\n np.testing.assert_equal(np.isclose(res, 1), True)", "def possible(self,act,state_asst):\n return all(state_asst[pre] == act.preconds[pre]\n for pre in act.preconds)", "def assert_state_equals(self, manifest, assertion, expect=True):\n\n # TODO IMPLEMENT THIS METHOD\n\n pass", "def _is_equivalent(self, other, equivalencies=[]):\n if isinstance(other, UnrecognizedUnit):\n return False\n\n if self._get_physical_type_id() == other._get_physical_type_id():\n return True\n elif len(equivalencies):\n unit = self.decompose()\n other = other.decompose()\n for a, b, forward, backward in equivalencies:\n if b is None:\n # after canceling, is what's left convertible\n # to dimensionless (according to the equivalency)?\n try:\n (other / unit).decompose([a])\n return True\n except Exception:\n pass\n elif (a._is_equivalent(unit) and b._is_equivalent(other)) or (\n b._is_equivalent(unit) and a._is_equivalent(other)\n ):\n return True\n\n return False", "def test_unambiguous_state_distinguishability_two_states():\n e_0, e_1 = basis(2, 0), basis(2, 1)\n e_00 = e_0 * e_0.conj().T\n e_11 = e_1 * e_1.conj().T\n states = [e_00, e_11]\n probs = [1 / 2, 1 / 2]\n\n res = state_distinguishability(states, probs, dist_method=\"unambiguous\")\n np.testing.assert_equal(np.isclose(res, 1), True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the ipaFeatures dict with added ipa_symbols and description.
def final_ipa_dict() -> dict:
    ipa_dict = ipaFeatures_dict()
    with open("ipa_new.csv", "r") as f:
        f.readline()
        for row in f:
            row = row.split(",")
            if row[0] not in ipa_dict and len(row[0].split()) == 1:
                ipa_dict[row[0]] = [row[0], row[2].rstrip(), row[1]]
            elif len(row[0].split()) == 1:
                ipa_dict[row[0]] = [row[0], row[2].rstrip(), ipa_dict[row[0]]]
            else:
                symbols = row[0].split()
                for symbol in symbols:
                    if symbol in ipa_dict:
                        symbol_in_dict = symbol
                        break
                for symbol in symbols:
                    ipa_dict[symbol] = [row[0], row[2].rstrip(), ipa_dict[symbol_in_dict]]
    return ipa_dict
[ "def e_features(filename):\n features = {}\n try:\n pe = pefile.PE(filename, fast_load=False)\n\n # File Header\n features['filename'] = os.path.basename(filename)\n features['machine'] = pe.FILE_HEADER.Machine\n features['number of sections'] = pe.FILE_HEADER.NumberOfSections\n features['compile date'] = pe.FILE_HEADER.TimeDateStamp\n features['pointer to symbol table'] = pe.FILE_HEADER.PointerToSymbolTable\n features['number of symbols'] = pe.FILE_HEADER.NumberOfSymbols\n features['size of optional header'] = pe.FILE_HEADER.SizeOfOptionalHeader\n features['characteristics'] = pe.FILE_HEADER.Characteristics\n\n # Optional Header\n features['magic'] = pe.OPTIONAL_HEADER.Magic\n features['major linker version'] = pe.OPTIONAL_HEADER.MajorLinkerVersion\n features['minor linker version'] = pe.OPTIONAL_HEADER.MinorLinkerVersion\n features['size of code'] = pe.OPTIONAL_HEADER.SizeOfCode\n features['size init data'] = pe.OPTIONAL_HEADER.SizeOfInitializedData\n features['size uninit data'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData\n features['entry point address'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint\n features['base of code'] = pe.OPTIONAL_HEADER.BaseOfCode\n if hasattr(pe.OPTIONAL_HEADER, 'BaseOfData'):\n features['base of data'] = pe.OPTIONAL_HEADER.BaseOfData\n features['image base'] = float(pe.OPTIONAL_HEADER.ImageBase)\n features['section alignment'] = pe.OPTIONAL_HEADER.SectionAlignment\n features['file alignment'] = pe.OPTIONAL_HEADER.FileAlignment\n features['major operating system version'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion\n features['minor operating system version'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion\n features['major image version'] = pe.OPTIONAL_HEADER.MajorImageVersion\n features['minor image version'] = pe.OPTIONAL_HEADER.MinorImageVersion\n features['major subsystem version'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion\n features['minor subsystem version'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion\n features['size of image'] = pe.OPTIONAL_HEADER.SizeOfImage\n features['size of headers'] = pe.OPTIONAL_HEADER.SizeOfHeaders\n features['checksum'] = pe.OPTIONAL_HEADER.CheckSum\n features['subsystem'] = pe.OPTIONAL_HEADER.Subsystem\n features['dll charactersitics'] = pe.OPTIONAL_HEADER.DllCharacteristics\n features['size of stack reserve'] = float(pe.OPTIONAL_HEADER.SizeOfStackReserve)\n features['size of stack commit'] = float(pe.OPTIONAL_HEADER.SizeOfStackCommit)\n features['size of heap reserve'] = float(pe.OPTIONAL_HEADER.SizeOfHeapReserve)\n features['size of heap commit'] = float(pe.OPTIONAL_HEADER.SizeOfHeapCommit)\n features['loader flags'] = pe.OPTIONAL_HEADER.LoaderFlags\n features['number of rva and sizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes\n\n # Data directory\n datadirs = {0: 'export table', 1: 'import table',\n 2: 'resource table', 3: 'exception table',\n 5: 'base relocation', 6: 'debug',\n 9: 'tls table', 12: 'import address table'}\n\n data_directories = {}\n for idx, datadir_name in datadirs.items():\n if len(pe.OPTIONAL_HEADER.DATA_DIRECTORY) <= idx:\n continue\n\n directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[idx]\n features['data dir ' + datadir_name + ' size'] = directory.Size\n features['data dir ' + datadir_name + ' rva'] = directory.VirtualAddress\n\n\n # Resource Entry (grab first two)\n if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):\n resources = []\n index = 0\n for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:\n if resource_type.name is not None:\n name = \"%s\" % resource_type.name\n else:\n name = \"%s\" % 
pefile.RESOURCE_TYPE.get(resource_type.struct.Id)\n if name == None:\n name = \"%d\" % resource_type.struct.Id\n if hasattr(resource_type, 'directory'):\n for resource_id in resource_type.directory.entries:\n if hasattr(resource_id, 'directory'):\n for resource_lang in resource_id.directory.entries:\n resource = {}\n if hasattr(resource_lang, 'data'):\n try:\n features['resource ' + str(index) + ' rva'] = resource_lang.data.struct.OffsetToData\n features['resource ' + str(index) + ' size'] = resource_lang.data.struct.Size\n features['resource ' + str(index) + ' lang'] = resource_lang.data.lang\n index += 1\n if index == 2:\n break\n except pefile.PEFormatError as pfe:\n pass\n if index == 2:\n break\n if index == 2:\n break\n\n except Exception as e:\n print \"Error processing %s - %s\" % (filename, str(e))\n #print traceback.format_exc()\n\n return features", "def builtin_features(self) -> Dict:\n return self._builtin_features", "def features_descriptions(self):\n return self.features.descriptions()", "def feature_info(self):\n feature_list = self.prop('available-features-list', None)\n if feature_list is None:\n raise ValueError(\"Firmware features are not supported on CPC {}\"\n .format(self.name))\n return feature_list", "def list_features():\n options_list = []\n plugins_list = []\n obj = {\n 'pyocd_version' : __version__,\n 'version' : { 'major' : 1, 'minor' : 1 },\n 'status' : 0,\n 'features' : [\n {\n 'name': 'plugins',\n 'plugins': plugins_list,\n },\n ],\n 'options' : options_list,\n }\n\n # Add plugins\n plugins = ListGenerator.list_plugins()\n plugins_list.extend(plugins['plugins'])\n\n # Add options\n for option_name in options.OPTIONS_INFO.keys():\n info = options.OPTIONS_INFO[option_name]\n option_dict = {\n 'name' : option_name,\n 'default' : info.default,\n 'description' : info.help,\n }\n try:\n types_list = []\n for t in info.type:\n types_list.append(t.__name__)\n except TypeError:\n types_list = [info.type.__name__]\n option_dict['type'] = types_list\n options_list.append(option_dict)\n\n return obj", "def collect_features_content(instances, inst_idx_lst):\n fea_dict = {}\n for i, inst_dscrpt in enumerate(instances):\n if inst_idx_lst and i not in inst_idx_lst:\n continue\n ufo_pth = inst_dscrpt.path\n ufo_pth = os.path.abspath(os.path.realpath(ufo_pth))\n fea_pth = os.path.join(ufo_pth, FEATURES_FILENAME)\n if os.path.isfile(fea_pth):\n with open(fea_pth, 'r') as fp:\n fea_cntnts = fp.read()\n fea_dict[fea_pth] = fea_cntnts\n return fea_dict", "def _getFeatDict(mol, featFactory, features):\n molFeats = {}\n for feat in features:\n family = feat.GetFamily()\n if family not in molFeats:\n matches = featFactory.GetFeaturesForMol(mol, includeOnly=family)\n molFeats[family] = matches\n return molFeats", "def preprocess(self, features):\n\n features.update(self.preprocessor(features))\n features['f0_midi'] = ddsp.core.hz_to_midi(features['f0_hz'])\n features['db'] = features[self.db_key]\n\n return features", "def contextual_feature_map(self, features):\n return features", "def generate_features(self, job_info):\n\t\tfeatures = {}\n\t\tfor feature in self.get_feature_names():\n\t\t\tfeature_function = \"add_feature_%s\" % feature\n\t\t\tself.logger.debug(\"Adding feature %s\" % feature)\n\t\t\ttry:\n\t\t\t\tfeatures[\"%s.%s\" % (self.name, feature)] = getattr(self, feature_function)(job_info)\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.error(\"Unable to add feature %s: %s\" % (feature, str(e)))\n\t\treturn features", "def get_cli_feature_map(self) -> dict:\n\n # 
index :(function, function_description)\n function_mapper = {\n \"1\": (self.download_all_survey_data, \"Download vw_AllSurveyData\"),\n \"2\": (self.update_vw_AllSurveyData, \"Update vw_AllSurveyData\"),\n \"3\": (self.run_select_query, \"Run Custom SELECT Query\"),\n \"4\": (self.exit, \"Exit\"),\n }\n\n return function_mapper", "def _get_feature_dict(post):\n # Make sure the post is an XML element.\n assert type(post) is Element\n\n feature_dict = {}\n tokenized_words = nltk.word_tokenize(post.text)\n\n # Establish all features\n feature_dict['first-word'] = tokenized_words[0]\n feature_dict['ends-with-question'] = tokenized_words[-1] == '?'\n feature_dict['ends-with-exclamation'] = tokenized_words[-1] == '!'\n feature_dict['is-upcase'] = post.text.isupper()\n\n for word in tokenized_words:\n feature_dict['contains({})'.format(word)] = True\n\n return feature_dict", "def feature_options(self) -> Dict:\n return self._feature_options", "def features(self):\n return self.__features", "def feature_list(self):\n return self.features.features()", "def GetFeatures(self):\n return json.dumps(FEATURES)", "def mglia_features(image_file):\n\n X\n Y\n perimeter\n total_area\n soma_area\n eccentricity\n inertia_tensor\n label\n max_intensity\n mean_intensity\n moments\n solidity\n total_processes\n avg_p_length\n main_process\n\n return mglia_features", "def highlevelfeaturesRetriever(self):", "def vocation_features(self):\n return self.vocation.features" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Concise method for how long to wait for browser to load ... in seconds.
def wait_for(self, seconds):
    if seconds < 0:
        return self.browser.implicitly_wait(5)
    return self.browser.implicitly_wait(seconds)
[ "def remainingTimeToWait(self) -> int:\n ...", "def time_to_interactive(self) -> float:\n return self.navigation_timing.dom_complete", "def loading_time_secs(self):\n return self.__loading_time", "def wait_for_page(self, url=None) -> None:\r\n script = 'return document.readyState === \"complete\";'\r\n if url is None:\r\n url = self.last_link\r\n try:\r\n WebDriverWait(self.driver, LONG_WAIT).until(\r\n lambda _: url in self.current_url())\r\n WebDriverWait(self.driver, LONG_WAIT).until(\r\n lambda _: self.driver.execute_script(script))\r\n except TimeoutException:\r\n raise TimeoutException(\r\n 'Timed out waiting for {0} to load.'.format(url)) from None", "def get_xpath_wait_timeout(self):\n\n if \"_xpath_wait_timeout\" in self.__dict__:\n return self._xpath_wait_timeout\n else:\n return 30.0", "def set_page_load_timeout(self, timeout):\n self._selenium_web_driver().set_page_load_timeout(timeout / 1000.0)", "def page_load_time(self) -> float:\n return self.navigation_timing.load_event_end - self.navigation_timing.start_time", "def get_remaining_page_time(self):\n if (self.state == PageState.ACTIVE and\n self.config['page_timeout'] > 0):\n remaining = (float(self.config['page_timeout'])-\n (time.clock()-self.start_page_clock_time))\n if remaining > 0:\n return remaining\n else:\n return 0\n return -1", "def _enforce_crawl_delay(self) -> None:\n time_delta = datetime.datetime.now() - self.last_crawl # Time since last request\n td_seconds = time_delta.total_seconds()\n if self.crawl_delay > td_seconds:\n # Not enough time has yet elapsed since the last request..\n remaining_time = self.crawl_delay - td_seconds\n print(f'Need to wait another {remaining_time} seconds for the crawl delay..', datetime.datetime.now())\n time.sleep(remaining_time)", "def _http_lock_wait_time(self):\r\n if self._http_lock_wait_begin == 0:\r\n return 0\r\n if self._http_lock_wait_end == 0:\r\n return time.time() - self._http_lock_wait_begin\r\n return self._http_lock_wait_end - self._http_lock_wait_begin", "def seconds_to_sleep(self):\n if self.next_request_timestamp is None:\n return\n sleep_seconds = self.next_request_timestamp - time.time()\n if sleep_seconds <= 0:\n return\n return sleep_seconds", "def _wait_to_appear(self, *args, timeout=\"15s\"):\n\n try:\n self.selenium.wait_for_condition(\n \"return (document.readyState == 'complete')\"\n )\n self.salesforce.wait_for_aura()\n self.builtin.wait_until_keyword_succeeds(\n timeout,\n \"2s\",\n \"Current page should be\",\n self._page_type,\n self._object_name,\n )\n except Exception as e:\n self.builtin.log(e, \"DEBUG\")\n raise Exception(\n f\"Page object {self._page_type} {self._object_name} did not appear before timeout ({timeout}) expired.\"\n )", "def wait_until_page_loads(self, new_page_title, timeout = 5):\n\n element = WebDriverWait(self.driver, timeout).until(EC.title_contains(new_page_title))\n element = WebDriverWait(self.driver, timeout).until(EC.text_to_be_present_in_element_value(\n (By.NAME, \"page-load-status\"), 'done'))", "def wait_for_page_loaded(self):\n self.wait_for(lambda: self.loaded and len(self._unsupported_files.keys()) == 0,\n 'Unable to load requested page')\n \n return self.get_loaded_page()", "def time_until(self, cookies):\n\n time = 0.0\n if self._cookies >= cookies:\n return 0.0\n else: \n time = math.ceil((cookies - self._cookies) / self._cps)\n \n #print \"type\", type(time)\n return time", "def wait_for_time():\n while rospy.Time().now().to_sec() == 1:\n pass", "def set_wait(self, wait: int) -> None:\r\n global 
LONG_WAIT\r\n LONG_WAIT = wait\r\n self.driver.implicitly_wait(wait)", "def render_timeout(self):\n return self.settings.setdefault('render_timeout', 0)", "def set_timeouts_safety(self, timeout):\n\n self.set_page_load_timeout(timeout)\n # Command timeout value be setted to 45 seconds\n # FIXME: Seems the whole webdriver broken if command executor timeout!\n self.command_executor.set_timeout(timeout + 15.0)\n self.set_xpath_wait_timeout(timeout)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A utilty/shortcut method to fetch the comment_form container on a page.
def get_comment_form(self):
    comment_form = self.browser.find_element_by_id('comment_form')
    return comment_form
[ "def get_form():\n from django_comments.forms import CommentForm\n\n return CommentForm", "def show_comment_form(self, req, page):\n page_id = self.env.get_real_filename(page)[:-4]\n ajax_mode = req.args.get('mode') == 'ajax'\n target = req.args.get('target')\n page_comment_mode = not target\n\n form_error = preview = None\n title = req.form.get('title', '').strip()\n if 'author' in req.form:\n author = req.form['author']\n else:\n author = req.session.get('author', '')\n if 'author_mail' in req.form:\n author_mail = req.form['author_mail']\n else:\n author_mail = req.session.get('author_mail', '')\n comment_body = req.form.get('comment_body', '')\n fields = (title, author, author_mail, comment_body)\n\n if req.method == 'POST':\n if req.form.get('preview'):\n preview = Comment(page_id, target, title, author, author_mail,\n comment_body)\n # 'homepage' is a forbidden field to thwart bots\n elif req.form.get('homepage') or self.antispam.is_spam(fields):\n form_error = 'Your text contains blocked URLs or words.'\n else:\n if not all(fields):\n form_error = 'You have to fill out all fields.'\n elif _mail_re.search(author_mail) is None:\n form_error = 'You have to provide a valid e-mail address.'\n elif len(comment_body) < 20:\n form_error = 'You comment is too short ' \\\n '(must have at least 20 characters).'\n else:\n # '|none' can stay since it doesn't include comments\n self.cache.pop(page_id + '|inline', None)\n self.cache.pop(page_id + '|bottom', None)\n comment = Comment(page_id, target,\n title, author, author_mail,\n comment_body)\n comment.save()\n req.session['author'] = author\n req.session['author_mail'] = author_mail\n if ajax_mode:\n return JSONResponse({'posted': True, 'error': False,\n 'commentID': comment.comment_id})\n return RedirectResponse(comment.url)\n\n output = render_template(req, '_commentform.html', {\n 'ajax_mode': ajax_mode,\n 'preview': preview,\n 'suggest_url': '@edit/%s/' % page,\n 'comments_form': {\n 'target': target,\n 'title': title,\n 'author': author,\n 'author_mail': author_mail,\n 'comment_body': comment_body,\n 'error': form_error\n }\n })\n\n if ajax_mode:\n return JSONResponse({\n 'body': output,\n 'error': bool(form_error),\n 'posted': False\n })\n return Response(render_template(req, 'commentform.html', {\n 'form': output\n }))", "def handle_comment_form(request, user_last_post):\n if request.method == 'POST':\n form = handle_comment(request, user_last_post)\n\n else:\n form = CommentForm()\n\n set_humanity_check(request)\n form.humanity = translate_humanity(request)\n form.js_check = request.session['random_number']\n\n return form", "def comment_form_target():\n return 'test'", "def get_form(browser, predicate=None):\n f = browser.get_form(lambda f: predicate is None or predicate(Form(f)))\n if f: return Form(f)", "def facet_discussion(self):\r\n\r\n self.object = self.get_object()\r\n discussion = self.object.discussion\r\n comments = discussion.comment_set.all().order_by('date')\r\n form = CommentForm()\r\n return {'discussion': discussion, 'comments': comments, 'form': form}", "def page_widget(self):\n p = self.declaration.page_widget()\n if p is not None:\n return p.proxy.widget or None", "def get_form_as_dict(response):\n html_parser = etree.HTMLParser()\n root = etree.fromstring(response.get_data(), html_parser)\n input_elements = CSSSelector(\"input\")(root)\n form = {ie.attrib[\"name\"].replace(\"-input\", \"\"): ie for ie in input_elements}\n form[\"description\"] = CSSSelector(\"textarea#description-textarea\")(root)[0]\n return 
form", "def form(self):\n return self.question.form_class(question=self.question)", "def get_simple_panel (container, parent):\n from dynamic_data import EditCommentPanel\n return __make_panel(container, parent, edit_panel=EditCommentPanel)", "def show_comments(self, page):\n the_comments = WikiComment.comments_on_page(page)\n #the_comments = lazy_iter(sort_comments, the_comments)\n the_comments = sort_comments(the_comments)\n self.template_value['comments'] = the_comments", "def fase_page():\n return fase_form()", "def build_helper(self):\n helper = FormHelper()\n helper.form_class = 'form-horizontal'\n helper.label_class = 'col-lg-2'\n helper.field_class = 'col-lg-8'\n helper.layout = Layout(\n 'body',\n FormActions(\n StrictButton('Post comment', type='submit'),\n ),\n )\n return helper", "def _get_container(self, container_name, html):\n #html_tree = BeautifulSoup(html, 'html.parser')\n container_attr = getattr(self, 'container_' + container_name, None)\n if container_attr:\n container_html = html.find(\n container_attr.tag,\n {container_attr.attribute: container_attr.value}\n )\n else:\n container_html = html\n return container_html", "def test_get_form_no_obj(self):\n request = self.get_page_request(None, self.user, \"/\", edit=True)\n form = page_admin.get_form(request)\n self.assertEqual(form.base_fields.get(\"meta_description\"), None)", "def _get_container(self, container_name, html):\n html_tree = BeautifulSoup(html, 'html.parser')\n container_attr = getattr(self, 'container_' + container_name, None)\n if container_attr:\n container_html = html_tree.find(\n container_attr.tag,\n {container_attr.attribute: container_attr.value}\n )\n else:\n container_html = html_tree\n return container_html", "def GetPageWidget(self):\n return self._page_widget", "def prepare_form(self):\n raise NotImplementedError(\"Just use get_form() method instead\")", "def get_form_element(style, definition, storable):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear the contents of the current Axes object
def clear(self): self._ax.clear()
[ "def clear_axes(self):\r\n # Remove lines and selection as they can't be reloaded properly\r\n for plot in self.sub_plots:\r\n self.figure.delaxes(plot.axes)\r\n plot.axes=None\r\n plot.y2_axis = None\r\n plot.selection = None\r\n plot.lines = []\r\n self.figure.clear()\r\n # Set selction of view area to false as it was removed\r\n self.has_selection = False", "def clear_plot(self):\n self.plot_window.pg_plot_widget.clear()", "def clear(self):\n\n # loop through all existing figures\n if self.figs is not None:\n self.figs.clear()\n self.repaint()", "def clear(self):\n logger.debug(\"Clearing graph from RAM: %s\", self)\n self.fig.clf()\n del self.fig", "def clear_charts(self):\n self.clear_queue('charts')", "def clear_figure(self):\n self._plot_status = PlotStatus.NO_DATA\n self._figure.data = []", "def remove(self):\r\n self.figure.delaxes(self.sub_plots[-1].axes)\r\n del self.sub_plots[-1]", "def clear(self):\n self._bar_dict.clear()", "def clear_elements(self):\n\n for child in self.ax.get_children(): # remove pie legend\n if 'legend' and 'anno' in str(child).lower():\n child.remove()\n\n for axes in self.fig.axes[1:]: # remove colorbar\n axes.remove()\n\n # remove pie charts\n try:\n for mpl_objects in [pie for lst in self.mpl_paths.values() for pie in lst]:\n try:\n mpl_objects.remove()\n except Exception:\n continue\n except Exception as e:\n print('No Pies Plotted!')\n print('exception: ', e)\n\n # remove choropleth areas\n try:\n for mpl_objects in [poly for lst in self.mpl_polygons.values() for poly in lst]:\n try:\n mpl_objects.remove()\n except Exception:\n continue\n except Exception as e:\n print('No Polygons Plotted!')\n print('exception: ', e)", "def clear_all_plots(self):\n self.delete_plots(self.contained_plots)\n self.contained_plots = []", "def clear_rays(self):\n self.pack_forget()", "def clear_figure(self):\n self.fig.clf()\n return", "def clear(self):\n self.shapes.clear()", "def clear_axes(axs, spines='none'):\n for ax in axs:\n clear_axis(ax, spines)\n return axs", "def reset(self):\n mpl.rcParams.update(mpl.rcParamsDefault)", "def clear_lines(self, index = 0):\r\n self.sub_plots(index).axes.cla()\r\n self.sub_plots(index).lines = []", "def reset(self):\n self.x=0\n self.y=0", "def clear_axlabels(axtype, ax=None):\n if ax is None:\n ax = plt.gca()\n if axtype.lower() == 'x':\n ax.set_xlabel('')\n ax.set_xticklabels([])\n else:\n ax.set_ylabel('')\n ax.set_yticklabels([])\n return None", "def clear_crossfilter(self):\n print ('Trigger clear')\n self.struct_df = None\n self.elem_df = None\n self.prop_df = None\n self.code_df = None\n self.exchange_df = None\n self.plot_data = None\n layout.children[4] = self.create_figure(self.plot_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
do_send_to_client_paypal send withdrawal amount to the client's paypal account
def do_send_to_client_paypal(self, transaction: WalletTransactionsModel) -> Future:
    # TODO use paypal SDK to send transactions to paypal here
    # TODO then update transaction to reflect that transaction was sent
    # NOTE: Could also listen to an _ipn to find out if transaction succeeded on paypal side
    wallet_instance: WalletModel = WalletModel.query(
        WalletModel.organization_id == transaction.organization_id,
        WalletModel.uid == transaction.uid).get_async().get_result()
    if wallet_instance.is_verified:
        paypal_address = wallet_instance.paypal_address
        amount_to_send: AmountMixin = transaction.amount
        # TODO send amount to paypal using paypal address from wallet and amount from transactions
        transaction.is_settled = True
        tran_key: Optional[ndb.Key] = transaction.put_async(retries=self._max_retries,
                                                            timeout=self._max_timeout).get_result()
        yield bool(tran_key)
    yield False
[ "async def withdraw(self, **params):\r\n return await self.client_helper(\"withdraw\", **params)", "async def withdraw(self, ctx, amount: int):\n data = await BonfideCoin(self.bot).get(ctx.guild.id, ctx.author.id)\n if data is None:\n await self.add_to_db(ctx.guild.id, ctx.author.id)\n\n data = await BonfideCoin(self.bot).get(ctx.guild.id, ctx.author.id)\n if amount <= data.get(\"bank\"):\n query = \"\"\"UPDATE bonafidecoin SET bank = bank - $3, wallet = wallet + $3 WHERE guild_id = $1 AND user_id\n = $2 \"\"\"\n await self.bot.db.execute(query, ctx.guild.id, ctx.author.id, amount)\n return await ctx.send(\n f\"Successfully withdrawn <:coin:853891390537465858> **{amount}**.\"\n )\n\n return await ctx.send(\"You don't have sufficient balance to withdraw.\")", "async def donate(self, ctx, amount: CoinConverter):\n await self.transfer(ctx.author.id, ctx.guild.id, amount)\n await ctx.send(f'\\N{MONEY WITH WINGS} `{ctx.author!s}` > '\n f'`{amount}JC` > `{ctx.guild!s}` \\N{MONEY BAG}')", "def test_paypal_notify_url_for_withdrawal_with_refunded(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n entry = baker.make(Entry, status='selected_confirmed', withdrawn=True)\n pptrans = create_entry_paypal_transaction(\n entry.user, entry, 'withdrawal'\n )\n pptrans.transaction_id = \"test_trans_id\"\n pptrans.save()\n\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('withdrawal {}'.format(entry.id)),\n 'invoice': b(pptrans.invoice_id),\n 'payment_status': b'Refunded'\n }\n )\n self.paypal_post(params)\n entry.refresh_from_db()\n self.assertFalse(entry.withdrawal_fee_paid)\n self.assertTrue(entry.withdrawn) # still withdrawn\n\n self.assertEqual(len(mail.outbox), 1,)\n\n # emails sent to support\n self.assertEqual(mail.outbox[0].to, [settings.SUPPORT_EMAIL])", "def withdrawal_success(request, order_number):\n withdrawal = get_object_or_404(Withdrawal, order_number=order_number)\n messages.success(request, 'Withdrawal complete, the money should arrive \\\n in the account provided within 3 business days.')\n\n message = get_template(\"tokens/withdrawal_email.html\").render({\n 'withdrawal': withdrawal\n })\n\n mail = EmailMessage(\n \"Content Flow Withdrawal confirmation\",\n message,\n 'contentflow@contentflow.com',\n [withdrawal.email],\n )\n mail.content_subtype = 'html'\n mail.send()\n\n context = {\n 'withdrawal': withdrawal,\n }\n return render(request, 'tokens/withdrawal_success.html', context)", "def __call__(self, amount: Union[int, float, Decimal], currency: str, paymentagent_loginid: str, verification_code: str, description: Optional[str] = None, dry_run: Optional[int] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):\n\n data = {\n \"paymentagent_withdraw\": int(1),\n \"amount\": amount,\n \"currency\": currency,\n \"paymentagent_loginid\": paymentagent_loginid,\n \"verification_code\": verification_code\n }\n\n if description:\n data['description'] = str(description)\n\n if dry_run:\n data['dry_run'] = int(dry_run)\n\n return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)", "def send_money(self):\n pass", "def send_approved_withdrawals_to_paypal_wallets(self) -> Optional[List[Future]]:\n try:\n wallet_transactions: List[WalletTransactionsModel] = WalletTransactionsModel.query(\n WalletTransactionsModel.is_verified == True, WalletTransactionsModel.is_settled == False).fetch_async().get_result()\n print('approved withdrawals running')\n\n return 
[self.do_send_to_client_paypal(transaction=transaction) for transaction in wallet_transactions\n if transaction.transaction_type == 'withdrawal']\n except RetryError as e:\n # TODO Log this error\n return None", "def send_withdrawl_notification(user):\r\n _sender = SENDER\r\n _subject = 'Transit Subsidy Program: Thank You for Beginning Your Enrollment'\r\n message = Template(\"\"\"\r\n <style>html,p{font-family: arial, helvetica}</style>\r\n\r\n <p>Dear {{user.first_name}},</p>\r\n\r\n <p>You have been withdrawn from the Transit Subsidy Program on {{ transit.timestamp }}.</p>\r\n\r\n <p>This will be reflected in the next cycle. Also, if you need to re-enroll, please visit \r\n the enrollment application again.\r\n </p>\r\n\r\n \"\"\")\r\n\r\n ctx = Context({'user':user})\r\n \r\n subject, from_email, to = _subject, _sender, user.email\r\n text_content = message.render(ctx)\r\n html_content = message.render(ctx)\r\n e = EmailMultiAlternatives(subject, html_content, from_email, [to])\r\n e.attach_alternative(html_content, \"text/html\")\r\n e.content_subtype = \"html\"\r\n e.send()", "def withdrawals(self, request, pk=None):\n user = get_object_or_404(User, pk=pk)\n self.check_object_permissions(request, user)\n \n withdrawal_data = request.data.copy()\n\n try:\n amount = float(withdrawal_data.get('amount')) * -1\n withdrawal_data['amount'] = amount\n # negate amount so it's subtracted from user's available balance before saving the instance\n\n available = user.balance.first().available_balance\n\n if available + amount < 0:\n error_message = {'detail': 'You cannot withdraw more than your available balance'}\n return Response(error_message, status=status.HTTP_403_FORBIDDEN)\n\n except ValueError:\n pass\n\n serializer = BankTransferSerializer(data=withdrawal_data)\n\n if serializer.is_valid():\n serializer.save(owner=user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def send_confirmation_email_to_buyer(self, order_instance, **kwargs):\n order = order_instance\n # Merge tags in template\n now = kwargs.pop('now', None) or timezone.now()\n vars_items = []\n for item in order.item_set.all():\n vars_item = {\n 'NAME': item.detail.get('name'),\n 'PRICE': item.subtotal,\n 'IMAGE': item.detail.images[0]['url'] if item.detail.images else None,\n 'QUANTITY': item.qty,\n 'SHIPPING_CHARGE': item.shipping_charge,\n 'ADDITIONAL_CHARGE': item.additional_charge,\n 'TOTAL': item.total,\n 'PAID_ON': now,\n 'SHIPPING_METHOD': item.shipping_method.name if item.shipping_method else None,\n 'SHIPPING_KIND': item.shipping_kind,\n # 'ORDER_FOR': 'purchase' if not item.rentalitem else 'rental'\n }\n vars_items.append(vars_item)\n\n self.msg.global_merge_vars = {\n 'FULL_NAME': order.user.profile.fullname,\n 'CONFIRM_DATE': now,\n 'ORDER_ID': order.id,\n 'TOTAL': order.total,\n 'SUBTOTAL': order.subtotal,\n\n # 'PAYMENT_METHOD': self.subtotal,\n 'SHIPPING_ADDRESS': {\n 'FIRST_NAME': order.shipping_address.first_name,\n 'LAST_NAME': order.shipping_address.last_name,\n 'ADDRESS1': order.shipping_address.address1,\n 'ADDRESS2': order.shipping_address.address2,\n 'ZIP_CODE': order.shipping_address.zip_code,\n 'CITY': order.shipping_address.city.name_std,\n 'STATE': order.shipping_address.state.name_std,\n 'COUNTRY': order.shipping_address.country.name,\n 'PHONE': order.shipping_address.phone\n },\n 'ITEMS': vars_items\n }\n\n return self._send(to=[self.user.email], template_name=self.ETPL_ORDER_CONFIRM)", "def 
withdraw_currency(self, coin, amount, wallet):\r\n\r\n url = self.url_base + 'id=' + self.user_id + '&email=' + self.email + '&password=' + self.password + \\\r\n '&manualwithdraw=' + coin + '&amount=' + str(amount) + '&wallet=' + wallet\r\n\r\n if self.debug == 1:\r\n print url\r\n\r\n try:\r\n result = requests.get(url, timeout=self.timeout)\r\n except requests.exceptions.RequestException as exception:\r\n print exception\r\n return \"ERROR\"\r\n\r\n return result.text", "async def transfer(self, ctx, amount : int, user : discord.Member):\n try:\n if await self.usercheck('levels', ctx.message.author) is False:\n await self._create_user(ctx.message.author.id)\n except:\n pass\n if amount < 10:\n await ctx.send(\"Minimum send price is $10\")\n return\n if user.bot:\n await ctx.send(\"You can't send credits to bots.\")\n return\n elif user == ctx.message.author:\n await ctx.send(\"You cant send credits to yourself.\")\n return\n else:\n if await self.usercheck('economy', ctx.message.author) is False:\n await ctx.send(\"You don't have a bank account...\")\n return\n elif await self.usercheck('economy', user) is False:\n await ctx.send(f\"{user.name} has no bank account...\")\n return\n else:\n x = await self.execute(f\"SELECT balance FROM economy WHERE userid = {ctx.message.author.id}\", isSelect=True)\n author_balance = int(x[0])\n x = await self.execute(f\"SELECT balance FROM economy WHERE userid = {user.id}\", isSelect=True)\n user_balance = int(x[0])\n if (author_balance - amount) < 0:\n await ctx.send(\"You don't have that much to spend.\")\n return\n else:\n await self.execute(f\"UPDATE economy SET balance = {amount + user_balance} WHERE userid = {user.id}\", commit=True)\n await self.execute(f\"UPDATE economy SET balance = {author_balance - amount} WHERE userid = {ctx.message.author.id}\", commit=True)\n await ctx.send(f\"Send `{amount}` to {user.mention}!\")\n try:\n await user.send(f\"{ctx.message.author.name} has sent you ${amount}.\")\n except:\n pass", "def withdraw(request):\n if request.method == 'POST':\n amount = request.POST.get('amount')\n\n if not amount:\n request.session['error'] = {'error_msg': 'Необходимо ввести сумму', 'error_back': 'withdraw'}\n\n return redirect('error')\n\n amount = int(amount)\n card_id = request.session.get('card_id')\n card = CardAccount.objects.get(pk=card_id)\n\n if amount <= card.balance:\n operation = Transaction()\n operation.card = card\n operation.operation_code = Transaction.WITHDRAW\n operation.withdraw_amount = amount\n operation.save()\n\n card.balance -= amount\n card.save()\n\n request.session['transaction_id'] = operation.pk\n\n return redirect('report')\n\n else:\n request.session['error'] = {'error_msg': 'Недостаточно денежных средств на карте', 'error_back': 'withdraw'}\n\n return redirect('error')\n\n return render(request, 'cash_machine/withdraw.tpl')", "def withdraw(self, date: datetime.date, amount: float) -> float:\n raise NotImplementedError()", "async def donate(self):\n await self.bot.say(\"You can donate to me here:\\n<https://www.paypal.me/avrae>\\n\\u2764\")", "def withdraw(self, account, amount):\n # Take the amount of money our of the account\n self.accounts[account].balance -= amount\n # Return the amount of money we withdrew\n return amount", "def update_account_balance_after_transaction(sender, instance, **kwargs):\n instance.account.update_current_balance()\n if (\n instance.trans_type == \"CREDIT\"\n and \"WAGER\" not in instance.title.upper()\n and \"ACCOUNT OPENING\" not in instance.title.upper()\n ):\n msg = 
f\"\"\"\n Confirmed Deposit of {instance.account.owner.country.currency_code} {instance.amount} to Account: {instance.account.owner.phonenumber} on {instance.created_at} \\n\n New Wallet Balance is : {instance.account.owner.country.currency_code} {instance.account.balance}\n \"\"\"\n rec = [sanitize_phone(instance.account.owner.phonenumber)]\n sms.send_message(recipients=rec, message=msg.strip())\n fcm_push_msg(\n uids=[instance.account.owner.id],\n message=msg.strip(),\n title=\"Business Payment\",\n )\n if (\n instance.trans_type == \"DEBIT\"\n and instance.amount > 0\n and \"PEER\" not in instance.title.upper()\n and \"ACCOUNT OPENING\" not in instance.title.upper()\n ):\n msg = f\"\"\"\n Confirmed on {instance.created_at} you have received {instance.account.owner.country.currency_code}: {instance.amount} from your BMB Wallet\n Your new wallet balance is {instance.account.owner.country.currency_code}: {instance.account.balance}\n \"\"\"\n rec = [sanitize_phone(instance.account.owner.phonenumber)]\n sms.send_message(recipients=rec, message=msg.strip())\n fcm_push_msg(\n uids=[instance.account.owner.id],\n message=msg.strip(),\n title=\"Business Payout\",\n )", "def withdraw(self, amount: int): # METHOD\n if self.check_withdraw(amount): # Inputs the output from check-withdrawal(amount)\n self._balance -= amount # Balance is decremented after each withdrawal\n return self._balance # New balance is shown\n else:\n message = f'Insufficient funds.'\n raise ValueError(message)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }