query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
---|---|---|---|
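
Each row below pairs a natural-language `query` with its matching `document` (a Python function) and a list of `negatives` (similar but non-matching functions); the `metadata` column records that rows are meant to be consumed as (query, document, negatives) triplets. The sketch below shows one way such a row might be unpacked into training triplets; the `row` dict used here is hypothetical and simply mirrors the columns of this table.

```python
from typing import Dict, List, Tuple

def row_to_triplets(row: Dict) -> List[Tuple[str, str, str]]:
    """Expand one (query, document, negatives) record into training triplets.

    `row` is a hypothetical parsed record with the same fields as the
    columns above: 'query' (str), 'document' (str), 'negatives' (list of str).
    """
    query = row["query"]
    positive = row["document"]
    return [(query, positive, negative) for negative in row["negatives"]]

# Toy record shaped like the rows in this table:
example_row = {
    "query": "Returns the approximated Hessian of the function at the point x.",
    "document": "def calc_hessian_at(self, x): ...",
    "negatives": ["def jacobian(self, x): ...", "def gradient(x): ..."],
}
triplets = row_to_triplets(example_row)  # one (query, positive, negative) tuple per negative
```
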
Returns the approximated Hessian of the function at the point x.
|
def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:
return hessian_approximation(self.f, x)
|
[
"def InvHessian(self,x):\n return linalg.inv(self.besthessian(x))",
"def evaluateHessian(fgradient,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros((len(x),len(x)))\n for i in range(0,len(x)):\n # Define new gradient function which returns only the i:th element of \n # the gradient in a point x.\n def fgradienti(x):\n return fgradient(x)[i]\n # Evaluate new funciton object and store the result as a row in the \n # hessian.\n row = evaluateGradient(fgradienti,x)\n res[i,:] = row\n return res",
"def getHessian(fgradient):\n def hess(x):\n return evaluateHessian(fgradient,x)\n return hess",
"def default_hessian(self, x, f):\r\n n = len(x)\r\n G = zeros((n,n))\r\n h = 1e-3\r\n \r\n for i in range(n):\r\n for j in range(n):\r\n\r\n G[i,j] = (f(x + h*self._basisvec(n,(i,j),(1,1))) - f(x + h*self._basisvec(n,(i,j), (1,-1)))\r\n - f(x + h*self._basisvec(n,(i,j),(-1,1))) + f(x + h*self._basisvec(n,(i,j),(-1,-1))))/(4*h**2)\r\n G = (G + G.T)/2\r\n return linalg.inv(G)",
"def hessian ( x0, calculate_cost_function, epsilon=1.e-5, linear_approx=False, *args ):\n # ``calculate_cost_function`` is the cost function implementation\n # The next line calculates an approximation to the first\n # derivative\n f1 = approx_fprime( x0, calculate_cost_function, epsilon, *args)\n\n # This is a linear approximation. Obviously much more efficient\n # if cost function is linear\n if linear_approx:\n f1 = np.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = np.zeros ( ( n, n ) )\n # The next loop fill in the matrix\n xx = x0\n for j in range( n ):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = approx_fprime( x0, calculate_cost_function, epsilon, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def pdf_hessian(self, x):\n\n # Convert x or self.dof to arrays of the same size\n x, dof = self._check_param(x)\n\n pdf_hessian_ = numpy.zeros((x.size, x.size), dtype=float)\n\n for i in range(x.size):\n ex = 0.5 * (dof[i] + 1.0)\n coeff = gamma(ex) / \\\n (numpy.sqrt(dof[i] * numpy.pi) * gamma(0.5*dof[i]))\n k = 1.0 + x[i]**2 / dof[i]\n pdf_hessian_[i, i] = -(2.0 * coeff * ex / dof[i]) * \\\n (k**(-ex-1.0) -\n (ex+1.0) * x[i] * k**(-ex-2.0) * (2.0 * x[i] / dof[i]))\n\n if self.half:\n pdf_hessian_ = 2.0*pdf_hessian_\n\n return pdf_hessian_",
"def build_hessian(self,x,y):\n #Precalculate entries of hessian\n x_sum = np.sum(x)\n x_squared_sum = np.sum(x*x)\n y_sum = np.sum(y)\n y_squared_sum = np.sum(y*y)\n xy_sum = np.sum(x*y)\n n = len(x)\n\n hessian = np.array([\n [n,0,x_sum,y_sum,0,0],\n [0,n,0,0,x_sum,y_sum],\n [x_sum,0,x_squared_sum,xy_sum,0,0],\n [y_sum,0,xy_sum,y_squared_sum,0,0],\n [0,x_sum,0,0,x_squared_sum,xy_sum],\n [0,y_sum,0,0,xy_sum,y_squared_sum]\n ])\n return hessian",
"def check_hessian(f, hess_analytical, x0, delta = 1e-5, verbose = True):\n hessian_analytical = np.array(hess_analytical)\n hessian_num = hessian_numerical(f, x0, delta)\n if verbose:\n print('check_hessian: hessian_analytical = ', hessian_analytical)\n print('check_hessian: hessian_num = ', hessian_num)\n print('check_hessian: hessian difference = ', \n hessian_analytical - hessian_num)\n \n return np.sqrt(np.sum((hessian_analytical - hessian_num) ** 2))",
"def calc_lagrangian_hessian_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n return self.calc_lagrangian_at(x_, lambda_)\n\n return hessian_approximation(lagrangian, x)",
"def hessian(self, var, bayesianOptimizer):\n bayesianOptimizer.raiseAnError(NotImplementedError,'Hessian is not yet developed for this acqusition function')",
"def hess(self, x, t, hdata):\n\n # Check arguments for consistency\n errstring = self.consist('mlp', x, t)\n if errstring != None:\n raise Exception(errstring)\n\n if computeData:\n # Data term in Hessian needs to be computed\n hdata = self.datahess(x, t)\n\n h, hdata = self.hbayes(hdata)\n\n # Sub-function to compute data part of Hessian",
"def update_gradient_hessian(self, X, y, sample_weight):",
"def hessian_neg_log_density(self, x):\n hessian = np.zeros((len(x), len(x)))\n\n for prior in self.prior_list:\n hessian[prior['index'], prior['index']] -= prior['density_ddx'](\n x[prior['index']]\n )\n\n return hessian",
"def hessian(poly: PolyLike) -> ndpoly:\n return gradient(gradient(poly))",
"def heaviside_derivative(x):\n return np.zeros(x.shape)",
"def eval_numerical_gradient(f, x, verbose=False, h=1.e-7):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad",
"def posterior_variance_hessian(self, x: ndarray) -> ndarray:\n _, gp_variance_hessian = self._gp.posterior_hessians(x)\n\n return gp_variance_hessian",
"def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n # fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n original_value = x.copy()[ix]\n x[ix] = original_value + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = original_value - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = original_value # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print (ix, grad[ix], fxph, fxmh)\n it.iternext() # step to next dimension\n\n return grad",
"def hessian(X, theta, reg):\n n = len(X)\n d = len(X[0, :])\n h_vec = np.array([h(x, theta) for x in X])\n w = h_vec * (1 - h_vec)\n \n hess = np.zeros((d, d))\n for i in range(n):\n hess += np.outer(w[i] * X[i], X[i])\n hess += n * reg * np.eye(d)\n return hess"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
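
The `calc_hessian_at` document above delegates to a `hessian_approximation` helper that is not shown in this table. Below is an assumed finite-difference sketch of what such a helper might look like (four-point central differences with a fixed step); the dataset's actual helper may differ.

```python
import numpy as np

def hessian_approximation(f, x: np.ndarray, eps: float = 1e-5) -> np.ndarray:
    """Approximate the Hessian of a scalar function f at x via central differences.

    Assumed sketch of the undefined helper referenced above, not the
    dataset's actual implementation.
    """
    n = x.size
    hess = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            e_i = np.zeros(n); e_i[i] = eps
            e_j = np.zeros(n); e_j[j] = eps
            # Four-point central-difference approximation of d^2 f / (dx_i dx_j).
            hess[i, j] = (
                f(x + e_i + e_j) - f(x + e_i - e_j)
                - f(x - e_i + e_j) + f(x - e_i - e_j)
            ) / (4.0 * eps ** 2)
    # Symmetrize to damp floating-point asymmetry.
    return 0.5 * (hess + hess.T)
```
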
Returns the constraint function values at the point x.
|
def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:
return np.array([c(x) for c in self.constraints])
|
[
"def calc_constraint_at(self, i: int, x: np.ndarray) -> float:\n return self.constraints[i](x)",
"def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([gradient_approximation(c.c, x) for c in self.constraints])",
"def constraint(self, x):\n return x[0]",
"def get_interpolated_value(self, x):\n if len(self.ydim) == 1:\n return get_linear_interpolated_value(self.x, self.y, x)\n else:\n return [get_linear_interpolated_value(self.x, self.y[:, k], x)\n for k in range(self.ydim[1])]",
"def calc_constraint_gradient_at(self, i: int, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.constraints[i], x)",
"def f(x):\n x = x.detach().cpu().numpy()\n res_total = np.zeros((*x.shape[:-1], 1))\n\n for i in range(len(x)):\n parameters = {\n p: float(x[i, ..., j])\n for j, p in enumerate(self.search_space.parameters)\n }\n observation_features = [\n ObservationFeatures(parameters=parameters)\n ]\n\n for t in reversed(list(self.transforms.values())):\n observation_features = t.untransform_observation_features(\n observation_features\n )\n params = observation_features[0].parameters\n\n mean = list(self.transforms.values())[-1].Ymean\n std = list(self.transforms.values())[-1].Ystd\n res = (self.d_function(params) - mean[self.d_metric])\n res = res / std[self.d_metric]\n res_total[i, ..., 0] = res\n\n return torch.as_tensor(res_total)",
"def coordinates(self, x):\n K = self.number_field()\n V, from_V, to_V = K.absolute_vector_space()\n try:\n return self.free_module().coordinate_vector(to_V(K(x)))\n except ArithmeticError as e:\n raise TypeError(e)",
"def residual_jacobian(self, x):\n sres = np.zeros((len(self.prior_list), len(x)))\n for iprior, prior in enumerate(self.prior_list):\n sres[iprior, prior['index']] = prior['residual_dx'](\n x[prior['index']]\n )\n\n return sres",
"def func(self,x):\n return dot(self.par, self.__fit_func__(x))",
"def get_solution(self):\n # TODO: enter your code here\n solution = 0.0\n f = domain[:, 0]\n for idx, item in enumerate(self.x):\n if self.v[idx][0] >= self.v_min and self.f[idx][0] > f:\n f = self.f[idx][0]\n solution = item[0]\n return solution",
"def __call__(self, x):\n assert (x>=self.xlimits[0]) & (x<self.xlimits[1]), \"x is out of bounds.\"\n ix = self.get_index(x)\n return self.value[ix]",
"def point_cost(self, x):\n fp_vect = self.fp_to_vector(self.fixed_points)\n cost = self.bgfs_cost(np.append(fp_vect, x))\n return cost",
"def constraint(self, x) -> float:\n portfolio_capacity = self.portfolio_capacity\n for i in range(len(x)):\n portfolio_capacity -= x[i] * self.share_price[i]\n return portfolio_capacity",
"def jacobian(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=np.object)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n return np.vstack(\n [y.derivatives() for y in y_ad.flat]).reshape(y_ad.shape + (-1,))",
"def acquisition_function(self, x):\n\n # TODO: enter your code here\n f_mean, f_std = self.f_model.predict(x.reshape(1, -1), return_std=True)\n v_mean, v_std = self.v_model.predict(x.reshape(1, -1), return_std=True)\n\n # Expected speed\n v = v_mean[0]\n\n # Relu around 1.2 instead of 0.0\n if v < self.v_min:\n # Set to lowest value\n af_value = domain[:, 0]\n else:\n # Upper Confidence Bound, max exploration\n af_value = f_mean[0] + f_std[0]\n return af_value",
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)",
"def deterministic_stochastic_constraints_and_gradients(self, x):\n det_stoch_constraints = tuple(np.ones(self.dim) - self.factors[\"error_prob\"])\n det_stoch_constraints_gradients = ((0,),)\n return det_stoch_constraints, det_stoch_constraints_gradients",
"def get_fval(self, x: np.ndarray) -> float:\n fval = self(x, (0,), MODE_FUN)\n return fval",
"def critical_points(self):\n from sage.calculus.calculus import maxima\n x = QQ[self.default_variable()].gen()\n crit_pts = []\n for (a,b), f in self.list():\n for root in maxima.allroots(SR(f).diff(x)==0):\n root = float(root.rhs())\n if a < root < b:\n crit_pts.append(root)\n return crit_pts"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the constraint function value of constraint i at the point x.
|
def calc_constraint_at(self, i: int, x: np.ndarray) -> float:
return self.constraints[i](x)
|
[
"def constraint(self, x):\n return x[0]",
"def calc_constraint_gradient_at(self, i: int, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.constraints[i], x)",
"def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([gradient_approximation(c.c, x) for c in self.constraints])",
"def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([c(x) for c in self.constraints])",
"def cost_function(x):\n return f(x)",
"def _cei(x, gp_objective, xi, gp_constraint, constraint_upper):\n ei = UtilityFunction._ei(x, gp_objective, xi)\n\n mean, std = gp_constraint.predict(x, return_std=True)\n z = (constraint_upper - mean) / std\n\n cumulative_probabiliy = norm.cdf(z)\n return cumulative_probabiliy * ei",
"def get_solution(self):\n # TODO: enter your code here\n solution = 0.0\n f = domain[:, 0]\n for idx, item in enumerate(self.x):\n if self.v[idx][0] >= self.v_min and self.f[idx][0] > f:\n f = self.f[idx][0]\n solution = item[0]\n return solution",
"def constant_func(i):\n return lambda x: i",
"def getDecisionFunction(\n self, i, alpha=..., svidx=...\n ) -> Tuple[retval, alpha, svidx]:\n ...",
"def getConstraint (self):\n\n return self.constraint",
"def func(self,x):\n return dot(self.par, self.__fit_func__(x))",
"def get_fval(self, x: np.ndarray) -> float:\n fval = self(x, (0,), MODE_FUN)\n return fval",
"def constraint(self, x) -> float:\n portfolio_capacity = self.portfolio_capacity\n for i in range(len(x)):\n portfolio_capacity -= x[i] * self.share_price[i]\n return portfolio_capacity",
"def __call__(self, x):\n assert (x>=self.xlimits[0]) & (x<self.xlimits[1]), \"x is out of bounds.\"\n ix = self.get_index(x)\n return self.value[ix]",
"def objective_function(constraint):\n\n def maximum_stress(solution):\n unflattened_solution = BridgeFactory.preprocess_solution(\n constraint, solution\n )\n load_map = Bridge._create_load_map(unflattened_solution)\n\n current_max_overstress = 0.0\n for row in range(Bridge.HEIGHT):\n for column in range(Bridge.WIDTH):\n cell_overstress = (\n load_map[row][column] - unflattened_solution[row][column]\n )\n if cell_overstress > current_max_overstress:\n current_max_overstress = cell_overstress\n return current_max_overstress\n\n return maximum_stress",
"def feqc(x, f, fstar):\n s = f[0] - fstar\n for i in range(min(len(x), len(f))):\n if (f[i] - fstar) * s < 0.0:\n # Linear interpolation\n dxf = (f[i] - f[i-1]) / (x[i] - x[i-1])\n xstar = x[i-1] + (fstar - f[i-1]) / dxf\n istar = i\n return xstar, istar\n\n # We get to the end and cannot find the root\n return None, None",
"def ApproximateJacobian(f, x, dx=1e-6):\n try:\n n = len(x)\n except TypeError:\n n = 1\n fx = f(x)\n Df_x = N.matrix(N.zeros((n,n)))\n for i in range(n):\n\n v = N.matrix(N.zeros((n,1)))\n v[i,0] = dx\n Df_x[:,i] = (f(x + v) - fx)/v[i,0]\n return Df_x",
"def _solve_column_with_cv(self, X, i):\n X_train, X_test = train_test_split(X, test_size=0.4, random_state=0)\n S_train = self.calculate_scaled_covariance(X_train)\n S_test = np.cov(X_test, rowvar=False)\n # Calculate the lambdas to check\n lambdas = np.arange(0.005, 51)\n lambdas = lambdas/50\n test_errors = []\n for l in lambdas:\n beta = self._solve_column_problem(S_train, i, l)\n error = self.column_likelihood_function(S_test, beta, i)\n test_errors.append(error)\n\n min_err_i = np.argmin(test_errors)\n\n best_l = lambdas[min_err_i]\n S = np.cov(X, rowvar=False)\n return self._solve_column_problem(S, i, best_l), best_l",
"def residual_jacobian(self, x):\n sres = np.zeros((len(self.prior_list), len(x)))\n for iprior, prior in enumerate(self.prior_list):\n sres[iprior, prior['index']] = prior['residual_dx'](\n x[prior['index']]\n )\n\n return sres"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the constraints approximated Jacobian at the point x.
|
def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:
return np.array([gradient_approximation(c.c, x) for c in self.constraints])
|
[
"def jacobian(self, x):\n return self.jnz",
"def jacobian(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=np.object)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n return np.vstack(\n [y.derivatives() for y in y_ad.flat]).reshape(y_ad.shape + (-1,))",
"def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([c(x) for c in self.constraints])",
"def calc_constraint_at(self, i: int, x: np.ndarray) -> float:\n return self.constraints[i](x)",
"def residual_jacobian(self, x):\n sres = np.zeros((len(self.prior_list), len(x)))\n for iprior, prior in enumerate(self.prior_list):\n sres[iprior, prior['index']] = prior['residual_dx'](\n x[prior['index']]\n )\n\n return sres",
"def ApproximateJacobian(f, x, dx=1e-6):\n try:\n n = len(x)\n except TypeError:\n n = 1\n fx = f(x)\n Df_x = N.matrix(N.zeros((n,n)))\n for i in range(n):\n\n v = N.matrix(N.zeros((n,1)))\n v[i,0] = dx\n Df_x[:,i] = (f(x + v) - fx)/v[i,0]\n return Df_x",
"def constraint(self, x):\n return x[0]",
"def calc_constraint_gradient_at(self, i: int, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.constraints[i], x)",
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def pdf_jacobian(self, x):\n\n # Convert x or self.dof to arrays of the same size\n x, dof = self._check_param(x)\n\n pdf_jacobian_ = numpy.zeros((x.size, ), dtype=float)\n\n for i in range(x.size):\n ex = 0.5 * (dof[i] + 1.0)\n coeff = gamma(ex) / \\\n (numpy.sqrt(dof[i] * numpy.pi) * gamma(0.5*dof[i]))\n k = 1.0 + x[i]**2 / dof[i]\n pdf_jacobian_[i] = -coeff * ex * k**(-ex-1.0) * (2.0*x[i]/dof[i])\n\n if self.half:\n pdf_jacobian_ = 2.0*pdf_jacobian_\n\n return pdf_jacobian_",
"def jacobian(self, flux):\n if not self._fields:\n raise ValueError(\"No fields.\")\n return Matrix(flux).jacobian(self._fields)",
"def numerical_jacobian(func, x: np.ndarray, *args):\n n = len(x)\n eps = 1e-3\n\n fx = func(x, *args).flatten()\n xpeturb=x.copy()\n jac = np.empty((len(fx), n))\n for i in range(n):\n xpeturb[i] = xpeturb[i]+eps\n jac[:,i] = (func(xpeturb, *args).flatten() - fx)/eps\n xpeturb[i]=x[i]\n\n return jac",
"def jacobian(self, xs, argdict=None, eps_f=5e-11):\n jac = []\n xs = np.asarray(xs)\n for i, x in enumerate(xs):\n # Determine the separation to use\n # Optimal one-pt separation is (eps_f*f/f'')^(1/2) ~ sqrt(eps_f)*x\n # Optimal two-pt separation is (eps_f*f/f''')^(1/3) ~ cbrt(eps_f)*x\n h = np.zeros(len(xs))\n h[i] = (eps_f**(1./3.))*x\n\n # Evaluate the function\n # One-pt\n #f1 = rebound_2d_earth_res(xs...)\n # Two-pt\n f1 = self.residuals(xs-h, argdict)\n f2 = self.residuals(xs+h, argdict)\n\n # Difference\n # One-pt\n #(f2-f1)/h\n # Two-pt\n jac.append((f2-f1)*0.5/h[i])\n\n # put them together\n jac = np.asarray(jac)\n return jac",
"def jacobian_information(self):\n has_jacobian = True\n jacobian_free_solvers = [\"lm-scipy-no-jac\"]\n return has_jacobian, jacobian_free_solvers",
"def getJacobian(x,y,f,g,x0,y0):\r\n dx = sp.gradient(x)[1] # the derivative in the X direction\r\n dy = sp.gradient(y)[0] # the derivative in the Y direction\r\n dfy, dfx = sp.gradient(f) # the derivatives of f in the X and Y directions\r\n dgy, dgx = sp.gradient(g) # the derivatives of g in the X and Y directions\r\n\r\n # Now we need to get the values at the fixed point. We have to interpolate\r\n # the data from what we have.\r\n points = (x.flatten(), y.flatten())\r\n point = (x0, y0)\r\n dx0 = griddata(points, dx.flatten(), point)\r\n dy0 = griddata(points, dy.flatten(), point)\r\n dfdx0 = griddata(points, dfx.flatten(), point)\r\n dfdy0 = griddata(points, dfy.flatten(), point)\r\n dgdx0 = griddata(points, dgx.flatten(), point)\r\n dgdy0 = griddata(points, dgy.flatten(), point)\r\n\r\n #X, Y = x.flatten(), y.flatten()\r\n #xi,yi = plt.meshgrid([x0-1, x0, x0+1], [y0-1, y0, y0+1])\r\n #dx0 = griddata(X, Y, dx.flatten(), xi,yi)[1][1]\r\n #dy0 = griddata(X, Y, dy.flatten(), xi,yi)[1][1]\r\n #dfdx0 = griddata(X, Y, dfx.flatten(),xi,yi)[1][1]\r\n #dfdy0 = griddata(X, Y, dfy.flatten(),xi,yi)[1][1]\r\n #dgdx0 = griddata(X, Y, dgx.flatten(),xi,yi)[1][1]\r\n #dgdy0 = griddata(X, Y, dgy.flatten(),xi,yi)[1][1]\r\n\r\n return sp.array([[dfdx0/dx0, dfdy0/dy0], [dgdx0/dx0, dgdy0/dy0]])",
"def calc_jacobian(f, x, *args, use_autograd=False, eps = 1e-3):\n\tif use_autograd:\n\t\tJ = jacobian(f)(x, *args)\n\telse:\n\t\tJ = np.zeros((len(f(x, *args)), len(x)))\n\t\tfor i in range(len(x)):\n\t\t\teps_i = np.zeros_like(x)\n\t\t\teps_i[i] = eps\n\t\t\tJ[:,i] = (f(x+eps_i, *args) - f(x, *args))/eps\n\treturn J",
"def Jacobian(self,t,y):\n return -self.lambd",
"def jacobian(Q, d):\n return zeros([n, n])",
"def get_jacobian(self):\n return self.__jac_mod_system"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the approximated gradient values of constraint i at the point x.
|
def calc_constraint_gradient_at(self, i: int, x: np.ndarray) -> np.ndarray:
return gradient_approximation(self.constraints[i], x)
|
[
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)",
"def _gradient_terms(self, x):\n # gradient of predictive variance of y\n dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)\n dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)\n graminv_KXx = self._graminv_Kx(x)\n\n d_y_predictive_var_dx = dvar_dx - 2. * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)\n\n # gradient of predictive covariance between integral and (x, y)-pair\n dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))\n qKX_graminv = self._qK_graminv() # (1, N)\n dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)\n d_predictive_cov_dx = dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[0, :, :]\n\n return np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx",
"def GetGradient(self, x):\n return _handle.OperatorHandle_GetGradient(self, x)",
"def calc_constraint_at(self, i: int, x: np.ndarray) -> float:\n return self.constraints[i](x)",
"def gradient(self, i: int) -> float:\n assert -1 <= i < len(self.times)\n if i == -1:\n i = 0\n elif i == len(self.times) - 1:\n i = len(self.times) - 2\n return (self.values[i + 1] - self.values[i]) / (self.times[i + 1] - self.times[i])",
"def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([gradient_approximation(c.c, x) for c in self.constraints])",
"def grad(self, x, apply_bcs=True):\n if self.__objective_gradient is None:\n self.compile_objective_gradient()\n\n self.assign_vector(x, apply_bcs=apply_bcs)\n\n # Evaluate gradient and apply boundary conditions.\n g = assemble(self.__objective_gradient)\n # for bc in self.bcs:\n # bc.apply(g)\n\n return g.array()",
"def gradient(self):\n gx, gy = np.gradient(self.zz)\n return gx, gy",
"def f_grad(self, x):\n gradient = []\n\n for key in self.mean_functions:\n gradient.push(self.mean_functions[key][1](x))\n\n return np.array(gradient)",
"def gradient(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=AutoDiffXd)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n # TODO(eric.cousineau): Consider restricting this in the future to only be\n # a scalar.\n assert y_ad.size == 1 and y_ad.ndim <= 1, (\n \"The output of `function` must be of a scalar or a vector of size 1\")\n y_ad = y_ad.reshape(()) # To scalar.\n return y_ad.item().derivatives()",
"def gradient(x):\n\t\tpass",
"def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE",
"def compute_gradient(self, model, x, y):\n\t\tpass",
"def evaluateGradient(function,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros(shape(x)) \n for i in range(0,len(x)):\n # Set the step on the correct variable.\n h[i] = epsilon\n # Approximate derivative using central difference approximation.\n res[i] = (function(x + h) - function(x - h)) / (2 * epsilon)\n # Reset step for next iteration.\n h[i] = 0.0\n return res",
"def eval_numerical_gradient(f, x, verbose=False, h=1.e-7):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad",
"def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n # fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n original_value = x.copy()[ix]\n x[ix] = original_value + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = original_value - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = original_value # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print (ix, grad[ix], fxph, fxmh)\n it.iternext() # step to next dimension\n\n return grad",
"def grad_input(self, x):\n # Compute the gradient of the mean function.\n d_kernel = self.kernel.grad_input(x, self.X)\n d_mean = d_kernel.T.dot(self.alpha)\n # Compute the gradient of the standard deviation function. It is\n # absolutely crucial to note that the predict method returns the\n # variance, not the standard deviation, of the prediction.\n sd = np.sqrt(self.predict(x)[1])\n K_cross = self.kernel.cov(x, self.X)\n M = spla.cho_solve((self.L, True), K_cross.T).ravel()\n d_sd = -d_kernel.T.dot(M) / sd\n return d_mean, d_sd",
"def eval_numberical_gradient(f, x):\n fx = f(x) #evaluate function value at original point\n grad = np.zeros(x.shape)\n h = 0.00001\n\n #iterate over all indexes in x\n #np.nditer: It inter as follows:\n #------------->\n #...\n #------------->\n #You should know that it.multi_index is the index\n #of the matrix. And do not forget to interate\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n print \"Now the iterate begins...\"\n while not it.finished:\n #evaluate function at x+h\n ix = it.multi_index\n old_value = x[ix]\n x[ix] = old_value + h #increment by h\n fxh = f(x) #evaluate f(x+h)\n x[ix] = old_value #restore to previous value!!\n #compute the partial derivative\n grad[ix] = (fxh - fx) / h #the slope\n print \"Now the fxh: \" + str(fxh) + \"\\tfx: \" + str(fx) \n print \"and the grad\"+ str(ix) + \"is \" + str(grad[ix]) + '\\n'\n it.iternext() #step to next dimension\n\n print \"Now the iterates ends...\"\n return grad",
"def grad(self, x):\n raise NotImplementedError('Grad oracle is not implemented.')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
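
The constraint-Jacobian and constraint-gradient documents above both call a `gradient_approximation` helper that is likewise not defined in this table. A plausible central-difference sketch is given below as an assumption; the real helper used by the source code may differ.

```python
import numpy as np

def gradient_approximation(f, x: np.ndarray, eps: float = 1e-5) -> np.ndarray:
    """Central-difference gradient of a scalar function f at x.

    Assumed sketch of the undefined helper used by the gradient and
    Jacobian documents above.
    """
    grad = np.zeros_like(x, dtype=float)
    for i in range(x.size):
        step = np.zeros_like(x, dtype=float)
        step[i] = eps
        grad[i] = (f(x + step) - f(x - step)) / (2.0 * eps)
    return grad
```
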
Returns the lagrangian function value at the point x.
|
def calc_lagrangian_at(self, x, lambda_) -> float:
assert len(lambda_) == len(self.constraints)
result = self.calc_f_at(x)
for i, lambda_i in enumerate(lambda_):
result -= lambda_i * self.calc_constraint_at(i, x)
return result
|
[
"def calc_lagrangian_gradient_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n return self.calc_lagrangian_at(x_, lambda_)\n\n return gradient_approximation(lagrangian, x)",
"def calc_lagrangian_hessian_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n return self.calc_lagrangian_at(x_, lambda_)\n\n return hessian_approximation(lagrangian, x)",
"def lorentzian(params, x):\n return params[0] + params[1] / ((x - params[2]) ** 2 + (0.5 * params[3]) ** 2)",
"def lgamma(x):\n cof = [ 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 ]\n y = x\n tmp = x + 5.5\n tmp -= ((x + 0.5) * math.log(tmp))\n ser = 1.000000000190015\n for j in range(len(cof)):\n y += 1\n ser += (cof[j] / y)\n return (-tmp + math.log(2.5066282746310005 * ser / x))",
"def L(self, x, c, gamma):\n return gamma / (np.pi * ((x - c) ** 2 + gamma ** 2))",
"def langevin(x):\n return np.coth(x)-1/x",
"def alpha(self, x):\n df = pd.concat([self.df[[\"l\", \"alpha\"]].rename(columns={\"l\":\"x\"}), self.df[[\"alpha\", \"r\"]].rename(columns={\"r\":\"x\"})]).sort_values(by=[\"x\", \"alpha\"])\n df = df.drop_duplicates()\n return np.interp(x, df.x, df.alpha, left=0., right=0.)",
"def lnprob4(x):\n global resid_f,alphaab,gmat,meta,cpn\n\n if x[0] > 0 and (alpha_min < x[1] < alpha_max) and x[2] > 0 and (alphared_min < x[3] < alphared_max):\n return (logL2(resid_f,alphaab,times_f,gmat,meta,cpn,A=x[0],alpha=x[1],Ared=x[2],alphared=x[3])\n - math.log(alpha_max - alpha_min) - math.log(alphared_max - alphared_min))\n else:\n return -N.inf",
"def log_gamma(x):\n return math.lgamma(x)",
"def fourier_series_value(self,x,L):\n xnew = x - int(RR(x/(2*L)))*2*L\n endpts = self.end_points()\n if xnew == endpts[0] or xnew == endpts[-1]:\n return (self.functions()[0](endpts[0]) + self.functions()[-1](endpts[-1]))/2\n else:\n return self(xnew)",
"def likelihood(self, x: np.ndarray) -> np.ndarray:",
"def calculate_lagrange_polynomials(x_values):\n lagrange_polynomials = []\n monomials = [Polynomial.monomial(1, FieldElement.one()) -\n Polynomial.monomial(0, x) for x in x_values]\n numerator = prod(monomials)\n for j in tqdm(range(len(x_values))):\n # In the denominator, we have:\n # (x_j-x_0)(x_j-x_1)...(x_j-x_{j-1})(x_j-x_{j+1})...(x_j-x_{len(X)-1})\n denominator = prod([x_values[j] - x for i, x in enumerate(x_values) if i != j])\n # Numerator is a bit more complicated, since we need to compute a poly multiplication here.\n # Similarly to the denominator, we have:\n # (x-x_0)(x-x_1)...(x-x_{j-1})(x-x_{j+1})...(x-x_{len(X)-1})\n cur_poly, _ = numerator.qdiv(monomials[j].scalar_mul(denominator))\n lagrange_polynomials.append(cur_poly)\n return lagrange_polynomials",
"def laxity(ev: SessionInfo) -> float:\n lax = (ev.estimated_departure - iface.current_time) - (\n iface.remaining_amp_periods(ev) / iface.max_pilot_signal(ev.station_id)\n )\n return lax",
"def make_lagrangian(func, equality_constraints):\n def init_multipliers(params, *args, **kwargs):\n h = jax.eval_shape(equality_constraints, params, *args, **kwargs)\n multipliers = tree_util.tree_map(lambda x: np.zeros(x.shape, x.dtype), h)\n return params, multipliers\n\n def lagrangian(params, multipliers, *args, **kwargs):\n h = equality_constraints(params, *args, **kwargs)\n return -func(params, *args, **kwargs) + math.pytree_dot(multipliers, h)\n\n def get_params(opt_state):\n return opt_state[0]\n\n return init_multipliers, lagrangian, get_params",
"def lj_p(r_a):\r\n \r\n func = ((r_a)**(-12)-(r_a)**(-6))\r\n \r\n return func",
"def LMLgrad_X(self):\n return _core.CGPbase_LMLgrad_X(self)",
"def get_fval(self, x: np.ndarray) -> float:\n fval = self(x, (0,), MODE_FUN)\n return fval",
"def goldstein_func(x):\n if not x.shape[1] == 2:\n raise IndexError('Goldstein function only takes two-dimensional '\n 'input.')\n if not np.logical_and(x >= -2, x <= 2).all():\n raise ValueError('Input for Goldstein-Price function must be within '\n '[-2, 2].')\n\n x_ = x[:, 0]\n y_ = x[:, 1]\n j = ((1 + (x_ + y_ + 1)**2.0\n * (19 - 14*x_ + 3*x_**2.0 - 14*y_ + 6*x_*y_ + 3*y_**2.0))\n * (30 + (2*x_ - 3 * y_)**2.0\n * (18 - 32*x_ + 12*x_**2.0 + 48*y_ - 36*x_*y_ + 27*y_**2.0)))\n\n return j",
"def L_x(self, Lbol):\n #Coefficients from Table 1 for the UV luminosity.\n c1, k1, c2, k2 = 10.83, 0.28, 6.08, -0.020\n #Implementation of equation (5).\n x = Lbol/(1e10*L_sun)\n bc = c1*x**k1 + c2*x**k2\n return Lbol/bc"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
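
Written out, the `calc_lagrangian_at` document above computes the usual Lagrangian of the constrained problem, with `f` the objective and `c_i` the constraint functions:

```latex
L(x, \lambda) = f(x) - \sum_{i} \lambda_i \, c_i(x)
```
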
Returns the approximated lagrangian gradient with regard to x at the point x.
|
def calc_lagrangian_gradient_at(self, x, lambda_) -> np.ndarray:
def lagrangian(x_):
return self.calc_lagrangian_at(x_, lambda_)
return gradient_approximation(lagrangian, x)
|
[
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)",
"def GetGradient(self, x):\n return _handle.OperatorHandle_GetGradient(self, x)",
"def LMLgrad_X(self):\n return _core.CGPbase_LMLgrad_X(self)",
"def gradient(x):\n\t\tpass",
"def f_grad(self, x):\n return np.zeros((x.shape[0]))",
"def grad(self, x):\n raise NotImplementedError('Grad oracle is not implemented.')",
"def f_grad(self, x):\n gradient = []\n\n for key in self.mean_functions:\n gradient.push(self.mean_functions[key][1](x))\n\n return np.array(gradient)",
"def calc_lagrangian_hessian_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n return self.calc_lagrangian_at(x_, lambda_)\n\n return hessian_approximation(lagrangian, x)",
"def gradient(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=AutoDiffXd)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n # TODO(eric.cousineau): Consider restricting this in the future to only be\n # a scalar.\n assert y_ad.size == 1 and y_ad.ndim <= 1, (\n \"The output of `function` must be of a scalar or a vector of size 1\")\n y_ad = y_ad.reshape(()) # To scalar.\n return y_ad.item().derivatives()",
"def grad_input(self, x):\n # Compute the gradient of the mean function.\n d_kernel = self.kernel.grad_input(x, self.X)\n d_mean = d_kernel.T.dot(self.alpha)\n # Compute the gradient of the standard deviation function. It is\n # absolutely crucial to note that the predict method returns the\n # variance, not the standard deviation, of the prediction.\n sd = np.sqrt(self.predict(x)[1])\n K_cross = self.kernel.cov(x, self.X)\n M = spla.cho_solve((self.L, True), K_cross.T).ravel()\n d_sd = -d_kernel.T.dot(M) / sd\n return d_mean, d_sd",
"def calc_lagrangian_at(self, x, lambda_) -> float:\n assert len(lambda_) == len(self.constraints)\n\n result = self.calc_f_at(x)\n\n for i, lambda_i in enumerate(lambda_):\n result -= lambda_i * self.calc_constraint_at(i, x)\n\n return result",
"def evaluateGradient(function,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros(shape(x)) \n for i in range(0,len(x)):\n # Set the step on the correct variable.\n h[i] = epsilon\n # Approximate derivative using central difference approximation.\n res[i] = (function(x + h) - function(x - h)) / (2 * epsilon)\n # Reset step for next iteration.\n h[i] = 0.0\n return res",
"def gradient_descent_update(x, gradx, learning_rate):\n return x - learning_rate * gradx",
"def LMLgrad(self):\n return _core.CGPSum_LMLgrad(self)",
"def LMLgrad(self):\n return _core.CGPkronSum_LMLgrad(self)",
"def _gradient_terms(self, x):\n # gradient of predictive variance of y\n dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)\n dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)\n graminv_KXx = self._graminv_Kx(x)\n\n d_y_predictive_var_dx = dvar_dx - 2. * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)\n\n # gradient of predictive covariance between integral and (x, y)-pair\n dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))\n qKX_graminv = self._qK_graminv() # (1, N)\n dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)\n d_predictive_cov_dx = dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[0, :, :]\n\n return np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx",
"def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE",
"def rosegrad(x):\n \n # Return gradient of Rosenbrock's test function\n\n if x.ndim==1:\n g = np.zeros(2)\n g[0] = -400 * np.dot((x[1] - x[0]*x[0]), x[0]) - 2 * (1 - x[0])\n g[1] = 200 * (x[1] - x[0]*x[0])\n\n elif x.ndim==2:\n nrows = x.shape[0]\n g = np.zeros((nrows,2))\n g[:,0] = -400 * np.dot((x[:,1] - x[:,0]*x[:, 0]), x[:,0]) - 2 * (1 - x[:,0])\n g[:,1] = 200 * (x[:,1] - x[:,0]*x[:,0])\n else:\n raise Exception(\"Dimension of input x must be 1 or 2.\")\n\n return g",
"def grad_ReLU(self):\n grad = np.zeros(self.x.shape)\n grad[self.x <= 0] = 0\n grad[self.x > 0] = 1\n return grad"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the approximated lagrangian Hessian with regard to x at the point x.
|
def calc_lagrangian_hessian_at(self, x, lambda_) -> np.ndarray:
def lagrangian(x_):
return self.calc_lagrangian_at(x_, lambda_)
return hessian_approximation(lagrangian, x)
|
[
"def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n return hessian_approximation(self.f, x)",
"def InvHessian(self,x):\n return linalg.inv(self.besthessian(x))",
"def default_hessian(self, x, f):\r\n n = len(x)\r\n G = zeros((n,n))\r\n h = 1e-3\r\n \r\n for i in range(n):\r\n for j in range(n):\r\n\r\n G[i,j] = (f(x + h*self._basisvec(n,(i,j),(1,1))) - f(x + h*self._basisvec(n,(i,j), (1,-1)))\r\n - f(x + h*self._basisvec(n,(i,j),(-1,1))) + f(x + h*self._basisvec(n,(i,j),(-1,-1))))/(4*h**2)\r\n G = (G + G.T)/2\r\n return linalg.inv(G)",
"def getHessian(fgradient):\n def hess(x):\n return evaluateHessian(fgradient,x)\n return hess",
"def evaluateHessian(fgradient,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros((len(x),len(x)))\n for i in range(0,len(x)):\n # Define new gradient function which returns only the i:th element of \n # the gradient in a point x.\n def fgradienti(x):\n return fgradient(x)[i]\n # Evaluate new funciton object and store the result as a row in the \n # hessian.\n row = evaluateGradient(fgradienti,x)\n res[i,:] = row\n return res",
"def hessian ( x0, calculate_cost_function, epsilon=1.e-5, linear_approx=False, *args ):\n # ``calculate_cost_function`` is the cost function implementation\n # The next line calculates an approximation to the first\n # derivative\n f1 = approx_fprime( x0, calculate_cost_function, epsilon, *args)\n\n # This is a linear approximation. Obviously much more efficient\n # if cost function is linear\n if linear_approx:\n f1 = np.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = np.zeros ( ( n, n ) )\n # The next loop fill in the matrix\n xx = x0\n for j in range( n ):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = approx_fprime( x0, calculate_cost_function, epsilon, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def build_hessian(self,x,y):\n #Precalculate entries of hessian\n x_sum = np.sum(x)\n x_squared_sum = np.sum(x*x)\n y_sum = np.sum(y)\n y_squared_sum = np.sum(y*y)\n xy_sum = np.sum(x*y)\n n = len(x)\n\n hessian = np.array([\n [n,0,x_sum,y_sum,0,0],\n [0,n,0,0,x_sum,y_sum],\n [x_sum,0,x_squared_sum,xy_sum,0,0],\n [y_sum,0,xy_sum,y_squared_sum,0,0],\n [0,x_sum,0,0,x_squared_sum,xy_sum],\n [0,y_sum,0,0,xy_sum,y_squared_sum]\n ])\n return hessian",
"def check_hessian(f, hess_analytical, x0, delta = 1e-5, verbose = True):\n hessian_analytical = np.array(hess_analytical)\n hessian_num = hessian_numerical(f, x0, delta)\n if verbose:\n print('check_hessian: hessian_analytical = ', hessian_analytical)\n print('check_hessian: hessian_num = ', hessian_num)\n print('check_hessian: hessian difference = ', \n hessian_analytical - hessian_num)\n \n return np.sqrt(np.sum((hessian_analytical - hessian_num) ** 2))",
"def pdf_hessian(self, x):\n\n # Convert x or self.dof to arrays of the same size\n x, dof = self._check_param(x)\n\n pdf_hessian_ = numpy.zeros((x.size, x.size), dtype=float)\n\n for i in range(x.size):\n ex = 0.5 * (dof[i] + 1.0)\n coeff = gamma(ex) / \\\n (numpy.sqrt(dof[i] * numpy.pi) * gamma(0.5*dof[i]))\n k = 1.0 + x[i]**2 / dof[i]\n pdf_hessian_[i, i] = -(2.0 * coeff * ex / dof[i]) * \\\n (k**(-ex-1.0) -\n (ex+1.0) * x[i] * k**(-ex-2.0) * (2.0 * x[i] / dof[i]))\n\n if self.half:\n pdf_hessian_ = 2.0*pdf_hessian_\n\n return pdf_hessian_",
"def calc_lagrangian_gradient_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n return self.calc_lagrangian_at(x_, lambda_)\n\n return gradient_approximation(lagrangian, x)",
"def init_hessian(cls, x):\n\n x = numpy.ravel(x)\n\n # generate directions\n N = x.size\n M = (N*(N+1))/2\n L = (N*(N-1))/2\n S = numpy.zeros((N,M), dtype=x.dtype)\n\n s = 0\n i = 0\n for n in range(1,N+1):\n S[-n:,s:s+n] = numpy.eye(n)\n S[-n,s:s+n] = numpy.ones(n)\n s+=n\n i+=1\n S = S[::-1].T\n\n data = numpy.zeros(numpy.hstack([3,S.shape]), dtype=x.dtype)\n data[0] = x\n data[1] = S\n return cls(data)",
"def lr_loss_gradient_hessian(y, tx, w):\n loss, gradient = lr_compute_gradient(y, tx, w)\n # print(loss)\n\n return lr_compute_loss(y, tx, w), gradient, hessian(tx, w)",
"def hessian(poly: PolyLike) -> ndpoly:\n return gradient(gradient(poly))",
"def hessian_neg_log_density(self, x):\n hessian = np.zeros((len(x), len(x)))\n\n for prior in self.prior_list:\n hessian[prior['index'], prior['index']] -= prior['density_ddx'](\n x[prior['index']]\n )\n\n return hessian",
"def hessian(self, var, bayesianOptimizer):\n bayesianOptimizer.raiseAnError(NotImplementedError,'Hessian is not yet developed for this acqusition function')",
"def update_gradient_hessian(self, X, y, sample_weight):",
"def hessian(molecule: Ligand) -> Ligand:\n\n append_to_log(\"Starting hessian calculation\")\n\n if molecule.bonds_engine in [\"g09\", \"g16\"]:\n qm_engine = Gaussian(molecule)\n\n # Use the checkpoint file as this has higher xyz precision\n try:\n copy(\n os.path.join(molecule.home, \"03_qm_optimise\", \"lig.chk\"), \"lig.chk\"\n )\n result = qm_engine.generate_input(\n \"qm\", hessian=True, restart=True, execute=molecule.bonds_engine\n )\n except FileNotFoundError:\n append_to_log(\n \"qm_optimise checkpoint not found, optimising first to refine atomic coordinates\",\n msg_type=\"minor\",\n )\n result = qm_engine.generate_input(\n \"qm\", optimise=True, hessian=True, execute=molecule.bonds_engine\n )\n\n if not result[\"success\"]:\n raise HessianCalculationFailed(\n \"The hessian was not calculated check the log file.\"\n )\n\n hessian = qm_engine.hessian()\n\n else:\n hessian = QCEngine(molecule).call_qcengine(engine=\"psi4\", driver=\"hessian\")\n np.savetxt(\"hessian.txt\", hessian)\n\n molecule.hessian = hessian\n\n append_to_log(f\"Finishing Hessian calculation using {molecule.bonds_engine}\")\n\n return molecule",
"def Piecewise_Linear_Interpolation_Function(x,data):\n #print(x)\n if x>data[-1][0]:\n return data[-1][1]\n for i in range(len(data)):\n #print(i,data[i][0])\n if (data[i][0]<=x and data[i+1][0]>=x):\n index=i\n break\n x1=data[index][0]\n y1=data[index][1]\n x2=data[index+1][0]\n y2=data[index+1][1]\n return y1*(x-x2)/(x1-x2)+y2*(x-x1)/(x2-x1)",
"def calculate_logistic_gradient_hessian(y, tx, w):\n loss, gradient = calculate_logistic_gradient(y, tx, w)\n return loss, gradient, calculate_hessian(tx, w)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
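
The `destandardize_x` document above undoes the standard-form split in which each variable without a nonnegativity bound is written as the difference of two nonnegative parts, x = x_plus - x_neg, applied only at the indices listed in `indices_of_non_positive_constrained_vars`. A standalone restatement of the same mapping is sketched below, with the class attributes passed in as plain arguments (hypothetical names).

```python
import numpy as np

def destandardize(x: np.ndarray, original_n: int, split_indices: np.ndarray) -> np.ndarray:
    """Recover the original variables from a standardized vector.

    Standalone sketch of the `destandardize_x` method above: `x` stacks the
    x_plus block (length `original_n`) followed by one x_neg entry per index
    in `split_indices`. Argument names are hypothetical.
    """
    x_plus = x[:original_n].copy()  # copy to avoid mutating the input
    x_neg = x[original_n:original_n + len(split_indices)]
    x_plus[split_indices] -= x_neg  # x = x_plus - x_neg on the split variables
    return x_plus
```
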
Destandardizes x based on the original problem.
|
def destandardize_x(self, x: np.ndarray) -> np.ndarray:
n = self.original_n
x_plus = x[:n] # take x_+ part
x_neg = x[n:n + len(self.indices_of_non_positive_constrained_vars)]
# subtract x_- from x_+ to get x
x_plus[self.indices_of_non_positive_constrained_vars] -= x_neg
return x_plus
|
[
"def transform_x(self, x):\n raise NotImplementedError()",
"def transform_x(self, x):\n if len(self.x_cols) == 0:\n return x\n self.logging('x shape: {}'.format(_shape(x)), level=logging.DEBUG)\n x_new = x.copy()\n if len(self.x_cols) > 0:\n x_new.drop(list(set(x_new.columns.values.tolist()) - set(self.x_cols)), axis=1, inplace=True)\n self.logging(' shape: {}'.format(_shape(x_new)), level=logging.DEBUG)\n return x_new",
"def descale_coords(self, x):\n pass",
"def inverse_transform_x(self, x):\n raise NotImplementedError()",
"def inverse_transform_x(self, x):\n self.logging('removing x columns is not invertible as transformation', level=logging.WARNING)\n return x",
"def fit_transform_x(self, x):\n return self.fit_x(x).transform_x(x)",
"def normalise(self) -> None:\n _ma.stochastify_d(self.plast)\n _ma.stochastify_d(self.initial)",
"def standardize_1(x):\n # the features left that are meaningful and useful for training\n feature_left = np.array([0, 1, 2, 3, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 29])\n left_x = np.zeros((x.shape[0], len(feature_left)))\n left_x[:, :] = x[:, feature_left]\n return standardize_0123_helper(left_x)",
"def dp_unnormalise(y,normalisation_parameters): \n y = y * normalisation_parameters['std']\n y = y + normalisation_parameters['mean']\n return y",
"def standardize_23(x):\n # the features left that are meaningful and useful for training\n feature_left = np.delete(np.arange(30), 22)\n left_x = np.zeros((x.shape[0], len(feature_left)))\n left_x[:, :] = x[:, feature_left]\n return standardize_0123_helper(left_x)",
"def correct(self, x):\n return self.compensation_std * x + self.compensation_mean",
"def normalize_X(self,X):\r\n X_n = X.copy()\r\n for i in range(X_n.shape[1]):\r\n X_n[:, i] = (X_n[:, i] - self.lower_bound[i]) / (self.upper_bound[i] - self.lower_bound[i])\r\n return X_n",
"def rescale(x, min=0, max=1):\n n_features = x.shape[-1]\n x_copy = np.copy(x)\n for f in range(n_features):\n c = x.obs[:, :, f]\n c_min = np.min(c)\n c_max = np.max(c)\n x_copy[:, :, f] = (c - c_min) / (c_max - c_min) * (max - min) + min\n return x_copy",
"def unflatten(self, x):\n pass",
"def rediscretize(src_dx,src_y,n_sigma,frac_samples=None,intensive=True):\n \n #seg_sel_z=np.nonzero( self.hydro_z.seg_to_2d_element==elt )[0]\n #seg_v_z=vol_z[seg_sel_z] # that's src_dx\n # seg_scal_z=scalar_z[seg_sel_z] # that's src_y\n src_dx_sum=src_dx.sum()\n if src_dx_sum==0:\n assert np.all(src_y==0.0)\n return np.zeros(n_sigma)\n \n src_dx = src_dx / src_dx_sum # normalize to 1.0 volume for ease\n src_xsum=np.cumsum(src_dx)\n\n # would like to integrate that, finding s_i = 10 * (Int i/10,(i+1)/10 s df)\n # instead, use cumsum to handle the discrete integral then interp to pull\n # out the individual values\n\n if intensive:\n src_y_ext = src_y * src_dx\n else:\n src_y_ext = src_y\n \n cumul_mass =np.concatenate( ( [0],\n np.cumsum(src_y_ext) ) )\n frac_sum=np.concatenate( ( [0], src_xsum ) )\n if frac_samples is None:\n frac_samples=np.linspace(0,1,n_sigma+1)\n \n dest_y = np.diff(np.interp(frac_samples,\n frac_sum,cumul_mass) )\n if intensive:\n dest_y *= n_sigma # assumes evenly spread out layers \n return dest_y",
"def transform(self, x):\r\n x_np = self.ensure_numpy(x)\r\n return np.delete(x_np, self.features_to_remove, axis=1)",
"def prune(self, x):\n if self.stride == (1, 2):\n x = x[:, :, 1 : -1, 0 : - 1]\n else:\n x = x[:, :, 0 : - 1, 0 : - 1]\n return x",
"def normalize(self, x):\n return self.mean_std_tracker.normalize(x)",
"def assign(self, x, value):\n x.value = value\n\n modified = []\n\n # Actualizamos el dominio de los vecinos, eliminando este valor\n for var in x.vecinos:\n # Solo modificamos los dominios de variables sin asignar\n if var.value == 0 and value in var.domain:\n var.domain -= {value}\n modified.append(var)\n \n return modified"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Factory method to create standardizing meta info for a problem that is already standardized. This represents a default instance in the sense of a nonstandardized meta info.
|
def from_pre_standardized(cls, problem: 'LinearConstraintsProblem') -> 'StandardizingMetaInfo':
return StandardizingMetaInfo(problem.n, np.empty(0, dtype=int), 0, problem.constraints)
|
[
"def _construct_metadata(self):\n if self.properties:\n return self._step_type_to_output_format_map[self.type]()\n return None",
"def _meta(self, field, **kwargs):\n try:\n return self.meta[field][0]\n except (KeyError, IndexError):\n if 'default' in kwargs:\n return kwargs['default']\n else:\n raise KeyError('Required metadata not found: %s' % field)",
"def create_problem(self, problem) -> None:\n for index, p in enumerate(self.problems):\n if p.course == problem.course and p.wale == problem.wale:\n # Same problems will only be added once\n if isinstance(problem, p.__class__):\n return\n\n # Errors should override warnings\n if (isinstance(p, NumberOfLoopsInNeedleWarning) and isinstance(problem, NumberOfLoopsInNeedleError)) or\\\n (isinstance(p, LoopHoldWarning) and isinstance(problem, LoopHoldError)) or\\\n (isinstance(p, RackingWarning) and isinstance(problem, RackingError)):\n self.problems[index] = problem\n return\n\n # Warnings should not be added if an error exists\n if (isinstance(problem, NumberOfLoopsInNeedleWarning) and isinstance(p, NumberOfLoopsInNeedleError)) or\\\n (isinstance(problem, LoopHoldWarning) and isinstance(p, LoopHoldError)) or\\\n (isinstance(problem, RackingWarning) and isinstance(p, RackingError)):\n return\n self.problems.append(problem)",
"def __missing__(self, key):\n self[key] = self.default_factory(key)\n return self[key]",
"def _make_meta(self, db_type: Optional[str] = None, col: Optional[str] = None) -> server_pb2.Meta:\n meta = server_pb2.Meta(project=self.project_id)\n if self.token is not None:\n meta.token = self.token\n if db_type is not None:\n meta.dbType = db_type\n if col is not None:\n meta.col = col\n return meta",
"def _parse_meta(self, meta):\n if isinstance(meta, astropy.io.fits.header.Header):\n meta = MetaDict(sunpy.io.header.FileHeader(meta))\n if isinstance(meta, sunpy.timeseries.TimeSeriesMetaData):\n new_meta = MetaDict()\n for m in meta.metas:\n new_meta.update(m)\n meta = new_meta\n return meta",
"def __init__(self, metricName='properMotion',\n m5Col='fiveSigmaDepth', mjdCol='expMJD', units='mas/yr',\n filterCol='filter', seeingCol='FWHMgeom', rmag=20.,\n SedTemplate='flat', badval= -666,\n atm_err=0.01, normalize=False,\n baseline=10., **kwargs):\n cols = [m5Col, mjdCol,filterCol,seeingCol]\n if normalize:\n units = 'ratio'\n super(ProperMotionMetric, self).__init__(col=cols, metricName=metricName, units=units,\n badval=badval, **kwargs)\n # set return type\n self.seeingCol = seeingCol\n self.m5Col = m5Col\n filters=['u','g','r','i','z','y']\n self.mags={}\n if SedTemplate == 'flat':\n for f in filters:\n self.mags[f] = rmag\n else:\n self.mags = utils.stellarMags(SedTemplate, rmag=rmag)\n self.atm_err = atm_err\n self.normalize = normalize\n self.baseline = baseline\n self.comment = 'Estimated uncertainty of the proper motion fit (assuming no parallax or that parallax is well fit). '\n self.comment += 'Uses visits in all bands, and generates approximate astrometric errors using the SNR in each visit. '\n if SedTemplate == 'flat':\n self.comment += 'Assumes a flat SED. '\n if self.normalize:\n self.comment += 'This normalized version of the metric represents the estimated uncertainty in the proper '\n self.comment += 'motion divided by the minimum uncertainty possible (if all visits were '\n self.comment += 'obtained on the first and last days of the survey). Values closer to 1 '\n self.comment += 'indicate more optimal scheduling.'",
"def _init_feature_meta(feature: pd.Series, column_type):\n if column_type in [ColumnTypeName.DATETIME, ColumnTypeName.TIMESPAN, ColumnTypeName.NAN]:\n common_logger.info(f\"Skip {column_type} type feature: {feature.name}\")\n feature_meta = None\n elif column_type in [ColumnTypeName.CATEGORICAL, ColumnTypeName.BINARY, ColumnTypeName.OBJECT,\n ColumnTypeName.STRING]:\n feature_meta = FeatureMeta(name=feature.name, type_=column_type, fill='', vocab=feature.dropna().unique())\n else:\n mean = feature.replace(to_replace=[np.inf, -np.inf], value=np.nan).mean()\n if np.isnan(mean):\n feature_meta = None\n common_logger.info(f\"Skip {column_type} feature {feature.name}, the mean value is {mean}\")\n else:\n feature_meta = FeatureMeta(name=feature.name, type_=column_type, fill=mean)\n\n return feature_meta",
"def test_custom_magic_to_default_inheritance(self):\n options = self.initialize_option_tree()\n options.Image.A.B = Options('style', alpha=0.2)\n\n obj = Image(np.random.rand(10, 10), group='A', label='B')\n\n # Before customizing...\n expected_obj = {'alpha': 0.2, 'cmap': 'hot', 'interpolation': 'nearest'}\n obj_lookup = Store.lookup_options('matplotlib', obj, 'style')\n self.assertEqual(obj_lookup.kwargs, expected_obj)\n\n custom_tree = {0: OptionTree(groups=Options._option_groups,\n style={'Image' : dict(clims=(0, 0.5))})}\n Store._custom_options['matplotlib'] = custom_tree\n obj.id = 0 # Manually set the id to point to the tree above\n\n # Customize this particular object\n expected_custom_obj = dict(clims=(0,0.5), **expected_obj)\n custom_obj_lookup = Store.lookup_options('matplotlib', obj, 'style')\n self.assertEqual(custom_obj_lookup.kwargs, expected_custom_obj)",
"def test_from_subclass_partial_info(self, tmpdir, instance):\n format = \"json\"\n cosmo = getattr(cosmology.realizations, instance)\n fname = tmpdir / f\"{instance}.{format}\"\n\n cosmo.write(str(fname), format=format)\n\n # partial information\n with open(fname, \"r\") as file:\n L = file.readlines()\n L[0] = L[0][:L[0].index('\"cosmology\":')]+L[0][L[0].index(', ')+2:]\n i = L[0].index('\"Tcmb0\":') # delete Tcmb0\n L[0] = L[0][:i] + L[0][L[0].index(', ', i)+2:]\n\n tempfname = tmpdir / f\"{instance}_temp.{format}\"\n with open(tempfname, \"w\") as file:\n file.writelines(L)\n\n # read with the same class that wrote fills in the missing info with\n # the default value\n got = cosmo.__class__.read(tempfname, format=format)\n got2 = Cosmology.read(tempfname, format=format, cosmology=cosmo.__class__)\n got3 = Cosmology.read(tempfname, format=format, cosmology=cosmo.__class__.__qualname__)\n\n assert (got == got2) and (got2 == got3) # internal consistency\n\n # not equal, because Tcmb0 is changed\n assert got != cosmo\n assert got.Tcmb0 == cosmo.__class__._init_signature.parameters[\"Tcmb0\"].default\n assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0.value) == cosmo\n # but the metadata is the same\n assert got.meta == cosmo.meta",
"def __init__(self, metaFileName='Meta.csv'):\n try:\n self.wavMeta = pd.read_csv(metaFileName)\n except Exception as e:\n print(e, \"\\n\", \"meta.csv does not exist, generating it.\")\n super().__init__()\n metaObj = PipelineMeta()\n self.wavMeta = metaObj.metaGenerator(save=True)",
"def __init__(self, info_yaml, basedir=''):\n f, url_parts = self._open(info_yaml, basedir)\n solution_yaml = f.read().decode('utf-8')\n self.basedir = urlunparse((url_parts.scheme, url_parts.netloc,\n os.path.dirname(url_parts.path),\n None, None, None))\n\n # create a markdown converter and modify it to rebase image links\n markdown = Markdown()\n markdown.inlinePatterns['image_link'] = _RebasedImageLinkPattern(\n self.basedir, IMAGE_LINK_RE, markdown)\n markdown.inlinePatterns['image_reference'] = _RebasedImageRefPattern(\n self.basedir, IMAGE_REFERENCE_RE, markdown)\n\n # import the solution's metadata\n info = yaml.load(solution_yaml)\n self.id = hashlib.md5(solution_yaml.encode('utf-8')).hexdigest()\n self.title = info['name']\n self.release = str(info['release'])\n if 'logo' in info:\n self.logo = self._make_absolute_path(info.get('logo'),\n self.basedir)[0]\n # in all the following fields, newlines are suppressed because they\n # are not rendered properly in Javascript strings by Django\n self.short_description = \\\n markdown.convert(info['short_desc']).replace('\\n', '')\n self.long_description = \\\n markdown.convert(info['long_desc']).replace('\\n', '')\n self.architecture = \\\n markdown.convert(info['architecture']).replace('\\n', '')\n self.design_specs = info.get('design_specs', [])\n self.heat_template = info['heat_template']\n self.env_file = info.get('env_file') # environments are optional",
"def _gen_meta(self):\n meta = {\"encode_dict\" : self.encode_dict,\n \"word_length\" : self.word_len,\n \"data_length\" : self.data_length,\n \"magic_number\" : MAGIC_NUMBER}\n return meta",
"def testFormatReasons(self):\n feature0 = Feature0()\n feature1 = Feature1()\n feature2 = Feature2()\n meta_feature = MetaFeatureValue(\n 'dummy',\n {feature0.name: feature0(1)(False),\n 'meta': MetaFeatureValue(\n 'meta',\n {feature1.name: feature1(2)(True),\n feature2.name: feature2(3)(True)})})\n self.assertEqual(meta_feature.reason, {'Feature0': 'reason0',\n 'Feature1': 'reason1',\n 'Feature2': 'reason2'})\n self.assertEqual(meta_feature.reason, meta_feature._reason)",
"def default_def_obj(name, kind, world) :\n world.add_relation(IsA(name, kind))",
"def _create_wrapper(cls_spec, element_info, myself):\n # only use the meta class to find the wrapper for BaseWrapper\n # so allow users to force the wrapper if they want\n if cls_spec != myself:\n obj = object.__new__(cls_spec)\n obj.__init__(element_info)\n return obj\n\n new_class = cls_spec.find_wrapper(element_info)\n obj = object.__new__(new_class)\n\n obj.__init__(element_info)\n\n return obj",
"def make_SG_D3PDObject (default_typeName,\n default_sgkey,\n default_prefix,\n default_object_name = None,\n default_allowMissing = False,\n default_getterClass = \\\n D3PDMakerCoreComps.SGObjGetterTool,\n allow_args = []):\n def make_obj (name, prefix, object_name,\n getter = None,\n sgkey = None,\n allowMissing = default_allowMissing,\n typeName = default_typeName,\n getterClass = default_getterClass):\n if sgkey == None: sgkey = default_sgkey\n if not getter:\n getter = getterClass (name + '_Getter',\n TypeName = typeName,\n SGKey = sgkey)\n from D3PDMakerConfig.D3PDMakerFlags import D3PDMakerFlags\n return D3PDMakerCoreComps.ObjFillerTool (name,\n Prefix = prefix,\n Getter = getter,\n ObjectName = object_name,\n AllowMissing=allowMissing,\n SaveMetadata = \\\n D3PDMakerFlags.SaveObjectMetadata())\n\n if default_object_name == None:\n default_object_name = default_typeName\n default_object_name = default_object_name.split('::')[-1]\n\n return D3PDObject (make_obj, default_prefix, default_object_name,\n sgkey = default_sgkey,\n typeName = default_typeName,\n allow_args = allow_args)",
"def make_info_template(self, item):\n if item.typ == u'Foto':\n return self.make_foto_info(item)\n elif item.typ == u'Föremål':\n return self.make_artwork_info(item)",
"def default_metadata(self):\n\n metadata = {\n \"__template_source__\": self.ctx[\"git_url\"],\n \"__template_ref__\": self.ctx[\"branch\"],\n \"__template_id__\": self.ctx[\"identifier\"],\n \"__namespace__\": self.ctx[\"project_namespace\"],\n \"__repository__\": self.ctx[\"project_repository\"],\n \"__sanitized_project_name__\": self.ctx[\"project_name_stripped\"],\n \"__project_slug__\": self.ctx[\"project_slug\"],\n \"__project_description__\": self.ctx[\"project_description\"],\n }\n\n cli_version = os.environ.get(\"RENKU_PROJECT_DEFAULT_CLI_VERSION\") or __version__\n if is_release(cli_version):\n metadata[\"__renku_version__\"] = cli_version\n\n return metadata"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the true application name from an input phrase. Designed to find the closest matching app, accounting for imperfect speech recognition.
|
from difflib import SequenceMatcher

def get_app_name(app_names_list, app):
    # Fuzzy-match the requested name against the installed app bundle names.
    most_similar = 0.0
    app_to_open = None
    for app_name in app_names_list:
        app_name_trimmed = app_name.split('.app')[0].lower()
        similarity = SequenceMatcher(None, app_name_trimmed, app.lower()).ratio()
        if similarity > most_similar:
            app_to_open = app_name
            most_similar = similarity
    return app_to_open
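
A minimal usage sketch of the matcher above; the installed-app list and the misheard input are hypothetical, and the function (with its difflib import) is assumed to be in scope:

installed_apps = ["Safari.app", "Spotify.app", "Slack.app"]
print(get_app_name(installed_apps, "spotfy"))  # fuzzy match resolves to "Spotify.app"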
|
[
"def get_closest_name(self, word):\n self.get_distances(word)\n name = min(self.distances, key=self.distances.get)\n return self.app_names[name] if self.distances[name] < 5 else \"\"",
"def get_app_name(app_id: str) -> str:\n return get_app_names([app_id]).get(app_id)",
"def get_app_name(self, event):\n app_name = [None, None, None]\n app_name[0] = event[\"client\"][\"userAgent\"][\"os\"]\n if event[\"target\"] is not None and len(event[\"target\"]) != 0:\n for target in event[\"target\"]:\n if target[\"type\"] == \"AppInstance\":\n app_name[1] = target[\"displayName\"]\n if target[\"type\"] == \"AppUser\":\n app_name[2] = target[\"alternateId\"]\n return app_name",
"def get_app_name(hwnd) -> Optional[str]:\n name = None\n _, pid = win32process.GetWindowThreadProcessId(hwnd)\n for p in c.query('SELECT Name FROM Win32_Process WHERE ProcessId = %s' % str(pid)):\n name = p.Name\n break\n return name",
"def appname(self, appId):\n return self.get_apps()[appId]['appName']",
"def get_app_name(self, device_info_filename, device_name):\r\n tree = ET.parse(device_info_filename)\r\n root = tree.getroot()\r\n os_name = \"\"\r\n for node in root.findall(\"device\"):\r\n match = re.match(\"adb:(.*)\", device_name)\r\n if match:\r\n if node.get(\"name\") == match.group(1):\r\n os_name = node.get(\"os\")\r\n\r\n if os_name == \"android\":\r\n app_name = \"com.ascensia.contour/.MainActivity\"\r\n else:\r\n app_name = \"com.onyx.g7\" # ios application Name will come here\r\n return app_name",
"def extract_application_name(url):\n try:\n args = parse_rfc1738_args(url)\n except ValueError:\n return\n scheme = args['name'] or ''\n if scheme.startswith('sam+'):\n return args['ipv4host'] or args['ipv6host']",
"def name():\n app_name = current_app.name.split(\".\")[0]\n click.echo(app_name)\n return app_name",
"def get_app_from_rq_name(name):\n parts = name.split(':')\n return {'env': parts[0], 'name': parts[1], 'role': parts[2]}",
"def test_get_raw_app_name_from_alias():\n di = no_datastore_interface.NoDatastoreInterface()\n assert di.get_raw_app_name_from_alias(api_key, device_name, \"Alias\") == ''",
"def get_app_name(self, pkg: str) -> str:\n name = self.db.get_pkg(pkg).get_app_name()\n if not name:\n name = self.db.get_pkg(pkg).get_name()\n return name",
"def get_app_name(self):\n return self._APP_NAME",
"def get_application_by_name(self, team_name, application_name):\n return self._request('GET',\n 'rest/applications/' + str(team_name) + '/lookup?name=' + str(application_name))",
"def application_name(self):\n raise NotImplementedError()",
"def __get_app_name(os: str) -> str:\n app_name = \"\"\n try:\n if os.upper() == \"ANDROID\":\n app_name = \"Test_app.apk\"\n elif os.upper() == \"IOS\":\n app_name = \"Test_app.ipa\"\n return app_name\n except KeyError:\n raise KeyError(f\"Unexpected os '{os.upper()}'. Check your behave.ini file for available variables\")",
"def _retrieve_title() -> str:\n\n app_title = None\n while not app_title:\n # Shell eats up special characters such as ', &, etc.\n app_title = input(\"Enter title: \").strip()\n\n return app_title",
"def program_name():\n return os.path.basename(sys.argv[0])",
"def app_uuid_to_name(self, uuid):\n cursor = self.__apk_info.find({\"uuid\": str(uuid)})\n return cursor[0][\"packageName\"] # uuid should always correlate to an app",
"def render_app_label(context, app, fallback=\"\"):\r\n try:\r\n text = app['app_label']\r\n except KeyError:\r\n text = fallback\r\n except TypeError:\r\n text = app\r\n return text"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open a given app. The app must be within the Applications folder. Append the opened process to the processes list.
|
def start_app(self, app_to_open, new_instance_command=False):
true_app_name = self.get_app_name(self.appNames, app_to_open)
activity_monitor_app_name = true_app_name.split('.app')[0]
new_instance = new_instance_command or not self.is_running(activity_monitor_app_name)
if new_instance:
process = subprocess.Popen(["open", "-n", "-W", "/Applications/" + true_app_name],
stdout=subprocess.PIPE,
shell=False)
else:
process = subprocess.Popen(["open", "-W", "/Applications/" + true_app_name],
stdout=subprocess.PIPE,
shell=False)
self.processes.append(process)
return process
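
For reference, a minimal stand-alone sketch of the underlying macOS `open` call used above (assumes macOS and that Safari.app exists in /Applications; the -W flag makes the open helper wait for the app to quit):

import subprocess

proc = subprocess.Popen(["open", "-W", "/Applications/Safari.app"],
                        stdout=subprocess.PIPE, shell=False)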
|
[
"def open_application(self):\n return os.startfile(os.getcwd()+\"/broken-hashserve/broken-hashserve_win.exe\")",
"def localapp(path, newinstance=False, hide=False):\n\t# Always create AEAddressDesc by process serial number; that way there's no confusion if multiple versions of the same app are running\n\tif newinstance:\n\t\tdesc = _launchapplication(path, _runevent, newinstance, hide)\n\telse:\n\t\ttry:\n\t\t\tdesc = ae.psnforapplicationpath(path)\n\t\texcept ae.MacOSError, err:\n\t\t\tif err.args[0] == -600: # Application isn't running, so launch it in background and send it a standard 'run' event.\n\t\t\t\tsleep(1)\n\t\t\t\tdesc = _launchapplication(path, _runevent, newinstance, hide)\n\t\t\telse:\n\t\t\t\traise\n\treturn desc",
"def restart_app():\n active_win = ahk.windows.get_active()\n pid = active_win.pid\n if pid is None:\n return\n\n ps = psutil.Process(pid)\n args = [ps.exe()] + ps.cmdline()[1:]\n\n all_wins = ahk.windows.filter(pid=pid)\n closed = all_wins.close_all(timeout=5)\n if not closed:\n ps.terminate()\n try:\n ps.wait(timeout=5)\n except psutil.TimeoutExpired:\n return\n\n subprocess.Popen(args)",
"def launch(self):\n\t\tif not self.isrunning() and self.AS_appdata.constructor == 'path' \\\n\t\t\t\tand self.AS_appdata.relaunchmode != 'never':\n\t\t\taem.Application.launch(self.AS_appdata.identifier)\n\t\t\tself.AS_appdata.target().reconnect() # make sure aem.Application object's AEAddressDesc is up to date\n\t\telse: # send launch event to app (will error if not already running)\n\t\t\tCommand(self, 'launch', 'ascrnoop', {})()",
"def openApp(self, app_name):\n time.sleep(2)\n locatorStr = ('//*[@title=\"' + app_name + '\"]')\n self.double_click_object(By.XPATH, locatorStr)",
"def _run_apps(self, paths):\n\n for path in paths:\n common.shell_process(path, background=True)\n time.sleep(0.2) # delay some between starts",
"def start_program(self):\n if self.__proc__ is None: \n args = self.gen_args()\n self.__proc__ = subprocess.Popen(self.gen_args(),\n stdout=PIPE,\n stderr=PIPE)\n else:\n logger.warn('Process exists already. Doing nothing.')",
"def _launch_app(self, url):\n raise NotImplementedError()",
"def start_app(self, app):\n\t\tif isinstance(app, str):\n\t\t\tpackage_name = app\n\t\telif isinstance(app, App):\n\t\t\tpackage_name = app.get_package_name()\n\t\t\tif app.get_main_activity():\n\t\t\t\tpackage_name = \"/%s\" % app.get_main_activity()\n\t\telse:\n\t\t\tself.logger.warning(\"Unsupported param \" + app + \" with type: \", type(app))\n\t\t\treturn\n\t\tintent = Intent(suffix = package_name)\n\t\tself.send_intent(intent)",
"def launch_app(self, url):\n if not self.is_available():\n t = 'Cannot launch app, because %s runtime is not available'\n raise RuntimeError(t % self.get_name())\n self._launch_app(url)\n logger.info('launched as %s app: %s' % (self.get_name(), url))",
"def get_app_path(hwnd) -> Optional[str]:\n path = None\n _, pid = win32process.GetWindowThreadProcessId(hwnd)\n for p in c.query('SELECT ExecutablePath FROM Win32_Process WHERE ProcessId = %s' % str(pid)):\n path = p.ExecutablePath\n break\n return path",
"def localappbypid(pid):\n\treturn ae.newdesc(kae.typeKernelProcessID, struct.pack('i', pid))",
"def launch(self):\n self.processdev.start()\n pid = self.processdev.pid\n p = psutil.Process(self.processdev.pid)\n p.nice(psutil.HIGH_PRIORITY_CLASS)\n print(str(pid) + \"est le pid\")",
"def find_app(self) -> Optional[Gio.AppInfo]:\n for desktop_id in self.desktop_ids:\n try:\n # pygobject raises a type error if new returns NULL, for whatever reason\n return Gio.DesktopAppInfo.new(desktop_id)\n except TypeError:\n continue\n return None",
"def startprogram(path):\n # We use raw strings (r') to avoid escaping\n # path characters\n pathlocation = r'path\n # os has a method to start a file ready for us\n os.startfile(pathlocation)",
"def open_recent_apps(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(187)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # params = {\"element\": element, \"name\": \"back\"}\n # self.mob_conn.execute_script(\"mobile: swipe\", params)",
"def open() -> None:\n try:\n run_and_return(\n [\"/usr/bin/open\", \"/System/Applications/Reminders.app/\"],\n inject_reminder=False,\n )\n except TaskCommandException as e:\n print(f\":x: Failed to open Reminders.app\\n{e}\")\n raise typer.Exit(code=1)",
"def launch_app(self):\n self._selenium_web_driver().launch_app()",
"def _changeActiveAppAndInit(self, app):\n assertMainThread()\n inProcessEvents = mainThread().property(\"processEventsRunning\")\n if inProcessEvents:\n logging.getLogger(__name__).debug(\n \"_changeActiveAppAndInit waiting for inProcessEvents to be finished inProcessEvents=%s\",\n inProcessEvents)\n MethodInvoker(dict(object=self, method=\"_changeActiveAppAndInit\", thread=mainThread()),\n Qt.QueuedConnection, app)\n return\n if isinstance(app, str):\n app = self.configuration().applicationByName(app)\n currentApp = Application.activeApplication\n if currentApp is not None:\n currentApp = currentApp.getApplication()\n self._waitForActivated = app\n self.changeActiveApp(app.getName())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if an application is currently running
|
def is_running(app_name):
count = int(subprocess.check_output(["osascript",
"-e", "tell application \"System Events\"",
"-e", "count (every process whose name is \"" + app_name + "\")",
"-e", "end tell"]).strip())
return count > 0
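
A small sketch of how this check might gate a launch (assumes macOS with osascript on the PATH; the app name is hypothetical):

if not is_running("Safari"):
    print("Safari has no running process; a new instance would be opened")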
|
[
"def is_program_running(self):\n return self.rob.secmon.is_program_running()",
"def exe_stillRunning(self):\n return self.t_exe.isAlive()",
"def isRunning (self):\n\t\tif not self.job.pid:\n\t\t\treturn False\n\t\treturn ps.exists(int(self.job.pid))",
"def _is_running(process):\n with hide('output'):\n s = run('ps auwx')\n for x in s.split('\\n'):\n if re.search(process, x):\n print '%s running' % process\n return True\n\n return False",
"def is_running(self, instance: RuntimeInstance.Params, env: RuntimeEnvironment.Params, **kwargs) -> bool:",
"def is_running(self):\n # return False if the process is not started yet\n if not self._proc:\n return False\n # return False if there is a return code from the main process\n return self._proc.poll() is None",
"def is_running(self) -> bool:\n return self.game_running",
"def is_running(self):\n if self.isRunning():\n # Startup\n return True\n\n if self.server is None:\n return False\n\n return self.server.serving",
"def _openface_running(self):\n try:\n return self.DOCKER_NAME in subprocess.check_output(['docker', 'ps'])\n except Exception as e:\n return False",
"def _check_process_is_running(self, name: str):\n for proc in psutil.process_iter():\n try:\n if name.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False",
"def is_running(name):\n if _job_pid(name) is not None:\n return True\n\n return False",
"def is_application(self):\n\n elements = self.get(CPE.KEY_APP)\n return len(elements) > 0",
"def is_server_active(self):\n return self._process and self._process.is_alive()",
"def isRunning (self):\n\t\tjobid = self.job.id ()\n\t\tif not jobid:\n\t\t\treturn False\n\t\treturn Popen (['qstat', '-j', jobid], stdout=open(devnull, 'w'), stderr=open(devnull, 'w')).wait() == 0",
"def is_running(self):\n return self.motors.are_running()",
"def is_running(self):\n return (self.configsvr is not None and self.configsvr.is_running() and\n all(shard.is_running() for shard in self.shards) and\n self.mongos is not None and self.mongos.is_running())",
"def running(self):\n return self.status == \"STARTED\"",
"def _is_running(self):\n try:\n p = subprocess.Popen([self.vmware.get(\"path\"), \"-T\", \"ws\", \"list\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, error = p.communicate()\n output = output.decode(\"utf-8\")\n except OSError as e:\n print(\"Unable to check running status for %s. Reason: %s\" % (self.vmx_path, e))\n else:\n if output:\n output_lines = output.splitlines()\n print(output_lines)\n if self.vmx_path in output_lines:\n print(\"Found the snapshots name is %s\" % self.vmx_path)\n return True\n else:\n print(\"Doesn't has the correct snapshot setting\")\n return False\n else:\n return False",
"def IsApplicationAvailable(application_name):\n return application_name in GetListOfAvailableApplications()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Kill the last opened process. Should be useful for bug testing in the future, e.g. responding to "No, don't open that".
|
def kill_last(self):
    killed = False
    while len(self.processes) > 0 and not killed:
        last_process_opened = self.processes.pop()
        try:
            self.kill(last_process_opened)
            killed = True
        except ProcessLookupError:
            # The process already exited on its own; fall back to the next most recently opened one.
            pass
    return
|
[
"def kill_process(self):\r\n self._proc.kill()",
"def kill_process(process):\n while True:\n process.terminate()\n if process.is_alive() == False:\n break",
"def terminate_process(self, upid):",
"def close(self):\r\n \r\n logging.debug('Cleanup...')#Used for debug\r\n if hasattr(self, 'p'):\r\n try:\r\n parent = psutil.Process(self.p.pid)\r\n except psutil.NoSuchProcess:\r\n logging.debug('No process: {}'.format(self.p.pid))\r\n return\r\n if self.pathDir not in ' '.join(parent.cmdline()):\r\n logging.debug('Process not in: {}'.format(parent.cmdline()))\r\n return\r\n children = parent.children(recursive = True)\r\n #Killing all the child process that created during the run\r\n for process in children:\r\n #Used for debug\r\n logging.debug('Killing pid: {}, cmdline: {}'.format(process.pid, process.cmdline()))\r\n process.kill()\r\n #Used for debug\r\n logging.debug('Killing shell pid: {}, cmdline: {}'.format(parent.pid, parent.cmdline()))\r\n parent.kill()",
"def no_kill_test():\n args = ['no_kill.py']\n log_path = \"log_file\"\n log_file = log_msg.LogMessage(log_path)\n\n log_file.write_starting()\n log_file.write_info(\"no kill test\")\n\n proc = Process(args, log_file)\n proc.set_expiration(5)\n \n # exec the process\n proc.execp()\n\n # monitor it\n ret = proc.monitorp()\n log_file.write_info(\"monitor returned %d\" % ret)\n log_file.write_ending(0, \"exec_proc.py\")",
"def kill_process(self, name='', pid=0):\n\n assert(name or pid)\n\n command = \"Stop-Process \"\n if name:\n command += \"-Name \"+name\n if pid:\n command += \"-Id \"+str(pid)\n\n self.run_shell_cmd(command=command)",
"def kill_subprocess(process):\n try:\n process.kill()\n except OSError:\n pass\n return",
"def test_kill_removes_pid_file(self):\n start(ForeverService()).kill()\n time.sleep(1)\n start(ForeverService())",
"def kill_another_instances():\r\n os.system(\"pgrep pomobar > pomobar.pid\")\r\n filePids = open(\"pomobar.pid\", \"r\")\r\n for pid in filePids:\r\n pid = int(pid)\r\n if pid != int(os.getpid()):\r\n os.system(\"kill -9 \" + str(pid))\r\n filePids.close()\r\n os.system(\"rm pomobar.pid\")",
"def terminate_process(self, pid):\n \n terminate = 1\n try:\n handle = win32api.OpenProcess(terminate, False, pid)\n win32api.TerminateProcess(handle, -1)\n win32api.CloseHandle(handle)\n # print(key + \" --Terminated!\")\n except Exception:\n # print(key + \" --ACCESS DENIED!\")\n pass",
"def kill_proc(self, proc_name): \n params = \"{'proc_name':'%s'}\" % proc_name\n return self.do_cmd(\"kill_proc\", params)",
"def kill(self):\n if self.gid:\n try:\n os.killpg(self.gid, signal.SIGKILL)\n except OSError:\n return False\n else:\n try:\n super(Popen, self).kill()\n except OSError:\n return False\n return True",
"def kill_subprocesses(self):\n pass",
"def kill_application(self):\r\n self._runWidget.kill_process()",
"def killTask(self):\n print \"TaskRunner.killTask called\"\n if self.process > -1:\n procList = self.findProcesses()\n for process in procList:\n print \"Sending SIGTERM to process: %s \" % process\n try:\n os.kill(int(process), signal.SIGTERM)\n\n except OSError:\n pass\n time.sleep(2)\n procList = self.findProcesses()\n for process in procList:\n print \"Sending SIGKILL to process: %s \" % process\n try:\n os.kill(int(process), signal.SIGKILL)\n except OSError,e:\n print \"SIGKILL error: %s, removing process from list...\" % e \n procList.remove(process)\n try:\n os.kill(self.process, signal.SIGTERM)\n except OSError:\n pass\n else:\n print \"self.process <= -1\"\n return",
"def find_and_destroy(self):\n \n pids = self.get_live_flagged_pids()\n if pids != None:\n for pid in pids:\n print(\"TERMINATING: \" + str(pid))\n self.terminate_process(pid)",
"def _KillCrashedProcesses(self):\n\n # usually our g3_monitor will kill these processes for us, but there is\n # a very brief time before it starts running where a proc could crash\n # and not get cleaned up.\n event_logs = self.ExecOnDevice([\n 'logcat',\n '-d',\n '-b',\n 'events',\n '-s',\n 'am_crash:*',\n 'am_anr:*',\n 'am_proc_died:*'])\n\n procs_to_kill = self._FindProcsToKill(event_logs)\n if procs_to_kill:\n self.ExecOnDevice(['kill'] + procs_to_kill)",
"def restart_app():\n active_win = ahk.windows.get_active()\n pid = active_win.pid\n if pid is None:\n return\n\n ps = psutil.Process(pid)\n args = [ps.exe()] + ps.cmdline()[1:]\n\n all_wins = ahk.windows.filter(pid=pid)\n closed = all_wins.close_all(timeout=5)\n if not closed:\n ps.terminate()\n try:\n ps.wait(timeout=5)\n except psutil.TimeoutExpired:\n return\n\n subprocess.Popen(args)",
"def run_rkill():\n extract_item('RKill', silent=True)\n cmd = [\n global_vars['Tools']['RKill'],\n '-s', '-l', r'{LogDir}\\Tools\\RKill.log'.format(**global_vars),\n '-new_console:n', '-new_console:s33V']\n run_program(cmd, check=False)\n wait_for_process('RKill')\n\n # RKill cleanup\n desktop_path = r'{USERPROFILE}\\Desktop'.format(**global_vars['Env'])\n if os.path.exists(desktop_path):\n for item in os.scandir(desktop_path):\n if re.search(r'^RKill', item.name, re.IGNORECASE):\n dest = r'{LogDir}\\Tools\\{name}'.format(\n name=dest, **global_vars)\n dest = non_clobber_rename(dest)\n shutil.move(item.path, dest)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the shuffled intervals do not overlap each other. If there is a single overlap, discard this whole shuffle step and redo it (discarding only the offending interval would bias the positional probabilities, so the shuffle would no longer be purely random).
|
import collections

def test_shuffled_interval_overlap(intervals):
    print("testing")
    print(intervals)
    # Group the intervals of this individual per chromosome: {chromosome: {start: end}}
    results = {}
    for interval in list(intervals.values())[0]:
        try:
            chromosome = interval[0]
            if chromosome not in results:
                results[chromosome] = {}
            results[chromosome][interval[1]] = interval[2]
        except Exception:
            pass  # Do not interrupt on a malformed interval; continue to the next one
    for chromosome in results:
        chrom_intervals = results[chromosome]
        ordered_intervals = collections.OrderedDict(sorted(chrom_intervals.items()))
        starts = list(ordered_intervals.keys())
        ends = list(ordered_intervals.values())
        # Two adjacent intervals overlap if the next start falls before the previous end.
        for x in range(0, len(starts) - 1):
            if int(starts[x + 1]) < int(ends[x]):
                print("reject", starts, ends)
                return False
        print("accept", starts, ends)
        print(chrom_intervals)
    return True
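
A toy invocation of the test above with hypothetical single-individual data; the second interval starts before the first one ends, so the whole shuffle step is rejected:

toy = {"person1": [("chr1", 100, 200), ("chr1", 150, 250)]}
print(test_shuffled_interval_overlap(toy))  # prints "reject ..." and returns False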
|
[
"def test_shuffle_range(self):\n shuffle_range(self.to_test, 3, -3)\n self.assertEqual(self.to_test[:3],self.numbers)\n self.assertEqual(self.to_test[-3:], self.numbers)\n self.assertNotEqual(self.to_test[3:-3], 2*self.letters)\n self.assertEqualItems(self.to_test[3:-3], 2*self.letters)\n #this time, start is negative and end is positive\n shuffle_range(self.to_test, -15, 15)\n self.assertEqual(self.to_test[:3],self.numbers)\n self.assertEqual(self.to_test[-3:], self.numbers)\n self.assertNotEqual(self.to_test[3:-3], 2*self.letters)\n self.assertEqualItems(self.to_test[3:-3], 2*self.letters)",
"def test_no_overlap():\n random.seed(123)\n rectangles = [(random.randint(50, 100), random.randint(50, 100))\n for _ in range(40)]\n positions = rpack.pack(rectangles)\n for i, ((x1, y1), (w1, h1)) in enumerate(zip(positions, rectangles)):\n for j, ((x2, y2), (w2, h2)) in enumerate(zip(positions, rectangles)):\n if i != j:\n disjoint_in_x = (x1 + w1 <= x2 or x2 + w2 <= x1)\n disjoint_in_y = (y1 + h1 <= y2 or y2 + h2 <= y1)\n assert disjoint_in_x or disjoint_in_y",
"def test_shuffle_between(self):\n shuffle_peptides = shuffle_between('KR')\n seq1 = 'AGHCDSGAHF' #each 10 chars long \n seq2 = 'PLMIDNYHGT'\n protein = seq1 + 'K' + seq2\n result = shuffle_peptides(protein)\n self.assertEqual(result[10], 'K')\n self.assertNotEqual(result[:10], seq1)\n self.assertEqualItems(result[:10], seq1)\n self.assertNotEqual(result[11:], seq2)\n self.assertEqualItems(result[11:], seq2)",
"def testOverlapWithoutEnoughPause(self):\r\n self.log_time(start=self.start_before, end=self.start_inside,\r\n pause=119)\r\n user_total_overlaps = self.use_checkoverlap(self.get_entries())\r\n self.assertEqual(user_total_overlaps, 1)",
"def random_different_coordinates(coords, size_x, size_y, pad,cond):\n good = False\n while not good:\n good = True\n c1 = random.randint(pad + 1, size_x - (pad + 1))\n c2 = random.randint(pad + 1, size_y -( pad + 1))\n if cond:\n for c in coords:\n coordset_0 = range(int(c[0]/radius)-1,int(c[0]/radius)+2)\n coordset_1 = range(int(c[1]/radius)-1,int(c[1]/radius)+2)\n #if c1 in coordset_0 and c2 in coordset_1:\n if int(c1/radius) in coordset_0 and int(c2/radius) in coordset_1:\n good = False\n break\n else:\n for c in coords:\n if c1==c[0] and c2==c[1]:\n good = False\n break\n return (c1,c2)",
"def test_oversampling_no_replace(base_clumper):\n with pytest.raises(ValueError):\n base_clumper.sample(n=len(base_clumper) + 1, replace=False)",
"def test_board_robot_placement():\n # Location within the board boundary (0, 0) <= (x, y) < (n, m) for location (x,y)\n for _ in range(MAX_TEST_TIMES):\n negative = random.randint(-MAX_BOARD_SIZE, -1)\n positive = random.randint(1, MAX_BOARD_SIZE)\n n = random.randint(1, MAX_BOARD_SIZE)\n m = random.randint(1, MAX_BOARD_SIZE)\n\n board = Board(n, m)\n\n # TC 005: False for x < 0\n location = [random.randint(-MAX_BOARD_SIZE, -1), 0]\n assert not board.contains(location), f\"contains({location}) needs to be False\"\n\n # TC 006 False for y < 0\n location = [0, random.randint(-MAX_BOARD_SIZE, -1)]\n assert not board.contains(location), f\"contains({location}) needs to be False\"\n\n # False for x > n-1\n location = [random.randint(n, MAX_BOARD_SIZE+1), 0]\n assert not board.contains(location), f\"contains({location}) needs to be False\"\n\n # False for y > m-1\n location = [0, random.randint(m, MAX_BOARD_SIZE+1)]\n assert not board.contains(location), f\"contains({location}) needs to be False\"\n\n # False for x > n-1 and y > m-1\n location = [random.randint(n, MAX_BOARD_SIZE+1), random.randint(m, MAX_BOARD_SIZE+1)]\n assert not board.contains(location), f\"contains({location}) needs to be False\"\n\n # True for x = 0 and 0 <= y <= m-1\n location = [0, random.randint(0, m-1)]\n assert board.contains(location), f\"contains({location}) needs to be True\"\n\n # True for 0 <= x <= n-1 and y = 0\n location = [random.randint(0, n-1), 0]\n assert board.contains(location), f\"contains({location}) needs to be True\"\n\n # True for 0 <= x <= n-1 and 0 <= y <= m-1\n location = [random.randint(0, n-1), random.randint(0, m-1)]\n assert board.contains(location), f\"contains({location}) needs to be True\"",
"def test_shuffle_except_indices(self):\n seq1 = 'AGHCDSGAHF' #each 10 chars long \n seq2 = 'PLMIDNYHGT'\n protein = seq1 + 'K' + seq2\n result = list(protein)\n shuffle_except_indices(result, [10])\n self.assertEqual(result[10], 'K')\n self.assertNotEqual(''.join(result), protein)\n self.assertEqualItems(''.join(result), protein)\n self.assertNotEqualItems(''.join(result[:10]), seq1)",
"def shuffle_bounds(ratings, recommendation_length):\n ratings_len = len(ratings)\n counter = recommendation_length\n while True:\n if ratings[counter] == ratings[counter-1]:\n counter = counter + 1\n else:\n right_bound = counter - 1\n break\n if counter >= ratings_len - 1:\n right_bound = ratings_len - 1\n break\n counter = recommendation_length-1\n while True:\n if ratings[counter] == ratings[counter-1]:\n counter = counter - 1\n else:\n left_bound = counter\n break\n if counter <= 0 :\n left_bound = 0\n break\n return left_bound, right_bound",
"def test_split_pos(self):\n self.func2()\n for x in self.list_algo:\n rand1 = r.randint(-100, 100)*100\n rand2 = r.randint(-100, 100)*100\n check = [rand1,rand2]\n my_list = [rand1,rand2]\n if check[0] < 0:\n check[0] += 0.04\n else:\n check[0] -= 0.04\n if check[1] < 0:\n check[1] += 0.04\n else:\n check[1] -= 0.04\n\n result = x.checkValue(my_list)\n self.assertEqual(result[0] == check[0],result[1] == check[1])",
"def start_shuffle(output_file,np):\n index=0\n starttime = time.time()\n individualIntervals = allIndividuals.items()\n try:\n print \"starting parallel shuffle...\"\n\tpool = Pool(np)\n results = pool.map(shuffle, individualIntervals)\n\tprint \"pool finished\\n\"\n\tprint str(results)\n\tpool.close()\n pool.join()\n except:\n os.nice(100)\n pass\n else:\n\tprint \"bbb\"\n print \"finished shuffling phase. Starting overlap analysis\"\n elapsedtime = time.time() - starttime\n reads = {}\n persons_reads = {}\n for result in results:\n for y in result.values()[0]:\n id = str(index)\n reads[id] = [str(y[0]), str(y[1]), str(y[2]), str(result.keys()[0])]\n if str(result.keys()[0]) not in persons_reads: persons_reads[str(result.keys()[0])] = []\n persons_reads[str(result.keys()[0])].append(id)\n index += 1\n \n \"\"\"Dictionary to keep track of occurrence of each number of overlaps: 0/1 (no/yes)\"\"\"\n local_overall_overlaps = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0,\n 13: 0, 14: 0, 15: 0, 16: 0, 17: 0, 18: 0, 19: 0, 20: 0}\n \n \"\"\"for each interval, of each individual, get the genes in this region from the tree representation of refseq\n genes(refseq_gene_tree) and build a dictionary of gene:[list of intervals covering this gene]\n finally COUNT the number of intervals covering the gene. This is in number of intervals overlaping a gene\n \"\"\"\n genes = {}\n for read in reads:\n l = reads[read]\n\t print l\n a = refseq_gene_tree.interval_tree(l[0], int(l[1]), int(l[2]))\n for result in a:\n b = result[3][:-1]\n if b not in genes:\n genes[b] = []\n genes[b].append(l[3])\n \n for result in genes:\n if len(genes[result]) > 1:\n if (len(genes[result])) not in local_overall_overlaps:\n local_overall_overlaps[len(genes[result])] = 0\n if local_overall_overlaps[len(genes[result])] == 0:\n local_overall_overlaps[len(genes[result])] = 1\n \n with open(output_file, 'a') as outfile:\n json.dump(local_overall_overlaps, outfile)\n outfile.write(\"\\n\")\n \n print \"Finished in {0:.1f}\".format(elapsedtime) + \" s\"\n \n return 0",
"def _shuffle(val_list, start : int):\n length = len(val_list)\n\n for i in range(start, length):\n swap_index = random.randint(0, length - 1)\n\n if i != swap_index:\n val_list[i], val_list[swap_index] = val_list[swap_index], val_list[i]",
"def testOverlapWithPause(self):\r\n self.log_time(start=self.start_before, end=self.start_inside,\r\n pause=120)\r\n user_total_overlaps = self.use_checkoverlap(self.get_entries())\r\n self.assertEqual(user_total_overlaps, 0)",
"def test_overlap(self):\r\n\r\n for pats, graph in self.known_overlap:\r\n result = assembler.overlap_graph(pats)\r\n self.assertEqual(graph, result)",
"def removeCoveredIntervals(self, intervals):\r\n # sort the intervals to account for covering: sort first by leftmost starting point\r\n # and then by rightmost ending point, so sort ascending and descending\r\n intervals.sort( key = lambda x: ( x[0], -x[1] ) )\r\n \r\n # the first interval won't be covered by anything, so it goes anyway\r\n \r\n res = [ intervals[0] ]\r\n for l, r in intervals[1:]:\r\n # you want to compare current interval with the immediately previous to see if covered\r\n prevL, prevR = res[-1]\r\n \r\n # is it covered by the previous?\r\n if prevL <= l and prevR >= r:\r\n continue\r\n \r\n res.append([l, r])\r\n \r\n return len(res)",
"def _no_gaps(sequence):\n return set(sequence) == set(range(len(sequence)))",
"def shuffle_in_unison_inplace(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]",
"def mutate_outatt_single(seq, exclude):\n exclude_low = exclude[np.argmin(exclude,axis=0)[0]]\n exclude_high = exclude[np.argmax(exclude,axis=0)[0]]\n\n # sanity check zero size window\n candidate_1 = (0, exclude_low[0]-1)\n candidate_2 = (exclude_low[1]+1, exclude_high[0]-1)\n candidate_3 = (exclude_high[1]+1, len(seq) -1)\n candidates = (candidate_1, candidate_2, candidate_3)\n\n window_size_1 = exclude_low[0]\n window_size_2 = exclude_high[0]-1 - exclude_low[1]\n window_size_3 = len(seq) - exclude_high[1] -1\n suitable_candidates = np.where(np.array((window_size_1,\n window_size_2,\n window_size_3))!=0)[0]\n # print(suitable_candidates)\n # choose mutation window\n choose_exclude = np.random.uniform(0,1)\n for k in range(0,len(suitable_candidates)):\n if choose_exclude <= (k+1) / len(suitable_candidates):\n start, end = candidates[suitable_candidates[k]]\n break\n window_size = end - start + 1\n\n # choose mutation point\n choose_point = np.random.uniform(0,1)\n # print(\"choose point %.6f, window_size %d\"%(choose_point, window_size))\n for i in range(1, window_size+1):\n # print(i / window_size)\n if choose_point <= i / window_size:\n mutated_point = start+i-1\n break\n original_type = seq[mutated_point]\n\n\n # choose mutation type\n types = ['A','C','G','T']\n left_types = types.remove(original_type)\n\n choose_type = np.random.uniform(0,1)\n\n if choose_type <= 1 / 3:\n mutate_type = types[0]\n elif choose_type <= 2 / 3:\n mutate_type = types[1]\n else:\n mutate_type = types[2]\n# print(mutate_type)\n# print(seq[mutated_point])\n# print(mutated_point)\n seq[mutated_point] = mutate_type\n return seq",
"def test_non_commuting_overlapping_targets(self):\n op1 = qml.ops.op_math.Controlled(qml.PauliZ(3), control_wires=(0, 1, 2))\n op2 = qml.ops.op_math.Controlled(qml.RX(1.2, 3), control_wires=(0, 1))\n assert not qml.is_commuting(op1, op2)\n assert not qml.is_commuting(op2, op1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initiate parallel worker processes for the interval shuffling phase
|
import json
import os
import time
from multiprocessing import Pool

def start_shuffle(output_file, np):
    # `np` is the number of worker processes; `allIndividuals`, `shuffle` and
    # `refseq_gene_tree` are assumed to be defined at module level.
    index = 0
    starttime = time.time()
    individualIntervals = allIndividuals.items()
    try:
        print("starting parallel shuffle...")
        pool = Pool(np)
        results = pool.map(shuffle, individualIntervals)
        print("pool finished\n")
        print(str(results))
        pool.close()
        pool.join()
    except Exception:
        os.nice(100)
        pass
    else:
        print("bbb")
    print("finished shuffling phase. Starting overlap analysis")
    elapsedtime = time.time() - starttime
    reads = {}
    persons_reads = {}
    for result in results:
        individual = str(list(result.keys())[0])
        for y in list(result.values())[0]:
            id = str(index)
            reads[id] = [str(y[0]), str(y[1]), str(y[2]), individual]
            if individual not in persons_reads:
                persons_reads[individual] = []
            persons_reads[individual].append(id)
            index += 1
    # Dictionary to keep track of the occurrence of each number of overlaps: 0/1 (no/yes)
    local_overall_overlaps = {n: 0 for n in range(21)}
    # For each interval of each individual, get the genes in this region from the tree
    # representation of refseq genes (refseq_gene_tree) and build a dictionary of
    # gene: [list of intervals covering this gene]; finally COUNT the number of intervals
    # covering the gene, i.e. the number of intervals overlapping a gene.
    genes = {}
    for read in reads:
        l = reads[read]
        print(l)
        a = refseq_gene_tree.interval_tree(l[0], int(l[1]), int(l[2]))
        for result in a:
            b = result[3][:-1]
            if b not in genes:
                genes[b] = []
            genes[b].append(l[3])
    for result in genes:
        if len(genes[result]) > 1:
            if len(genes[result]) not in local_overall_overlaps:
                local_overall_overlaps[len(genes[result])] = 0
            if local_overall_overlaps[len(genes[result])] == 0:
                local_overall_overlaps[len(genes[result])] = 1
    with open(output_file, 'a') as outfile:
        json.dump(local_overall_overlaps, outfile)
        outfile.write("\n")
    print("Finished in {0:.1f}".format(elapsedtime) + " s")
    return 0
|
[
"def worker_init_fn(worker_id):\n np.random.seed(args.seed + worker_id)\n random.seed(args.seed + worker_id)",
"def initialize_workers(self):\n self.workers = []\n for j in range(self.n):\n # generate p according to spammer-hammer model\n p_j = np.random.choice([1., 0.5], p=[self.q, 1 - self.q])\n worker = Worker(j, p_j)\n self.workers.append(worker)\n return",
"def test_multithreading():",
"def pushRandom(t):\n Worker.push(t)\n shuffle(Worker.workers)",
"def thread_task(self):\n for _ in range(100000): \n self.increment()",
"def run (self) :\n \n threads = []\n concurrency = int(self.bench_cfg['concurrency'])\n \n self._start ()\n \n for tid in range (0, concurrency) :\n \n self.events[tid] = {}\n self.events[tid]['event_1'] = rut.Event ()\n self.events[tid]['event_2'] = rut.Event ()\n self.events[tid]['event_3'] = rut.Event ()\n self.events[tid]['event_4'] = rut.Event ()\n self.events[tid]['event_5'] = rut.Event ()\n self.start [tid] = time.time ()\n self.times [tid] = list()\n \n t = rut.Thread (self._thread, tid)\n threads.append (t)\n \n \n for t in threads :\n t.start ()\n \n \n # wait for all threads to start up and initialize\n self.t_init = time.time ()\n rut.lout (\"\\n> \" + \"=\"*concurrency)\n rut.lout (\"\\n> \")\n for tid in range (0, concurrency) :\n self.events[tid]['event_1'].wait ()\n \n # start workload in all threads\n self.t_start = time.time ()\n for tid in range (0, concurrency) :\n self.events[tid]['event_2'].set ()\n \n # wait for all threads to finish core test\n for tid in range (0, concurrency) :\n self.events[tid]['event_3'].wait ()\n self.t_stop = time.time ()\n \n # start shut down\n rut.lout (\"\\n< \" + \"-\"*concurrency)\n rut.lout (\"\\n< \")\n for tid in range (0, concurrency) :\n self.events[tid]['event_4'].set ()\n \n # wait for all threads to finish shut down\n for tid in range (0, concurrency) :\n self.events[tid]['event_5'].wait ()",
"def initTasks(self):\n # create a looping task sequence that will occasionally update the\n # suit population in the local neighborhood\n self.__waitForNextUpkeep()\n\n # create a looping task sequence that will occasionally update the\n # adjustment to the number of suits desired in this hood, this gradually\n # changes over time\n self.__waitForNextAdjust()",
"def real_run(self):\n if self.sample.delay > 0:\n self.logger.info(\"Sample set to delay %s, sleeping.\" % s.delay)\n time.sleep(self.sample.delay)\n \n\n # 12/29/13 CS Queueable plugins pull from the worker queue as soon as items\n # are in it and farm it out to a pool of workers to generate.\n # Non-Queueable plugins will run as a seperate process all on their own generating\n # events, and is the same as we used to operate.\n\n # 12/29/13 Non Queueable, same as before\n plugin = c.getPlugin('generator.'+self.sample.generator, self.sample)\n self.logger.debugv(\"Generating for class '%s' for generator '%s' queueable: %s\" % (plugin.__name__, self.sample.generator, plugin.queueable))\n \n # Wait a random amount of time, try to grab a lock, then start up the timer\n time.sleep(random.randint(0, 100)/1000)\n self.logger.debug(\"Timer creating plugin for '%s'\" % self.sample.name)\n with c.copyLock:\n while c.timersStarting.value() > 0:\n self.logger.debug(\"Waiting for exclusive lock to start for timer '%s'\" % self.sample.name)\n time.sleep(0.1)\n \n c.timersStarting.increment()\n p = plugin(self.sample)\n self.executions = 0\n \n c.timersStarting.decrement()\n c.timersStarted.increment()\n \n # 9/6/15 Don't do any work until all the timers have started\n while c.timersStarted.value() < len(c.sampleTimers):\n self.logger.debug(\"Not all timers started, sleeping for timer '%s'\" % self.sample.name)\n time.sleep(1.0)\n try:\n p.setupBackfill()\n except ValueError as e:\n self.logger.error(\"Exception during backfill for sample '%s': '%s'\" % (self.sample.name, str(e)))\n \n\n while (1):\n if not self.stopping:\n if not self.interruptcatcher:\n if self.countdown <= 0:\n # 12/15/13 CS Moving the rating to a separate plugin architecture\n count = self.rater.rate()\n\n et = self.sample.earliestTime()\n lt = self.sample.latestTime()\n\n # Override earliest and latest during backfill until we're at current time\n if self.sample.backfill != None and not self.sample.backfilldone:\n if self.sample.backfillts >= self.sample.now(realnow=True):\n self.logger.info(\"Backfill complete\")\n self.sample.backfilldone = True\n else:\n self.logger.debug(\"Still backfilling for sample '%s'. 
Currently at %s\" % (self.sample.name, self.sample.backfillts))\n\n if not p.queueable:\n try:\n partialInterval = p.gen(count, et, lt)\n # 11/24/13 CS Blanket catch for any errors\n # If we've gotten here, all error correction has failed and we\n # need to gracefully exit providing some error context like what sample\n # we came from\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n import traceback\n self.logger.error('Exception in sample: %s\\n%s' % (self.sample.name, \\\n traceback.format_exc()))\n sys.stderr.write('Exception in sample: %s\\n%s' % (self.sample.name, \\\n traceback.format_exc()))\n sys.exit(1)\n\n self.countdown = partialInterval\n self.executions += 1\n\n ## Sleep for partial interval\n # If we're going to sleep for longer than the default check for kill interval\n # go ahead and flush output so we're not just waiting\n # if partialInterval > self.time:\n # self.logger.debugv(\"Flushing because we're sleeping longer than a polling interval\")\n # self.sample.out.flush()\n\n \n self.logger.debug(\"Generation of sample '%s' in app '%s' sleeping for %f seconds\" \\\n % (self.sample.name, self.sample.app, partialInterval) ) \n # logger.debug(\"Queue depth for sample '%s' in app '%s': %d\" % (self.sample.name, self.sample.app, c.outputQueue.qsize())) \n else:\n # Put into the queue to be generated\n stop = False\n while not stop:\n try:\n c.generatorQueue.put((self.sample.name, count, (time.mktime(et.timetuple())*(10**6)+et.microsecond), (time.mktime(lt.timetuple())*(10**6)+lt.microsecond)), block=True, timeout=1.0)\n c.generatorQueueSize.increment()\n self.logger.debug(\"Put %d events in queue for sample '%s' with et '%s' and lt '%s'\" % (count, self.sample.name, et, lt))\n stop = True\n except Full:\n self.logger.warning(\"Generator Queue Full, looping\")\n if self.stopping:\n stop = True\n pass\n\n # Sleep until we're supposed to wake up and generate more events\n self.countdown = self.sample.interval\n self.executions += 1\n\n # Clear cache for timestamp\n # self.sample.timestamp = None\n\n # No rest for the wicked! Or while we're doing backfill\n if self.sample.backfill != None and not self.sample.backfilldone:\n # Since we would be sleeping, increment the timestamp by the amount of time we're sleeping\n incsecs = round(self.countdown / 1, 0)\n incmicrosecs = self.countdown % 1\n self.sample.backfillts += datetime.timedelta(seconds=incsecs, microseconds=incmicrosecs)\n self.countdown = 0\n\n if self.countdown > 0:\n self.sample.saveState()\n\n # 8/20/15 CS Adding support for ending generation at a certain time\n if self.sample.end != None:\n # 3/16/16 CS Adding support for ending on a number of executions instead of time\n # Should be fine with storing state in this sample object since each sample has it's own unique\n # timer thread\n if self.sample.endts == None:\n if self.executions >= self.sample.end:\n self.logger.info(\"End executions %d reached, ending generation of sample '%s'\" % (self.sample.end, self.sample.name))\n self.stopping = True\n elif lt >= self.sample.endts:\n self.logger.info(\"End Time '%s' reached, ending generation of sample '%s'\" % (self.sample.endts, self.sample.name))\n self.stopping = True\n else:\n self.countdown -= self.time\n time.sleep(self.time)\n else:\n time.sleep(self.time)\n else:\n while c.generatorQueueSize.value() > 0 or c.outputQueueSize.value() > 0:\n self.logger.debugv(\"Waiting for queues to empty\")\n time.sleep(0.1)\n self.logger.info(\"Stopped timer for sample '%s'\" % self.sample.name)\n sys.exit(0)",
"def reshuffle(self):\n self.trials = generate_trials(self.targets, n_repeat=self.rep)",
"def worker_init_fn(worker_id, num_workers, rank, seed):\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)",
"def seeding(self):\n\t\tx = 5\n\t\twhile x != 0:\n\t\t\trandom.shuffle(self.inPlay)\n\t\t\tx -= 1",
"def run(self):\n\n def do_work():\n self.queue.append(self.triplet_sampler.sample(self.batch_size))\n\n while True:\n if len(self.queue) < self.queue_size:\n self.queue.append(self.triplet_sampler.sample(self.batch_size))\n print(f'batch queue size: {len(self.queue)}')\n sleep(0.1)",
"def _shuffle_iterators(self):\n shuffle(self.prof_ints)\n shuffle(self.rec_ints)\n shuffle(self.slot_ints)",
"def omp_threads_for(rank_spec):",
"def test_parallel_shot_thread_assignment(self):\n\n max_threads = self.available_threads()\n opts = self.backend_options_parallel(shot_threads=max_threads)\n\n # Test single circuit\n # Parallel experiments and shots should always be 1\n result = execute(self.dummy_circuit(1),\n self.SIMULATOR,\n shots=10*max_threads,\n **opts).result()\n for threads in self.threads_used(result):\n target = {\n 'experiments': 1,\n 'shots': 1,\n 'state_update': max_threads,\n 'total': max_threads\n }\n self.assertEqual(threads, target)\n\n # Test multiple circuit, no noise\n # Parallel experiments and shots should always be 1\n result = execute(max_threads*[self.dummy_circuit(1)],\n self.SIMULATOR,\n shots=10*max_threads,\n **opts).result()\n for threads in self.threads_used(result):\n target = {\n 'experiments': 1,\n 'shots': 1,\n 'state_update': max_threads,\n 'total': max_threads\n }\n self.assertEqual(threads, target)\n\n # Test multiple circuits, with noise\n # Parallel shots should take priority\n result = execute(max_threads*[self.dummy_circuit(1)],\n self.SIMULATOR,\n shots=10*max_threads,\n noise_model=self.dummy_noise_model(),\n **opts).result()\n for threads in self.threads_used(result):\n target = {\n 'experiments': 1,\n 'shots': max_threads,\n 'state_update': 1,\n 'total': max_threads\n }\n self.assertEqual(threads, target)\n\n # Test multiple circuit, with measure in middle, no noise\n # Parallel shots should take priority\n result = execute(max_threads*[self.measure_in_middle_circuit(1)],\n self.SIMULATOR,\n shots=10*max_threads,\n noise_model=self.dummy_noise_model(),\n **opts).result()\n for threads in self.threads_used(result):\n target = {\n 'experiments': 1,\n 'shots': max_threads,\n 'state_update': 1,\n 'total': max_threads\n }\n self.assertEqual(threads, target)\n\n # Test multiple circuits, with memory limitation\n # NOTE: this assumes execution on statevector simulator\n # which required approx 2 MB for 16 qubit circuit.\n opts['max_memory_mb'] = 1\n circuit = QuantumVolume(16, 1, seed=0)\n circuit.measure_all()\n result = execute(2 * [circuit],\n self.SIMULATOR,\n shots=10*max_threads,\n **opts).result()\n for threads in self.threads_used(result):\n target = {\n 'experiments': 1,\n 'shots': 1,\n 'state_update': max_threads,\n 'total': max_threads\n }\n self.assertEqual(threads, target)",
"def __init__(self, stimulus_range=None, repeats=None, frame_angles=None,\n currents=None):\n self.trial_list = []\n for stim, repeat in zip(stimulus_range, repeats):\n for rep in range(repeat):\n for frame in frame_angles:\n for curr in currents:\n self.trial_list.append([stim, frame, curr])\n # shuffle(self.trial_list)",
"def randprobsuntied(self) :\n\t\tself.tasklist = []\n\t\tfeats = self.get_feats_standard()\n\t\n\t\t# Repeat for all the tasks described\t\n\t\tfor taskid in range(self.ntimes) :\t\n\t\t\thmm = HMM()\n\t\t\tself._set_params_randprobsuntied(hmm)\n\t\t\tcmrf = CMRF(hmm)\t\n\t\t\ttask = Task('sim'+STUDY+'_'+self.name+'_'+str(taskid),cmrf,\\\n\t\t\t\tfeats)\t\t\t\t\n\t\t\t# Run Brute force to enumerate the frontier\n\t\t\twith benchmark(task.name+'brute') as t:\n\t\t\t\tseq,energies = self.bruteforce(cmrf,feats)\t\t\t\n\t\t\ttask.all_seq = seq\n\t\t\ttask.all_seq_energy = energies\n\t\t\ttask.brute_time = t.elapsed\t\t\t\n\n\t\t\t# Now run the toy simulation`\n\t\t\twith benchmark(task.name+'pareto') as t : \n\t\t\t\ttask.frontier,task.frontier_energy = \\\n\t\t\t\t\tpareto_frontier(cmrf,feats)\t\t\n\t\t\tif self.plot_all :\n\t\t\t\ttask.plot_frontier()\n\t\t\ttask.pareto_time = t.elapsed\n\t\t\tself.tasklist.append(task)",
"def hyperthreads_for(rank_spec):",
"def run_random_mixer():\n find_existing_mixes()\n RandomMixThread().start()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the slices of this bounding box in a volume with the specified voxel size
|
def slices_in_scale(self, voxel_size: Cartesian) -> tuple:
minpt = tuple( p * s1 // s2 for p, s1, s2 in zip(
self.minpt, self.voxel_size, voxel_size
))
maxpt = tuple( p * s1 // s2 for p, s1, s2 in zip(
self.maxpt, self.voxel_size, voxel_size
))
bbox = BoundingBox(minpt, maxpt)
return bbox.slices
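
The rescaling arithmetic in isolation, without the BoundingBox/Cartesian dependencies (all values hypothetical):

minpt, maxpt = (0, 0, 0), (128, 128, 128)   # coordinates in 4x4x40 nm voxels
src, dst = (4, 4, 40), (8, 8, 40)           # source and target voxel sizes
new_min = tuple(p * s1 // s2 for p, s1, s2 in zip(minpt, src, dst))
new_max = tuple(p * s1 // s2 for p, s1, s2 in zip(maxpt, src, dst))
print(new_min, new_max)                     # (0, 0, 0) (64, 64, 128)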
|
[
"def get_slice_from_volume(image, view, slice_id):\n if(view == 1):\n image = np.transpose(image, [2, 0, 1])\n elif(view == 2):\n image = np.transpose(image, [1, 0, 2])\n return image[slice_id]",
"def to_volume(slices):\n volume = np.stack([s.pixel_array for s in slices])\n volume = volume.astype(np.int16)\n\n # Set outside-of-scan pixels to 0\n # The intercept is usually -1024, so air is approximately 0\n volume[volume == -2000] = 0\n\n # Convert to Hounsfield units (HU)\n for n in range(len(slices)):\n intercept = slices[n].RescaleIntercept\n slope = slices[n].RescaleSlope\n if slope != 1:\n volume[n] = slope * volume[n].astype(np.float64)\n volume[n] = volume[n].astype(np.int16)\n volume[n] += np.int16(intercept)\n\n volume = np.array(volume, dtype=np.int16)\n spacing = tuple(map(float, ([slices[0].SliceThickness] + slices[0].PixelSpacing)))\n return volume, spacing",
"def _compute_slices(cubes):\n gigabyte = 2**30\n total_bytes = cubes[0].data.nbytes * len(cubes)\n n_slices = int(np.ceil(total_bytes / gigabyte))\n\n n_timesteps = cubes[0].shape[0]\n slice_len = int(np.ceil(n_timesteps / n_slices))\n\n for i in range(n_slices):\n start = i * slice_len\n end = (i + 1) * slice_len\n if end > n_timesteps:\n end = n_timesteps\n yield slice(start, end)",
"def generate_volume_slices(self, dict_slices, verbose=False):\n\n number_of_slices = dict_slices['number_of_slices']\n\n list_name_final_slices = []\n\n for ind in range(number_of_slices):\n\n self.add_slice(dict_slices['slice_' + str(ind)], ind, verbose=verbose)\n name_final_slice = 'slice_with_holes_' + str(ind)\n list_name_final_slices.append(name_final_slice)\n\n self.perform_actions_current_document.perform_union(list_name_final_slices, 'porous_medium', verbose=verbose)",
"def volume(self, ):\n if self.brain_model_axis.volume_mask.sum() == 0:\n raise ValueError(f\"Can not create volume without voxels in {self}\")\n data = np.full(self.brain_model_axis.volume_shape + self.data.shape[:-1], np.nan,\n dtype=self.data.dtype)\n voxels = self.brain_model_axis.voxel[self.brain_model_axis.volume_mask]\n data[tuple(voxels.T)] = np.transpose(self.data, (-1, ) + tuple(range(self.data.ndim - 1)))[self.brain_model_axis.volume_mask]\n return nib.Nifti1Image(data, affine=self.brain_model_axis.affine)",
"def manual_pv_slice_series():\n\n \"\"\"\n PV cut orientation, vertical or horizontal\n Vertical means slice at a single RA and plot velocity vs Dec\n Horizontal means slice at a single Dec and plot velocity vs RA\n \"\"\"\n orientation = 'horizontal'\n start_idx, step_idx = 25, 50\n\n # Load cube\n line_stub = 'cii'\n if line_stub in large_map_filenames:\n # Use the custom filename rather than the default\n filename = large_map_filenames[line_stub]\n else:\n # Use default filename from cube_utils (many of these are centered around Pillars)\n filename = line_stub\n cube_obj = cube_utils.CubeData(filename).convert_to_K().convert_to_kms()\n dimension_size = (cube_obj.data.shape[2] if orientation=='vertical' else cube_obj.data.shape[1])\n\n # Make image\n ref_vel_lims = (10*kms, 35*kms)\n ref_mom0 = cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_img = ref_mom0.to_value()\n\n # Set colors\n pv_cmap = 'plasma'\n img_cmap = 'Greys_r'\n line_color = marcs_colors[1]\n\n # Loop thru slice index\n for slice_idx in range(start_idx, dimension_size, step_idx):\n\n if orientation == 'vertical':\n # Cube index order is V,Y,X = Velocity,Dec,RA = V,I,J\n cube_slices = (slice(None), slice(None), slice_idx)\n else:\n cube_slices = (slice(None), slice_idx, slice(None))\n\n pv_slice = cube_obj.data[cube_slices]\n\n # First try to remake fig/axes each time. Try persistent if slow\n fig = plt.figure(figsize=(8, 10))\n gs = fig.add_gridspec(2, 1)\n ax_img = fig.add_subplot(gs[0,0], projection=cube_obj.wcs_flat)\n ax_pv = fig.add_subplot(gs[1,0], projection=pv_slice.wcs)\n\n im = ax_img.imshow(ref_img, origin='lower', vmin=0, cmap=img_cmap)\n fig.colorbar(im, ax=ax_img, label=ref_mom0.unit.to_string('latex_inline'))\n\n im = ax_pv.imshow(pv_slice.to_value(), origin='lower', vmin=0, cmap=pv_cmap)\n fig.colorbar(im, ax=ax_pv, label=pv_slice.unit.to_string('latex_inline'), orientation='horizontal')\n\n # Plot line\n if orientation == 'vertical':\n plot_line = ax_img.axvline\n else:\n plot_line = ax_img.axhline\n plot_line(slice_idx, color=line_color, linewidth=2)\n # Reference image velocity interval stamp\n ax_img.text(0.1, 0.9, make_vel_stub(ref_vel_lims), color=line_color, ha='left', va='bottom')\n\n # Clean up axes labels\n # ax_img.set_xlabel(\"RA\")\n # ax_img.set_ylabel(\"Dec\")\n ax_pv.coords[1].set_format_unit(kms)\n # 2023-04-26, 06-07\n savename = f\"/home/ramsey/Pictures/2023-04-26/m16_pv_{orientation}_{slice_idx:03d}.png\"\n fig.savefig(savename, metadata=catalog.utils.create_png_metadata(title=f'{line_stub}, using stub/file {filename}', file=__file__, func='manual_pv_slice_series'))",
"def multi_slice_viewer(volume, first_index=0, cmap=None):\n remove_keymap_conflicts({'j', 'k'})\n fig, ax = plt.subplots()\n ax.volume = volume\n ax.index = first_index#volume.shape[0] // 2\n ax.imshow(volume[ax.index], cmap=cmap)\n ax.set_title('slice {}'.format(ax.index))\n fig.canvas.mpl_connect('key_press_event', process_key)",
"def xSlice(self, i):\n vslice = vtk.vtkImageDataGeometryFilter()\n vslice.SetInputData(self.imagedata())\n nx, ny, nz = self.imagedata().GetDimensions()\n if i>nx-1:\n i=nx-1\n vslice.SetExtent(i,i, 0,ny, 0,nz)\n vslice.Update()\n return Mesh(vslice.GetOutput())",
"def _make_resample_slices(data, win_size):\r\n row = int(data.shape[0] / win_size[0]) * win_size[0]\r\n col = int(data.shape[1] / win_size[1]) * win_size[1]\r\n slices = []\r\n\r\n for i in range(win_size[0]):\r\n for j in range(win_size[1]):\r\n slices.append(data[i:row:win_size[0], j:col:win_size[1]])\r\n return slices",
"def calc_volume(self:Tensor):\n x,y,z = 1,1,1\n voxel_size = x*y*z\n self.volume = {'background': self._calc_vol_per_class(0, voxel_size)}\n self.volume['total_mask_volume'] = self.size(0)*self.size(1)*self.size(2)*voxel_size - self.volume['background']\n for c in self.unique()[1:]:\n name = 'class '+str(int(c))\n self.volume[name] = self._calc_vol_per_class(c, voxel_size)\n #print(self.volume)\n return self.volume[\"class 1\"]",
"def zSlice(self, k):\n vslice = vtk.vtkImageDataGeometryFilter()\n vslice.SetInputData(self.imagedata())\n nx, ny, nz = self.imagedata().GetDimensions()\n if k>nz-1:\n k=nz-1\n vslice.SetExtent(0,nx, 0,ny, k,k)\n vslice.Update()\n return Mesh(vslice.GetOutput())",
"def test3_generate_volume(self):\n\n radius = 0.4\n center = 0.5 * np.ones(3)\n\n\n x_ax = np.linspace(0, 1, 3)\n y_ax = np.linspace(0, 1, 3)\n z_ax = np.linspace(0, 1, 3)\n\n c = Sphere(center, radius)\n subvol = c.generate_volume(x_ax, y_ax, z_ax)\n\n arr = np.zeros((3,3,3))\n arr[1,1,1] = 1\n\n self.assertTrue(np.allclose(arr, subvol))",
"def test_get_slice_dense(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n\n bn, bm = 5, 5\n x = np.random.randint(100, size=(30, 30))\n ds_data = ds.array(x=x, block_size=(bn, bm))\n data = ds.array(x=x, block_size=(bn, bm))\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n\n slice_indices = [(7, 22, 7, 22), # many row-column\n (6, 8, 6, 8), # single block row-column\n (6, 8, None, None), # single-block rows, all columns\n (None, None, 6, 8), # all rows, single-block columns\n (15, 16, 15, 16), # single element\n # (-10, -5, -10, -5), # out-of-bounds (not\n # implemented)\n # (-10, 5, -10, 5), # out-of-bounds (not implemented)\n (21, 40, 21, 40)] # out-of-bounds (correct)\n\n for top, bot, left, right in slice_indices:\n got = data[top:bot, left:right].collect()\n expected = ds_data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))\n\n # Try slicing with irregular array\n x = data[1:, 1:]\n data = ds_data[1:, 1:]\n\n for top, bot, left, right in slice_indices:\n got = x[top:bot, left:right].collect()\n expected = data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))",
"def resize_slice_wise(volume, slice_shape, interpolation_method=cv2.INTER_AREA):\n slices = list(volume)\n for i in range(len(slices)):\n slices[i] = cv2.resize(slices[i], slice_shape, interpolation=interpolation_method)\n return np.array(slices)",
"def _chopped_volume_default(self):\n grid = self.grid\n grid.trait_set(x_max=self.slicePosition[1])\n\n volume = mlab.pipeline.volume(\n grid,\n figure=self.vscene3d.mayavi_scene,\n vmin=self.dataRange[0],\n vmax=self.dataRange[1]\n )\n\n volume._otf = self.otf\n volume._volume_property.set_scalar_opacity(self.otf)\n\n return volume",
"def spectral_cubeslice(cubefile='orion_13co.combine.fits', ralim=['5h37m30s',\n'5h34m30s'], declim=['-6d43m00s', '-5d54m00s'], \nvlim=[0*u.km/u.s, 20.*u.km/u.s]):\n from spectral_cube import spectralcube\n #if cubefile is a fits file, read it in with spectralcube. if cubefile is already\n #a cube object, bypass the read.\n\n try:\n cube = spectralcube.read(cubefile)\n except (typeerror, valueerror):\n cube = cubefile\n pass\n\n try:\n subcube = cube.spectral_slab(vlim[0], vlim[1])\n ra = subcube.world[0,0,:][2] \n dec = subcube.world[0,:,0][1] \n except (attributeerror):\n subcube = cube\n pass\n\n ra = subcube.world[0,0,:][2] \n dec = subcube.world[0,:,0][1] \n #find indices that correspond to the requested ra,dec,v ranges.\n clo = skycoord(ra=ralim[0], dec=declim[0])\n chi = skycoord(ra=ralim[1], dec=declim[1])\n\n iira = np.where((ra < clo.ra) & (ra > chi.ra))[0] \n iidec = np.where((dec > clo.dec) & (dec < chi.dec))[0]\n \n return subcube[:,iidec[0]:iidec[-1],iira[0]:iira[-1]]",
"def cubeslice(cubefile='orion_13co.combine.fits', ralim=['5h37m30s', '5h34m30s'], declim=['-6d43m00s', '-5d54m00s'], \nvlim=[0*u.km/u.s, 20.*u.km/u.s], ra_axis=2, dec_axis=1, v_axis=0):\n f = pyfits.open(cubefile)\n head = f[0].header\n #first dimension of f[0].data represents different polarizations, this data\n #only has 1.\n data = f[0].data[0] \n f.close()\n\n rastep, decstep, vstep = head['cdelt1']*u.deg, head['cdelt2']*u.deg, head['cdelt3']*u.m/u.s\n\n rarefpix, decrefpix, vrefpix = head['crpix1'], head['crpix2'], head['crpix3']\n\n raref, decref, vref = head['crval1']*u.deg, head['crval2']*u.deg, head['crval3']*u.m/u.s\n\n ran, decn, vn = head['naxis1'], head['naxis2'], head['naxis3']\n\n ra = np.linspace(raref.value - rastep.value*(rarefpix-1),\n raref.value + rastep.value*(ran-rarefpix), num=ran)*u.deg\n dec = np.linspace(decref.value - decstep.value*(decrefpix-1),\n decref.value + decstep.value*(decn-decrefpix), num=decn)*u.deg\n v = np.linspace(vref.value - vstep.value*(vrefpix-1),\n vref.value + vstep.value*(vn-vrefpix), num=vn)*u.m/u.s\n \n #find indices that correspond to the requested ra,dec,v ranges.\n clo = skycoord(ra=ralim[0], dec=declim[0])\n chi = skycoord(ra=ralim[1], dec=declim[1])\n\n iira = np.where((ra < clo.ra) & (ra > chi.ra))[0] \n iidec = np.where((dec > clo.dec) & (dec < chi.dec))[0]\n iiv = np.where((v >= vlim[0]) & (v <= vlim[1]))[0]\n print iira[0], iidec, iiv\n print type(iira) \n return data[iiv[0]:iiv[-1],iidec[0]:iidec[-1],iira[0]:iira[-1]]",
"def build_slices(self) -> list:\n slices = []\n channels, i_z, i_y, i_x = self._image_shape\n k_c, k_z, k_y, k_x = self._patch_size\n s_c, s_z, s_y, s_x = self._step\n z_steps = SliceBuilder.gen_indices(i_z, k_z, s_z)\n for z in z_steps:\n y_steps = SliceBuilder.gen_indices(i_y, k_y, s_y)\n for y in y_steps:\n x_steps = SliceBuilder.gen_indices(i_x, k_x, s_x)\n for x in x_steps:\n slice_idx = (\n slice(z, z + k_z),\n slice(y, y + k_y),\n slice(x, x + k_x)\n )\n if len(self._image_shape) == 4:\n slice_idx = (slice(0, channels),) + slice_idx\n slices.append(slice_idx)\n\n self._slices = slices\n\n return slices",
"def test1_find_sphere_subvolume_ix(self):\n\n\n radius = 0.5\n center = 0.5 * np.ones(3)\n c = Sphere(center, radius)\n\n x_ax = np.linspace(0, 1, 10)\n y_ax = np.linspace(0, 1, 10)\n z_ax = np.linspace(0, 1, 10)\n\n x_ax_subvol_ix, y_ax_subvol_ix, z_ax_subvol_ix = \\\n c.find_subvolume_ix(x_ax, y_ax, z_ax)\n\n self.assertTrue(np.allclose(x_ax, x_ax[x_ax_subvol_ix]))\n self.assertTrue(np.allclose(y_ax, y_ax[x_ax_subvol_ix]))\n self.assertTrue(np.allclose(z_ax, z_ax[x_ax_subvol_ix]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate the ROITree from a single ROI. The ROI is not required to be aligned with the atomic block size; if it is not aligned, an ROI will only partially cover the volume.
|
def from_roi(cls, roi: RegionOfInterest, factor: Cartesian,
atomic_block_size: Cartesian, atomic_voxel_size: Cartesian):
pass
# assert roi.voxel_size % atomic_voxel_size == Cartesian(0, 0, 0)
# assert roi.voxel_size // atomic_voxel_size % factor == Cartesian(0, 0, 0)
# if roi.voxel_size == atomic_voxel_size:
# # this is the leaf roi/block
# return cls(roi, None, None, None)
# # find the relatively longest axis to split
# children_voxel_size = roi.voxel_size // factor
# block_nums = roi.physical_size / (children_voxel_size * )
# block_nums = np.ceil(block_nums)
# axis = np.argmax(block_nums)
# # split along axis
# left_start = roi.start * factor
# left_block_nums =
# left_stop = left_start +
# left_roi = RegionOfInterest()
# left = cls.from_roi(left_roi, factor, atomic_block_size, atomic_voxel_size)
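The body above is only a commented-out sketch, so a small, hypothetical illustration of the split-along-the-relatively-longest-axis idea may help. It works on plain integer block counts instead of the project's Cartesian and RegionOfInterest types, and the helper name is made up.

# Hypothetical sketch of the recursive split described in the comments above.
# Uses plain integer block counts instead of Cartesian/RegionOfInterest.
import numpy as np

def split_longest_axis(block_nums):
    # Pick the axis with the most blocks and split its count into two halves.
    block_nums = np.asarray(block_nums)
    axis = int(np.argmax(block_nums))           # relatively longest axis
    left = int(np.ceil(block_nums[axis] / 2))   # left child takes the ceiling half
    right = int(block_nums[axis]) - left        # right child takes the remainder
    return axis, left, right

# Example: 3 x 7 x 2 blocks are split along axis 1 into 4 + 3 blocks.
print(split_longest_axis((3, 7, 2)))            # (1, 4, 3)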
|
[
"def set_roi(self, roi):\n with h5py.File(self.data_file, 'r+') as f:\n if 'roi' not in f:\n roigrp = f.create_group('roi')\n else:\n roigrp = f['roi']\n roigrp.create_dataset('roi{}'.format(self._next_roi_idx), data=np.asarray(roi), compression='lzf')",
"def user_roi(self, roi):\n # find which ROI was dragged\n i = 0\n for j, r in enumerate(self.rois):\n if r.roi == roi:\n i = j\n break\n x0, y0 = roi.pos() # lower left corner of bounding rectangle\n w, h = roi.size() # widths\n # note: setting the origin as bottom left but the image has origin top left\n xc, yc = int(x0 + w//2), int(y0 + h//2) # centre\n self.stats['ROIs'][i] = [xc, yc, int(w), int(h), r.t] # should never be indexerror\n r.label.setPos(x0, y0)\n r.w, r.h = int(w), int(h)\n r.translate_mask(xc, yc)\n self.replot_rois() # updates image analysis windows\n self.reset_table() # diplays ROI in table",
"def build_roi_box_head():\n return ROIBoxHead()",
"def create_rois(self):\n viewbox = self.im_canvas.getViewBox()\n for i, mw in enumerate(self.mw[:self._a+1]):\n j = i // self._m\n try: \n x, y, w, h, t = self.stats['ROIs'][j] # xc, yc, width, height, threshold\n except IndexError as e:\n error('Not enough ROIs for main windows: %s\\n'%j+str(e))\n self.stats['ROIs'].append([1,1,1,1,1])\n x, y, w, h, t = 1, 1, 1, 1, 1\n if not i % self._m: # for the first window in each set of _m\n try:\n self.rois[j].roi.show()\n self.rois[j].label.show()\n self.rois[j].resize(x, y, w, h)\n self.rois[j].t = t\n except IndexError: # make a new ROI \n self.rois.append(ROI((self.stats['pic_width'], self.stats['pic_height']), x, y, w, h, t, ID=j))\n self.rois[j].roi.sigRegionChangeFinished.connect(self.user_roi) \n self.rois[j].roi.setZValue(10) # make sure the ROI is drawn above the image\n viewbox.addItem(self.rois[j].roi)\n viewbox.addItem(self.rois[j].label)\n mw.roi.setSize((w, w)) # triggers user_roi. Must set width first.\n mw.roi.setPos(x - w//2, y - w//2) # triggers user_roi\n mw.bias_offset_edit.setText(str(self.stats['bias']))\n for j in range(len(self.mw[:self._a+1])//self._m, len(self.rois)):\n self.rois[j].roi.hide() # remove extra ROIs\n self.rois[j].label.hide()",
"def _generate_right_tree(self):\n right_tree = BinaryTree()\n right_tree.deserialize([0,None,1,None,2,None,3])\n\n return right_tree",
"def get_roi(self, idx=None):\n if idx is None:\n idx = self._latest_roi_idx\n if idx is None:\n return None\n\n with h5py.File(self.data_file, 'r') as f:\n roigrp = f['roi']\n _roi = ROI(roigrp['roi{}'.format(int(idx))])\n\n return _roi",
"def legalize_roi(\n roi,\n camera_type='edge 4.2',\n current_roi=None,\n verbose=True):\n left = roi.get('left')\n right = roi.get('right')\n bottom = roi.get('bottom')\n top = roi.get('top')\n if verbose:\n print(\" Requested camera ROI:\")\n print(\" From pixel\", left, \"to pixel\", right, \"(left/right)\")\n print(\" From pixel\", top, \"to pixel\", bottom, \"(up/down)\")\n min_lr, min_ud = 1, 1\n if camera_type == 'edge 4.2':\n min_width, min_height = 40, 10\n max_lr, max_ud, step_lr, = 2060, 2048, 20\n elif camera_type == 'edge 5.5':\n min_width, min_height = 160, 10\n max_lr, max_ud, step_lr = 2560, 2160, 160\n elif camera_type == 'pixelfly':\n min_width, min_height = 1392, 1040\n max_lr, max_ud, step_lr = 1392, 1040, 1392\n elif camera_type == 'panda 4.2':\n # TODO min_width can be set to 32 when we upgrade the firmware on\n # old pandas.\n min_width, min_height = 192, 10\n max_lr, max_ud, step_lr = 2048, 2048, 32\n if current_roi is None:\n current_roi = {'left': min_lr, 'right': max_lr,\n 'top': min_ud, 'bottom': max_ud}\n \"\"\"\n Legalize left/right\n \"\"\"\n if left is None and right is None:\n \"\"\"\n User isn't trying to change l/r ROI; use existing ROI.\n \"\"\"\n left, right = current_roi['left'], current_roi['right']\n elif left is not None:\n \"\"\"\n 'left' is specified, 'left' is the master.\n \"\"\"\n if left < min_lr: #Legalize 'left'\n left = min_lr\n elif left > max_lr - min_width + 1:\n left = max_lr - min_width + 1\n else:\n left = 1 + step_lr*((left - 1) // step_lr)\n if right is None: #Now legalize 'right'\n right = current_roi['right']\n if right < left + min_width - 1:\n right = left + min_width - 1\n elif right > max_lr:\n right = max_lr\n else:\n right = left - 1 + step_lr*((right - (left - 1)) // step_lr)\n else:\n \"\"\"\n 'left' is unspecified, 'right' is specified. 'right' is the master.\n \"\"\"\n if right > max_lr: #Legalize 'right'\n right = max_lr\n elif right < min_lr - 1 + min_width:\n right = min_width\n else:\n right = step_lr * (right // step_lr)\n left = current_roi['left'] #Now legalize 'left'\n if left > right - min_width + 1:\n left = right - min_width + 1\n elif left < min_lr:\n left = min_lr\n else:\n left = right + 1 - step_lr * ((right - (left - 1)) // step_lr)\n assert min_lr <= left < left + min_width - 1 <= right <= max_lr\n \"\"\"\n Legalize top/bottom\n \"\"\"\n if top is None and bottom is None:\n \"\"\"\n User isn't trying to change u/d ROI; use existing ROI.\n \"\"\"\n top, bottom = current_roi['top'], current_roi['bottom']\n elif top is not None:\n \"\"\"\n 'top' is specified, 'top' is the master.\n \"\"\"\n if top < min_ud: #Legalize 'top'\n top = min_ud\n if top > (max_ud - min_height)//2 + 1:\n top = (max_ud - min_height)//2 + 1\n bottom = max_ud - top + 1 #Now bottom is specified\n else:\n \"\"\"\n 'top' is unspecified, 'bottom' is specified, 'bottom' is the\n master.\n \"\"\"\n if bottom > max_ud: #Legalize 'bottom'\n bottom = max_ud\n if bottom < (max_ud + min_height)//2:\n bottom = (max_ud + min_height)//2\n top = max_ud - bottom + 1 #Now 'top' is specified\n assert min_ud <= top < top + min_height - 1 <= bottom <= max_ud\n new_roi = {'left': left, 'top': top, 'right': right, 'bottom': bottom}\n if verbose and new_roi != roi:\n print(\" ***Requested ROI must be adjusted to match the camera***\")\n return new_roi",
"def raveio2radar(rio, raw=False):\n\n # create metadata retrieval object\n # TODO default mappings, metadata, etc\n # TODO proper Py-ART reading (file_field_name, etc)\n filemetadata = FileMetadata('odim_h5')\n\n # determine some key parameters\n if rio.objectType is _rave.Rave_ObjectType_SCAN:\n nsweeps = 1\n first_scan = rio.object\n elif rio.objectType is _rave.Rave_ObjectType_PVOL:\n nsweeps = rio.object.getNumberOfScans()\n first_scan = rio.object.getScan(0)\n else:\n raise TypeError(\n \"Unsupported object, only SCANs and PVOLs supported.\")\n rays_per_sweep = _collect_attrs(rio, 'nrays')\n total_rays = np.sum(rays_per_sweep)\n bins_per_sweep = np.array(_collect_attrs(rio, 'nbins'))\n max_bins = np.max(bins_per_sweep) # maximim number of bins in any sweep.\n #if np.any(bins_per_sweep != max_bins):\n # TODO fix to support non-uniform number of bins with masking\n # raise NotImplementedError('Non-uniform bins not supported yet')\n\n # latitude, longitude and altitude\n latitude = filemetadata('latitude')\n longitude = filemetadata('longitude')\n altitude = filemetadata('altitude')\n latitude['data'] = np.array([first_scan.latitude * rd])\n longitude['data'] = np.array([first_scan.longitude * rd])\n altitude['data'] = np.array([first_scan.height])\n\n # metadata\n metadata = filemetadata('metadata')\n metadata['source'] = first_scan.source\n metadata['original_container'] = 'odim_h5'\n\n # sweep_start_ray_index, sweep_end_ray_index\n # Not to be confused with where/a1gate\n sweep_start_ray_index = filemetadata('sweep_start_ray_index')\n sweep_end_ray_index = filemetadata('sweep_end_ray_index')\n sweep_start_ray_index['data'] = np.cumsum(\n np.append([0], rays_per_sweep[:-1])).astype('int32')\n sweep_end_ray_index['data'] = np.cumsum(\n rays_per_sweep).astype('int32') - 1\n\n # sweep_number\n sweep_number = filemetadata('sweep_number')\n sweep_number['data'] = np.arange(nsweeps, dtype='int32')\n\n # sweep_mode\n sweep_mode = filemetadata('sweep_mode')\n sweep_mode['data'] = np.array(nsweeps * ['azimuth_surveillance'])\n\n # scan_type\n scan_type = 'ppi'\n\n # fixed_angle, elevation\n sweep_el = np.array(_collect_attrs(rio, 'elangle')) * rd\n fixed_angle = filemetadata('fixed_angle')\n elevation = filemetadata('elevation')\n fixed_angle['data'] = np.array(sweep_el, dtype='float32')\n # A better solution is to use the elevation angles for each ray if\n # available in how/startelA, how/stopelA in ODIM_H5 v2.2\n elevation['data'] = np.repeat(sweep_el, rays_per_sweep).astype('float32')\n\n # range\n # Check that gate spacing is constant for all scans.\n # The Py-ART Radar object does not support radar data where the\n # gate spacing is not constant for all radials.\n # Data of this type raises a TypeError exception.\n rscales = np.array(_collect_attrs(rio, 'rscale'))\n rstarts = np.array(_collect_attrs(rio, 'rstart'))\n if np.any(rscales[0] != rscales) or np.any(rstarts != rstarts[0]):\n raise TypeError(\n \"Py-ART cannot handle volumes containing scans with\",\n \"different (bin) gate spacings.\")\n # This is a generalization, but we'll live with it.\n _range = filemetadata('range')\n _range['data'] = (np.arange(max_bins, dtype='float32') * rscales[0] +\n rstarts[0])\n _range['meters_to_center_of_first_gate'] = float(rstarts[0])\n _range['meters_between_gates'] = float(rscales[0])\n\n # azimuth\n # azimuth angle for all rays collected in the volume\n azimuth = filemetadata('azimuth')\n az_data = np.ones((total_rays, ), dtype='float32')\n # loop over the sweeps, store the starting azimuth 
angles.\n # an average of the startazA and stopazA would probably be a better\n # estimate, but the discontinuity between 0 and 360 would need to be\n # addressed. This is attempted if startazA is available.\n start = 0\n if rio.objectType is _rave.Rave_ObjectType_SCAN:\n if 'how/startazA' in first_scan.getAttributeNames():\n sweep_az = first_scan.getAttribute('how/startazA')\n sweep_az = np.where(np.greater(sweep_az, 360.0),\n sweep_az-360.0, sweep_az)\n az_data[start:start+first_scan.nrays] = sweep_az\n else:\n az_data = np.arange(first_scan.nrays)+(360./first_scan.nrays/2)\n\n elif rio.objectType is _rave.Rave_ObjectType_PVOL:\n for s in range(nsweeps):\n scan = rio.object.getScan(s)\n if 'how/startazA' in scan.getAttributeNames():\n sweep_az = scan.getAttribute('how/startazA')\n sweep_az = np.where(np.greater(sweep_az, 360.0),\n sweep_az-360.0, sweep_az)\n az_data[start:start+scan.nrays] = sweep_az\n start += scan.nrays\n else:\n az_data[start:start+scan.nrays] = (\n np.arange(scan.nrays)+(360./scan.nrays/2))\n azimuth['data'] = az_data\n\n # time\n # Since startazT and stopazT do not appear to be present in all files\n # and the startepochs and endepochs attributes appear the same for\n # each sweep, just interpolate between these values.\n # XXX This is does not seem correct.\n _time = filemetadata('time')\n attrnames = first_scan.getAttributeNames()\n if 'how/startepochs' in attrnames and 'how/stopepochs' in attrnames:\n start_epoch = first_scan.getAttribute('how/startepochs')\n end_epoch = first_scan.getAttribute('how/stopepochs')\n else:\n start_epoch = time.mktime(datetime.datetime.strptime(\n first_scan.startdate+first_scan.starttime,\n \"%Y%m%d%H%M%S\").timetuple())\n end_epoch = time.mktime(datetime.datetime.strptime(\n first_scan.enddate+first_scan.endtime, \"%Y%m%d%H%M%S\").timetuple())\n start_time = datetime.datetime.utcfromtimestamp(start_epoch)\n delta_sec = end_epoch - start_epoch\n _time['units'] = make_time_unit_str(start_time)\n _time['data'] = np.linspace(0, delta_sec, total_rays).astype('float32')\n\n # fields\n # This assumes that all fields are available in all scans and that\n # the quantities are ordered the same way in each scan.\n # This may not always be true and could cause issues. XXX\n fields = {}\n rave_field_names = first_scan.getParameterNames()\n # loop over the fields, create a field dictionary for each field\n for i, rave_field_name in enumerate(rave_field_names):\n # Assumes the same dtype for each quantity. 
Potentially dangerous.\n field_data = np.ma.zeros((total_rays, max_bins), dtype='float32')\n start = 0\n # loop over the sweeps, copy data into correct location of data array\n if rio.objectType is _rave.Rave_ObjectType_SCAN:\n sweep_data = _get_scan_data(first_scan, rave_field_name, raw)\n field_data[start:start + first_scan.nrays] = sweep_data[:]\n elif rio.objectType is _rave.Rave_ObjectType_PVOL:\n for i in range(nsweeps):\n scan = rio.object.getScan(i)\n sweep_data = _get_scan_data(scan, rave_field_name, raw)\n field_data[start:start+scan.nrays, :scan.nbins] = sweep_data[:]\n field_data[start:start+scan.nrays, scan.nbins:] = np.ma.masked\n start += scan.nrays\n field_dic = filemetadata(rave_field_name)\n field_dic['data'] = field_data\n fields[rave_field_name] = field_dic\n\n # instrument_parameters\n beam_width_h = filemetadata.get_metadata('radar_beam_width_h')\n beam_width_h['data'] = np.array([first_scan.beamwidth * rd],\n dtype='float32')\n # TODO unambiguous_range (nyquist), etc\n instrument_parameters = {'radar_beam_width_h': beam_width_h}\n\n return Radar(\n _time, _range, fields, metadata, scan_type,\n latitude, longitude, altitude,\n sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,\n sweep_end_ray_index,\n azimuth, elevation,\n instrument_parameters=instrument_parameters)",
"def get_roi(self, frame, roi_size_1d, offset):\n return frame[self.y + offset[0] - roi_size_1d:self.y + offset[0] + roi_size_1d + 1,\n self.x + offset[1] - roi_size_1d:self.x + offset[1] + roi_size_1d + 1]",
"def labelvol_to_rtstruct(roi_vol,\r\n aff,\r\n refdcm_file,\r\n filename,\r\n ordered_slices,\r\n uid_base = '1.2.826.0.1.3680043.9.7147.',\r\n seriesDescription = 'test rois',\r\n structureSetLabel = 'RTstruct',\r\n structureSetName = 'my rois',\r\n connect_holes = True,\r\n roinames = None,\r\n roidescriptions = None,\r\n roigenerationalgs = None,\r\n roi_colors = [['255','0','0'], ['0', '0','255'],['0', '255','0'],\r\n ['255','0','255'],['255','255','0'],['0','255','255']],\r\n tags_to_copy = ['PatientName','PatientID','AccessionNumber','StudyID',\r\n 'StudyDescription','StudyDate','StudyTime',\r\n 'SeriesDate','SeriesTime'],\r\n tags_to_add = None):\r\n\r\n roinumbers = np.unique(roi_vol)\r\n roinumbers = roinumbers[roinumbers > 0]\r\n nrois = len(roinumbers)\r\n\r\n if isinstance(refdcm_file, list):\r\n refdcm = pydicom.read_file(refdcm_file[0]) \r\n else:\r\n refdcm = pydicom.read_file(refdcm_file) \r\n \r\n file_meta = pydicom.Dataset()\r\n \r\n file_meta.ImplementationClassUID = uid_base + '1.1.1'\r\n file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'\r\n file_meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid('1.2.840.10008.5.1.4.1.1.481.3.')\r\n \r\n ds = pydicom.FileDataset(filename, {}, file_meta = file_meta, preamble=b\"\\0\" * 128)\r\n\r\n ds.Modality = 'RTSTRUCT'\r\n ds.SeriesDescription = seriesDescription\r\n\r\n #--- copy dicom tags from reference dicom file\r\n for tag in tags_to_copy:\r\n if tag in refdcm:\r\n setattr(ds,tag, refdcm.data_element(tag).value)\r\n else:\r\n warnings.warn(tag + ' not in reference dicom file -> will not be written')\r\n\r\n \r\n ds.StudyInstanceUID = refdcm.StudyInstanceUID\r\n ds.SeriesInstanceUID = pydicom.uid.generate_uid(uid_base)\r\n \r\n ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'\r\n ds.SOPInstanceUID = pydicom.uid.generate_uid(uid_base)\r\n \r\n ds.StructureSetLabel = structureSetLabel\r\n ds.StructureSetName = structureSetName\r\n\r\n dfr = pydicom.Dataset()\r\n dfr.FrameOfReferenceUID = refdcm.FrameOfReferenceUID\r\n \r\n ds.ReferencedFrameOfReferenceSequence = pydicom.Sequence([dfr])\r\n\r\n if tags_to_add is not None: \r\n for tag, value in tags_to_add.items():\r\n setattr(ds, tag, value)\r\n \r\n #######################################################################\r\n #######################################################################\r\n # write the ReferencedFrameOfReferenceSequence\r\n \r\n contourImageSeq = pydicom.Sequence()\r\n\r\n if isinstance(refdcm_file, list):\r\n # in case we got all reference dicom files we add all SOPInstanceUIDs\r\n # otherwise some RT planning systems refuse to read the RTstructs\r\n for fname in refdcm_file:\r\n with pydicom.read_file(fname) as tmpdcm:\r\n tmp = pydicom.Dataset()\r\n tmp.ReferencedSOPClassUID = tmpdcm.SOPClassUID\r\n tmp.ReferencedSOPInstanceUID = tmpdcm.SOPInstanceUID\r\n contourImageSeq.append(tmp) \r\n else: \r\n tmp = pydicom.Dataset()\r\n tmp.ReferencedSOPClassUID = refdcm.SOPClassUID\r\n tmp.ReferencedSOPInstanceUID = refdcm.SOPInstanceUID\r\n contourImageSeq.append(tmp) \r\n \r\n tmp2 = pydicom.Dataset()\r\n tmp2.SeriesInstanceUID = refdcm.SeriesInstanceUID\r\n tmp2.ContourImageSequence = contourImageSeq\r\n \r\n tmp3 = pydicom.Dataset()\r\n tmp3.ReferencedSOPClassUID = '1.2.840.10008.3.1.2.3.1' \r\n # TODO SOP just copied from MIM rtstructs \r\n tmp3.ReferencedSOPInstanceUID = refdcm.StudyInstanceUID\r\n tmp3.RTReferencedSeriesSequence = pydicom.Sequence([tmp2])\r\n \r\n tmp4 = pydicom.Dataset()\r\n tmp4.FrameOfReferenceUID = 
refdcm.FrameOfReferenceUID\r\n tmp4.RTReferencedStudySequence = pydicom.Sequence([tmp3])\r\n \r\n ds.ReferencedFrameOfReferenceSequence = pydicom.Sequence([tmp4])\r\n \r\n #######################################################################\r\n #######################################################################\r\n \r\n ds.StructureSetROISequence = pydicom.Sequence()\r\n ds.ROIContourSequence = pydicom.Sequence()\r\n \r\n if roinames is None: roinames = ['ROI-' + str(x) for x in roinumbers]\r\n if roidescriptions is None: roidescriptions = ['ROI-' + str(x) for x in roinumbers]\r\n if roigenerationalgs is None: roigenerationalgs = len(roinumbers) * ['MANUAL']\r\n\r\n # loop over the ROIs\r\n for iroi, roinumber in enumerate(roinumbers):\r\n dssr = pydicom.Dataset()\r\n dssr.ROINumber = roinumber\r\n dssr.ROIName = roinames[iroi]\r\n dssr.ROIDescription = roidescriptions[iroi]\r\n dssr.ROIGenerationAlgorithm = roigenerationalgs[iroi]\r\n dssr.ReferencedFrameOfReferenceUID = dfr.FrameOfReferenceUID\r\n \r\n ds.StructureSetROISequence.append(dssr)\r\n \r\n #######################################################################\r\n #######################################################################\r\n # write ROIContourSequence containing the actual 2D polygon points of the ROI\r\n\r\n ds_contour = pydicom.Dataset()\r\n ds_contour.ReferencedSOPClassUID = refdcm.SOPClassUID ###\r\n\r\n\r\n # generate binary volume for the current ROI\r\n bin_vol = (roi_vol == dssr.ROINumber).astype(int)\r\n \r\n # find the bounding box in the last direction\r\n ob_sls = find_objects(bin_vol)\r\n z_start = min([x[2].start for x in ob_sls]) \r\n z_end = max([x[2].stop for x in ob_sls]) \r\n \r\n ds_roi_contour = pydicom.Dataset()\r\n ds_roi_contour.ROIDisplayColor = roi_colors[iroi % len(roi_colors)]\r\n ds_roi_contour.ReferencedROINumber = dssr.ROINumber\r\n ds_roi_contour.ContourSequence = pydicom.Sequence()\r\n \r\n # loop over the slices in the 2 direction to create 2D polygons \r\n for sl in np.arange(z_start, z_end): \r\n bin_slice = bin_vol[:,:,sl]\r\n\r\n\r\n if bin_slice.max() > 0:\r\n contours = binary_2d_image_to_contours(bin_slice, connect_holes = connect_holes)\r\n \r\n for ic in range(len(contours)):\r\n npoints = contours[ic].shape[0]\r\n \r\n contour = np.zeros((npoints,3))\r\n \r\n for ipoint in range(npoints):\r\n contour[ipoint,:] = (aff @ np.concatenate((contours[ic][ipoint,:],[sl,1])))[:-1] \r\n \r\n dsci = pydicom.Dataset()\r\n dsci.ReferencedSOPInstanceUID = ordered_slices [sl][0]\r\n dsci.ContourGeometricType = 'CLOSED_PLANAR'\r\n dsci.NumberOfContourPoints = contour.shape[0]\r\n dsci.ContourImageSequence = pydicom.Sequence([ds_contour])\r\n dsci.ContourData = contour.flatten().tolist()\r\n \r\n # ContourImageSequence contains 1 element per 2D contour\r\n ds_roi_contour.ContourSequence.append(dsci)\r\n \r\n # has to contain one element per ROI\r\n ds.ROIContourSequence.append(ds_roi_contour)\r\n \r\n #######################################################################\r\n #######################################################################\r\n \r\n pydicom.filewriter.write_file(os.path.join('.',filename), \r\n ds, write_like_original = False)\r\n\r\n return z_start,z_end",
"def user_roi(self, roi):\n # find which ROI was dragged\n for r in self.rh.ROIs:\n if r.roi == roi:\n break\n x0, y0 = roi.pos() # lower left corner of bounding rectangle\n w, h = map(int, roi.size()) # width, height\n xc, yc = int(x0 + w//2), int(y0 + h//2) # centre of ROI\n r.w, r.h = w, h\n r.label.setPos(x0, y0)\n r.translate_mask(xc, yc)\n for key, val in zip(r.edits.keys(), [xc, yc, w, h]):\n r.edits[key].setText(str(val))",
"def make_roi_grid(self, toggle=True, method=''):\n method = method if method else self.sender().text()\n pos, shape = self.rh.ROIs[0].roi.pos(), self.rh.ROIs[0].roi.size()\n if method == 'Single ROI':\n for r in self.rh.ROIs:\n r.resize(*map(int, [pos[0], pos[1], shape[0], shape[1]]))\n elif method == 'Square grid':\n n = len(self.rh.ROIs) # number of ROIs\n d = int((n - 1)**0.5 + 1) # number of ROIs per row\n X = int(self.rh.shape[0] / d) # horizontal distance between ROIs\n Y = int(self.rh.shape[1] / int((n - 3/4)**0.5 + 0.5)) # vertical distance\n for i in range(n): # ID of ROI\n try:\n newx, newy = int(X * (i%d + 0.5)), int(Y * (i//d + 0.5))\n if any([newx//self.rh.shape[0], newy//self.rh.shape[1]]):\n warning('Tried to set square ROI grid with (xc, yc) = (%s, %s)'%(newx, newy)+\n ' outside of the image')\n newx, newy = 0, 0\n self.rh.ROIs[i].resize(*map(int, [newx, newy, 1, 1]))\n except ZeroDivisionError as e:\n error('Invalid parameters for square ROI grid: '+\n 'x - %s, y - %s, pic size - %s, roi size - %s.\\n'%(\n pos[0], pos[1], self.rh.shape[0], (shape[0], shape[1]))\n + 'Calculated width - %s, height - %s.\\n'%(X, Y) + str(e))\n elif method == '2D Gaussian masks':\n try: \n im = self.im_canvas.image.copy() - self.rh.bias\n if np.size(np.shape(im)) == 2:\n for r in self.rh.ROIs:\n r.create_gauss_mask(im) # fit 2D Gaussian to max pixel region\n # then block that region out of the image\n try:\n im[r.x-r.w : r.x+r.w+1, r.y-r.h:r.y+r.h+1] = np.zeros((2*r.w+1, 2*r.h+1)) + np.min(im)\n except (IndexError, ValueError): pass\n except AttributeError: pass",
"def set_roi(self, roi):\n if roi is not None:\n if len(roi) != 4:\n raise ValueError(\"ROI must be a list of four integers\")\n for x in roi:\n if not (isinstance(x, int) and x >= 0):\n raise ValueError(\"ROI must be a (x, y, w, h) tuple\")\n self.roi = roi",
"def roi_target(self,rois,gt_box,label,pos_thresh=cfg.rcnn_pos_thresh,\n neg_thresh_lo=cfg.rcnn_neg_thresh_lo,\n neg_thresh_hi=cfg.rcnn_neg_thresh_hi):\n assert rois.shape[1]==4, \"please remove the img_id\"\n rois=torch.cat([rois,gt_box],dim=0) # [a+b,4]\n \n ious=t_box_iou(rois,gt_box) # [a+b,b]\n max_ious,idx=ious.max(dim=1)\n \n # parameterizd box\n gt_loc=encode_box(gt_box[idx],rois)\n\n # assign the neg:\n assign=torch.full([len(rois)],-1).long().type_as(label)\n\n neg_mask=(max_ious>neg_thresh_lo)*(max_ious<neg_thresh_hi)\n # if neg_mask.sum() == 0:\n # tqdm.write(\"Warning: neg_roi for fast r-cnn is zero\",end=\" \")\n # neg_mask=(max_ious<neg_thresh_hi)\n # raise ValueError(\"there is no negative roi for fast r-cnn\")\n assign[neg_mask]=0\n \n # assign the pos:\n pos_mask=max_ious>pos_thresh\n\n # plus one since 0 denotes the neg, we must begin from the 1\n assign[pos_mask]=label[idx][pos_mask].long()+1 \n\n # normalize?\n mean=self.mean # [4]\n std=self.std # [4]\n\n mean=mean[None].expand_as(gt_loc).type_as(gt_loc)\n std=std[None].expand_as(gt_loc).type_as(gt_loc)\n\n gt_loc-=mean\n gt_loc=gt_loc/std\n\n return rois,gt_loc,assign",
"def full2roi(coords_full, roi):\n\n coords_roi = []\n for ii, c in enumerate(coords_full):\n coords_roi.append(c - roi[2*ii])\n\n return coords_roi",
"def roi_generator(self, requested_rois: Iterator[rectangle.Rectangle]) -> \\\n Iterator[Tuple[rectangle.Rectangle, np.ndarray, int, int]]:\n block_rois = copy.copy(requested_rois)\n\n whole_bounds = rectangle.Rectangle(0, 0, width=self.width(), height=self.height())\n for roi in requested_rois:\n if not whole_bounds.contains_rect(roi):\n raise Exception('Roi outside image bounds: ' + str(roi) + str(whole_bounds))\n\n # gdal doesn't work reading multithreading. But this let's a thread\n # take care of IO input while we do computation.\n jobs = []\n\n total_rois = len(block_rois)\n while block_rois:\n # For the next (output) block, figure out the (input block) aligned\n # data read that we need to perform to get it.\n read_roi = self.block_aligned_roi(block_rois[0])\n\n applicable_rois = []\n\n # Loop through the remaining ROIs and apply the callback function to each\n # ROI that is contained in the section we read in.\n index = 0\n while index < len(block_rois):\n\n if not read_roi.contains_rect(block_rois[index]):\n index += 1\n continue\n applicable_rois.append(block_rois.pop(index))\n\n jobs.append((read_roi, applicable_rois))\n\n # only do a few reads ahead since otherwise we will exhaust our memory\n pending = []\n exe = concurrent.futures.ThreadPoolExecutor(1)\n NUM_AHEAD = 2\n for i in range(min(NUM_AHEAD, len(jobs))):\n pending.append(exe.submit(functools.partial(self.read, jobs[i][0])))\n num_remaining = total_rois\n for (i, (read_roi, rois)) in enumerate(jobs):\n buf = pending.pop(0).result()\n for roi in rois:\n x0 = roi.min_x - read_roi.min_x\n y0 = roi.min_y - read_roi.min_y\n num_remaining -= 1\n yield (roi, buf[x0:x0 + roi.width(), y0:y0 + roi.height(), :],\n (total_rois - num_remaining, total_rois))\n if i + NUM_AHEAD < len(jobs):\n pending.append(exe.submit(functools.partial(self.read, jobs[i + NUM_AHEAD][0])))",
"def getROI(self) -> retval:\n ...",
"def make_roi_grid(self, toggle=True, method=''):\n newmasks = [] # list of masks to pass on to analysis windows\n for r in self.rois: # disconnect slot, otherwise signal is triggered infinitely\n reset_slot(r.roi.sigRegionChangeFinished, self.user_roi, False)\n method = method if method else self.sender().text()\n pos, shape = self.rois[0].roi.pos(), self.rois[0].roi.size()\n if method == 'Single ROI':\n for i in range(len(self.rois)):\n self.stats['ROIs'][i] = list(map(int, [pos[0], pos[1], shape[0], shape[1], self.stats['ROIs'][i][-1]]))\n self.rois[i].resize(*map(int, [pos[0], pos[1], shape[0], shape[1]]))\n elif method == 'Square grid':\n d = int((self._a - 1)**0.5 + 1) # number of ROIs per row\n X = int(self.stats['pic_width'] / d) # horizontal distance between ROIs\n Y = int(self.stats['pic_height'] / int((self._a - 3/4)**0.5 + 0.5)) # vertical distance\n for i in range(self._a // self._m): # ID of ROI\n try:\n newpos = [int(X * (i%d + 0.5)),\n int(Y * (i//d + 0.5))]\n if any([newpos[0]//self.stats['pic_width'], newpos[1]//self.stats['pic_height']]):\n warning('Tried to set square ROI grid with (xc, yc) = (%s, %s)'%(newpos[0], newpos[1])+\n ' outside of the image')\n newpos = [0,0]\n self.stats['ROIs'][i] = list(map(int, [newpos[0], newpos[1], shape[0], shape[1], self.stats['ROIs'][i][-1]]))\n self.rois[i].resize(*map(int, [newpos[0], newpos[1], 1, 1]))\n except ZeroDivisionError as e:\n error('Invalid parameters for square ROI grid: '+\n 'x - %s, y - %s, pic size - (%s, %s), roi size - %s.\\n'%(\n pos[0], pos[1], self.stats['pic_width'], self.stats['pic_height'], (shape[0], shape[1]))\n + 'Calculated width - %s, height - %s.\\n'%(X, Y) + str(e))\n elif method == '2D Gaussian masks':\n try: \n im = self.im_canvas.image.copy()\n if np.size(np.shape(im)) == 2:\n for i, r in enumerate(self.rois):\n r.create_gauss_mask(im) # fit 2D Gaussian to max pixel region\n # then block that region out of the image\n try:\n im[r.x-r.w : r.x+r.w+1, r.y-r.h:r.y+r.h+1] = np.zeros((2*r.w+1, 2*r.h+1)) + np.min(im)\n except (IndexError, ValueError): pass\n newmasks.append(r.mask)\n try:\n self.stats['ROIs'][i] = list(map(int, [r.x, r.y, r.w, r.h, self.stats['ROIs'][i][-1]]))\n except IndexError: \n self.stats['ROIs'].append(list(map(int, [r.x, r.y, r.w, r.h, 1])))\n except AttributeError: pass\n self.reset_table()\n self.replot_rois(newmasks)\n for r in self.rois: # reconnect slot\n reset_slot(r.roi.sigRegionChangeFinished, self.user_roi, True)",
"def preprocess_and_extract_roi(image, left=True):\n image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n if left:\n roi = image_gray[pos_y1:pos_y2, pos_left_x1:pos_left_x2]\n else:\n roi = image_gray[pos_y1:pos_y2, pos_right_x1:pos_right_x2]\n\n roi = cv2.resize(roi, dsize=(image_width, image_width), interpolation=cv2.INTER_CUBIC)\n roi = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n return roi"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the column letter for a given column number.
|
def col_letter(col):
    # Map a 1-based column number to its letter (valid for columns 1-26, A-Z).
    return chr(ord("A") + col - 1)
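The one-liner above only covers columns 1-26; for columns past Z a carry-aware base-26 conversion is needed. A minimal sketch under that assumption (the helper name col_letters is made up):

# Hypothetical extension of col_letter for columns beyond Z (bijective base 26).
def col_letters(col):
    letters = []
    while col > 0:
        col, rem = divmod(col - 1, 26)        # shift to 0-based so 26 maps to "Z"
        letters.append(chr(ord("A") + rem))
    return "".join(reversed(letters))

assert col_letters(1) == "A" and col_letters(26) == "Z" and col_letters(28) == "AB"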
|
[
"def _get_column_name(self, column):\n return column",
"def _get_header_column_letter(self, title):\n return self._to_letter(self._get_header_index(title))",
"def _series_col_letter(self, series):\n start_col_ascii = ord('A') + series.categories.depth\n return chr(start_col_ascii + series.index)",
"def get_column_letter(col_idx):\n # these indicies corrospond to A -> ZZZ and include all allowed\n # columns\n if not 1 <= col_idx <= 18278:\n raise ValueError(\"Invalid column index {0}\".format(col_idx))\n letters = []\n while col_idx > 0:\n col_idx, remainder = divmod(col_idx, 26)\n # check for exact division and borrow if needed\n if remainder == 0:\n remainder = 26\n col_idx -= 1\n letters.append(chr(remainder+64))\n return ''.join(reversed(letters))",
"def get_column_letter(df, colname, index=True):\n if index:\n try:\n n_idx = len(df.index.levels)\n except AttributeError:\n n_idx = 1\n else:\n n_idx = 0\n\n i = df.columns.tolist().index(colname) + n_idx + 1\n return colnum_string(i)",
"def get_excel_column_index(column: str) -> int:\n\tindex = 0\n\tcolumn = column.upper()\n\tcolumn = column[::-1]\n\tfor i in range(len(column)):\n\t\tindex += ((ord(column[i]) % 65 + 1)*(26**i))\n\treturn index-1",
"def _get_column_header(self, column):\n return self.matrix[0][column]",
"def row_letter(self):\n return self.index_to_letter(self.row)",
"def _convert_column_to_number(\n col_letter: str\n) -> int:\n col_number = 0\n for c in col_letter:\n if c in string.ascii_letters:\n col_number = col_number * 26 + (ord(c.upper()) - ord('A')) + 1\n return col_number - 1",
"def priorCharCol(column, line):\n if app.config.strict_debug:\n assert isinstance(column, int)\n assert isinstance(line, unicode)\n if column == 0:\n return None\n priorColumn = 0\n for ch in line:\n width = charWidth(ch, priorColumn)\n if priorColumn + width >= column:\n return priorColumn\n priorColumn += width\n return None",
"def _get_column_name(self, column):\n if hasattr(column, '__clause_element__'):\n clause_element = column.__clause_element__()\n if not isinstance(clause_element, Column):\n msg = ('Column must be a string or a column attribute'\n ' of SQLAlchemy ORM class')\n raise TypeError(msg)\n model = column.class_\n if model is not self.model:\n msg = ('Cannot specify column of model %s'\n ' while creating API for model %s' % (\n model.__name__, self.model.__name__))\n raise ValueError(msg)\n return clause_element.key\n\n return column",
"def _xl_colname(idx):\n # Check idx argument for errors\n error_msg = \"Argument idx must be integer between 0 and 702\"\n if not isinstance(idx, int):\n raise TypeError(error_msg)\n if idx < 0 or idx >= 702:\n raise IndexError(error_msg)\n\n letters = list(string.ascii_uppercase)\n idx_1, idx_2 = divmod(idx, 26)\n if idx_1 ==0:\n return letters[idx_2]\n else:\n return letters[idx_1 - 1] + letters[idx_2]",
"def get_case(cls, piece, col):\n if len(col) == 2:\n return piece.upper()\n else:\n return piece.lower()",
"def GetColLabelValue(self, col):\n if len(self.dataframe):\n return self.dataframe.columns[col]\n return ''",
"def name (self):\n return self._column;",
"def getCol(self, pos):\n col = pos % self.numCols\n if col > 0:\n return col\n else:\n return 0",
"def get_column_name(self):\r\n columns = list(self.all_data.columns)\r\n # Note: Excludes Year, Month, Day\r\n columns.remove(self._year)\r\n columns.remove(self._month)\r\n columns.remove(self._day_of_week)\r\n index = 1\r\n for col in columns:\r\n print(f'{index}. {col}')\r\n index += 1\r\n \r\n col_number = int(input('Please select column number: '))\r\n while col_number not in [1, 2, 3, 4]:\r\n col_number = int(input('Please select column number: '))\r\n return columns[ col_number - 1]",
"def get_column(self, column, selection=None):\n \n self._check_column_valid(column)\n \n if (selection==None):\n return self._data[column]\n else:\n condition = [(column, selection[0], selection[1])]\n return self.select_given_all_true(condition, cols_to_select=[column])",
"def getColumnName(self, columnIndex): \n return self.columnNames[columnIndex]",
"def _get_col(self, string: str, print_col: int, delimiter: Optional[str]) -> str:\n if print_col == 0:\n return string\n else:\n delimited_str = string.split(delimiter)\n if print_col - 1 > len(delimited_str):\n # somewhat similar to awk behavior?\n # when the print col exceed the col number, awk return the entire string\n return string\n return delimited_str[print_col - 1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the enabled variants of a product, sorted by their variant sort order.
|
def get_product_variants(variants, sku):
    # Keep only the enabled variants that belong to the given product SKU.
    product_variants = [
        variant for variant in variants
        if variant["Product SKU"] == sku and variant["Variant Enabled"] == "Y"
    ]
    # Present the variants in their configured sort order.
    product_variants.sort(key=lambda variant: variant["Variant Sort"])
    return product_variants
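A small, hypothetical usage example with the field names the function expects (the SKUs and sizes are made up):

# Hypothetical sample data using the keys get_product_variants relies on.
variants = [
    {"Product SKU": "TEE-1", "Variant Enabled": "Y", "Variant Sort": 2, "Size": "L"},
    {"Product SKU": "TEE-1", "Variant Enabled": "N", "Variant Sort": 1, "Size": "S"},
    {"Product SKU": "TEE-1", "Variant Enabled": "Y", "Variant Sort": 1, "Size": "M"},
    {"Product SKU": "MUG-9", "Variant Enabled": "Y", "Variant Sort": 1, "Size": ""},
]
print([v["Size"] for v in get_product_variants(variants, "TEE-1")])  # ['M', 'L']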
|
[
"def get_variants(self):\n return self.variants or []",
"def get_product_list(self):\n product_list = ProductModel.objects.in_bulk(self.keys())\n return product_list.values()",
"def get_products():",
"def get_all_variants():\n clean_expired_sessions()\n\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n # reads the maximum number of variants to return\n max_no_variants = request.args.get('max_no_variants', default=constants.MAX_NO_VARIANTS_TO_RETURN, type=int)\n\n logging.info(\"get_all_variants start session=\" + str(session) + \" process=\" + str(process))\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n parameters = {}\n parameters[\"max_no_variants\"] = int(max_no_variants)\n\n variants, log_summary = lh.get_handler_for_process_and_session(process, session).get_variant_statistics(\n parameters=parameters)\n dictio = {\"variants\": variants}\n for key in log_summary:\n dictio[key] = log_summary[key]\n logging.info(\n \"get_all_variants complete session=\" + str(session) + \" process=\" + str(process) + \" user=\" + str(user))\n\n ret = jsonify(dictio)\n\n return ret",
"async def getProductVariantsBySlug(self, slug=None, body=\"\"):\n payload = {}\n \n if slug:\n payload[\"slug\"] = slug\n \n # Parameter validation\n schema = CatalogValidator.getProductVariantsBySlug()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getProductVariantsBySlug\"], proccessed_params=\"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"slug\",\"description\":\"A short, human-readable, URL-friendly identifier of a product. You can get slug value from the endpoint /service/application/catalog/v1.0/products/\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"slug\",\"description\":\"A short, human-readable, URL-friendly identifier of a product. You can get slug value from the endpoint /service/application/catalog/v1.0/products/\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", slug=slug)\n query_string = await create_query_string(slug=slug)\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getProductVariantsBySlug\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/catalog/v1.0/products/{slug}/variants/\", slug=slug), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)",
"def products(self):\n return self.product_set.all()",
"def variants(self) -> QuerySet:\n return StoreItemVariant.objects.filter(item=self)",
"def each_product(self):\n for vendor in self.nvd_cve['cve']['affects']['vendor']['vendor_data']:\n for product in vendor['product']['product_data']:\n yield product",
"def get_substitutes_by_product(self, product, order_by=None, limit=None):\n cursor = db.cursor()\n cursor.execute(\n f\"SELECT id, name, url, nutriscore, description \"\n f\"FROM {self.table} \"\n f\"JOIN {models.Favorite.table} \"\n f\" ON substitute_id = id \"\n f\"WHERE {models.Product.table}_id = %(id)s\"\n f\"{self._format_order_by(order_by)}\"\n f\"{self._format_limit(limit)}\",\n vars(product),\n )\n results = [self.model(*row) for row in cursor]\n cursor.close()\n return results",
"def _variants(self, name):\n return sorted(name.variants, self._cmp_variant)",
"def variants(self) -> pd.DataFrame:\n return self._load_fetch(self.VARIANTS)",
"def get_versions(self, product, release=None):\n\n release_clause = f\" and release='{release}'\" if release else \"\"\n results = self.query_documents(\n 'build',\n where_clause=f\"product='{product}' {release_clause} and version IS NOT NULL\",\n doc_keys=['version'], distinct=True\n )\n\n return [result['version'] for result in results]",
"def with_variants(self):\n return self.prefetch_related('variants')",
"def products(self):\n from models.product import Product\n prods = models.storage.filter_by(Product, 'category', self.id)\n return prods",
"def generate_product_list(self) -> list:\n products = self._driver.find_elements(*RollsPageLocators.LOCATOR_PRODUCTS)\n for product in products:\n self.product_list.append(PropertyOfProduct(product))\n return self.product_list",
"def list(self, products):\n return self.call('cataloginventory_stock_item.list', [products])",
"def list(self):\n return self.call('catalog_product_type.list', [])",
"def get_variants(distro, stack_name):\n if stack_name == 'ROS':\n stack_name = 'ros'\n\n retval = []\n variants = distro.get('variants', {})\n \n for variant_d in variants:\n try:\n variant = variant_d.keys()[0]\n variant_props = variant_d[variant]\n if stack_name in variant_props['stacks']:\n retval.append(variant)\n elif 'extends' in variant_props and variant_props['extends'] in retval:\n retval.append(variant) \n except:\n pass\n return retval",
"def query_variants(self, chrom, start, end):\n variant_list = []\n req_body = {\n 'datasetId' : self.dataset_id,\n 'start': start,\n 'end': end,\n 'referenceName': chrom\n }\n r = requests.post('%s%s' % (self.host_url, 'variants/search'), json=req_body).json()\n for variant in r['results']['variants']:\n variant_list.append(':'.join([chrom, variant['start'], variant['end']]))\n return variant_list",
"def available_products():\n return jsonify(Product.get_gift_repository(get_db()))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a header row and one row for each available product, grouped by category.
|
def add_products(args, worksheet, row, cc_browser, products):
# Add header row.
set_cell(
worksheet,
row,
COL_ITEM_NO,
"Item No",
font_bold=True,
alignment_horizontal="right"
)
set_cell(worksheet, row, COL_DESCRIPTION, "Description", font_bold=True)
set_cell(
worksheet,
row,
COL_PRICE,
"Price",
font_bold=True,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
COL_MSRP,
"MSRP",
font_bold=True,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
COL_SIZE,
"Size",
font_bold=True
)
set_cell(
worksheet,
row,
COL_SKU,
"SKU",
font_bold=True
)
row += 1
# Remove excluded SKUs.
if args.exclude_skus:
products = [
x for x in products if str(x["SKU"]) not in args.exclude_skus
]
# Sort products by category, product_name.
products = sorted(products, key=cc_browser.product_key_by_cat_and_name)
# Fetch variants list.
variants = cc_browser.get_variants()
# Group products by category.
item_no = 1
for _, product_group in itertools.groupby(
products,
key=cc_browser.product_key_by_category
):
# Leave a row for the category name.
category = "unknown"
category_row = row
row += 1
# Add product rows.
for product in product_group:
if product["Available"] != "Y":
continue
row, item_no = add_product(
args,
worksheet,
row,
item_no,
product,
variants
)
category = product["Category"]
# Go back and insert category name.
if category == "":
category = "Uncategorized"
set_cell(
worksheet,
category_row,
COL_DESCRIPTION,
category,
font_bold=True
)
# Set column widths.
worksheet.column_dimensions[col_letter(COL_ITEM_NO)].width = 8
worksheet.column_dimensions[col_letter(COL_DESCRIPTION)].width = 100
worksheet.column_dimensions[col_letter(COL_PRICE)].width = 8
worksheet.column_dimensions[col_letter(COL_MSRP)].width = 8
worksheet.column_dimensions[col_letter(COL_SIZE)].width = 28
worksheet.column_dimensions[col_letter(COL_SKU)].width = 14
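itertools.groupby only merges adjacent equal keys, which is why the products are sorted by category before grouping above; a self-contained illustration (the category names are made up):

# Without sorting, groupby would split one category into several groups.
import itertools

products = [{"Category": "Mugs"}, {"Category": "Tees"}, {"Category": "Mugs"}]
keys = [k for k, _ in itertools.groupby(products, key=lambda p: p["Category"])]
print(keys)  # ['Mugs', 'Tees', 'Mugs'] -- 'Mugs' shows up twice

products.sort(key=lambda p: p["Category"])
keys = [k for k, _ in itertools.groupby(products, key=lambda p: p["Category"])]
print(keys)  # ['Mugs', 'Tees']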
|
[
"def insert_product(self, table):\n for i in self.products:\n # extract data\n name = i[\"name\"]\n quantity = i[\"quantity\"]\n brand = i[\"brand\"]\n description = i[\"description\"]\n url = i[\"url\"]\n rating = i[\"rating\"]\n category = i[\"category\"]\n # get cid from category name\n cat = Category(category)\n cat = cat.create()\n arg = \"\\\"\" + category + \"\\\"\"\n cid = table.read(cat, name=arg)\n cid = cid[0]\n cid = cid[\"cid\"]\n # create product object\n product = Product(name, quantity, brand, description, url,\n rating, cid)\n product = product.create()\n # insert in database\n try:\n table.insert(product)\n except ProgrammingError:\n raise",
"def fill_products_table(self, catalogue):\n rows, cols = catalogue.products.shape\n # Set number of entries\n self.tvProducts.setRowCount(rows)\n self.tvProducts.setColumnCount(cols)\n\n for row in range(rows):\n for col in range(cols):\n # Insert item on products TableView\n item = catalogue.products.iloc[row, col]\n self.tvProducts.setItem(row, col,\n QtWidgets.QTableWidgetItem(item))\n\n self.tvProducts.resizeColumnsToContents()\n\n # Set first product as default\n default_product = self.tvProducts.item(0, cols-1).text()\n self.lblProductVersion.setText(default_product)",
"def addRow(self, row_info):\n pass",
"def add_product(self,product):\n self.products.append(product)",
"def add_row(self, row):\n self.results_table_rows.append(row)",
"def add_products(list_products):\n for product in list_products:\n db.session.add(product)\n db.session.commit()\n if product.food_type == SANDWICH:\n new_menu_price_entry(product)",
"def append_process_table(self, id, product, product_energy, time):\n f = open(self.proctable_path, 'a')\n f.write(self.processtable_line % (id, product, product_energy, time))\n f.close()\n if self.procs != None:\n self.procs[id] = {\n \"product\": product,\n \"product_energy\": product_energy,\n \"time\": time\n }",
"def create_products():",
"def append_row(self, row):\n self.rows.append(row)",
"def add_products_to_store(self, store, *products):\n for product in products:\n self.create(product=product, store=store)",
"def addRow( self, data ):\n self.tableData.append( data )",
"def auto_AddRows(self, numrows=None):\n import string\n alphabet = string.lowercase[:26]\n rows = self.getRowCount()\n\n if rows <= 25:\n i=rows\n j=0\n else:\n i=int(rows%25)\n j=int(round(rows/25,1))\n #print i, j\n for x in range(numrows):\n if i >= len(alphabet):\n i=0\n j=j+1\n name = alphabet[i]+str(j)\n if name in self.reclist:\n pass\n else:\n self.addRow(name)\n i=i+1\n #print self.reclist\n return",
"def add_row(self, row):\n\n # Preconditions\n assert isinstance(row, list)\n\n # Start a new file if this is the first row\n if self.fp is None:\n self.start_new_file()\n\n self.writer.writerow(row)",
"def add_row(self, data):\n self.new_row()\n self.rewind_column()\n for item in data:\n try:\n self.set_value(item.encode())\n except AttributeError:\n self.set_value(item)\n if item == \".\":\n self.set_typeofvalue(b\"null\")\n try:\n self.next_column()\n except Exception:\n break",
"def create_product(self):\n if self.cursor:\n self.cursor.execute(\"INSERT INTO products(prod_name, \"\n \"prod_category, prod_price, prod_quantity,\"\n \"minimum_allowed,prod_description) \"\n \"VALUES(%s,%s,%s,%s,%s,%s)\",\n (self.data[\"prod_name\"],\n self.data[\"prod_category\"],\n self.data[\"prod_price\"],\n self.data[\"prod_quantity\"],\n self.data[\"minimum_allowed\"],\n self.data[\"prod_description\"],\n )\n )",
"def update_products(rows):\n\n if len(rows) == 0:\n raise DbError(\"No products provided in CSV file.\")\n product_ids = []\n c = get_cursor()\n for r in rows:\n name = r.get('name', \"\")\n promo_category_id = r.get('promo_category_id', None)\n is_available = r.get('is_available', 1)\n product_id = int(r['product_id'])\n product_ids.append(product_id)\n c.execute(\"\"\"update product\n set name = %s,\n promo_category_id = %s,\n is_available = %s\n where product_id = %s\"\"\",\n (name, promo_category_id, is_available, product_id))\n c.execute(\"\"\"delete from product_price\n where product_id = %s\"\"\",\n (product_id, ))\n for i in range(CSVPRICECOUNT):\n min_quantity = int(r.get(\"min_quantity\" + str(i), 0))\n price = r.get(\"price\" + str(i), 0)\n sale_price = r.get(\"sale_price\" + str(i), 0)\n if min_quantity > 0:\n c.execute(\"\"\"insert into product_price \n (product_id, min_quantity, price, sale_price)\n values (%s, %s, %s, %s)\"\"\",\n (product_id, min_quantity, price, sale_price))\n Db.cache_invalidate()\n\n rows = []\n for product_id in product_ids:\n rows.append(Statics.products.get_id(product_id))\n return rows",
"def create_product_table(width, height):\r\n result = []\r\n for i in range(width):\r\n row = []\r\n for j in range(height):\r\n row.append(i * j)\r\n result.append(row)\r\n return result",
"def add_sale_products(self, *args):\n sale_id = args[0]\n prod_id = args[1]\n quantity = args[2]\n\n add_sale_prod = \"\"\"\n INSERT INTO sale_products(sale_id, prod_id, quantity)\\\n VALUES ('{}', '{}', '{}')\n RETURNING sale_id;\n \"\"\".format(sale_id, prod_id, quantity)\n cursor.execute(add_sale_prod)",
"def addAdditionalTechEntryRow(self):\r\n # create label\r\n self.label = QtGui.QLabel(self.centralWidget)\r\n self.label.setText(self.btnAddTechEntryLine.text())\r\n self.label.setObjectName(_fromUtf8(\"label_\" + str(self.naming_counter)))\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\r\n self.label.setSizePolicy(sizePolicy)\r\n self.Addinfogrid.addWidget(self.label, self.current_row, 0, 1, 1)\r\n #create combobox\r\n combobox = QtGui.QComboBox(self.centralWidget)\r\n combobox.setObjectName(_fromUtf8(\"combobox_\" + str(self.naming_counter)))\r\n combobox.addItem(_fromUtf8(\"Operator/Date\"))\r\n combobox.addItem(_fromUtf8(\"SN/Rev\"))\r\n combobox.addItem(_fromUtf8(\"Rev\"))\r\n combobox.addItem(_fromUtf8(\"ID\"))\r\n self.Addinfogrid.addWidget(combobox, self.current_row, 3, 1, 1)\r\n #update\r\n self.row_contents[self.current_row].append(combobox)\r\n self.row_contents[self.current_row].append(self.label)\r\n self.row_contents[self.current_row].append(self.keyAdditionalTechEntry)\r\n self.current_row += 1\r\n self.naming_counter += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create the Wholesale Line Sheet worksheet.
|
def add_line_sheet(args, config, cc_browser, products, worksheet):
# Prepare worksheet.
worksheet.title = "Wholesale Line Sheet"
# Add title.
row = add_title(args, config, worksheet)
# Blank row.
row += 1
# Add products.
add_products(args, worksheet, row, cc_browser, products)
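The worksheet attributes used here and in add_products (worksheet.title, column_dimensions[...].width) match openpyxl's API; a hypothetical driver under that assumption (the output filename is made up):

# Hypothetical driver, assuming the helpers above operate on openpyxl worksheets.
from openpyxl import Workbook

wb = Workbook()
ws = wb.active                           # would be passed to add_line_sheet as `worksheet`
ws.title = "Wholesale Line Sheet"
ws.column_dimensions["B"].width = 100    # the same attribute add_products sets via col_letter
wb.save("wholesale-line-sheet.xlsx")     # made-up output path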
|
[
"def create_sheet(self):\n workbook = xlwt.Workbook()\n borders = Borders()\n header_border = Borders()\n header_title_border = Borders()\n ware_or_loc_border = Borders()\n header_border.left, header_border.right, header_border.top, header_border.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THICK\n header_title_border.left, header_title_border.right, header_title_border.top, header_title_border.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THICK\n ware_or_loc_border.left, ware_or_loc_border.right, ware_or_loc_border.top, ware_or_loc_border.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THICK\n borders.left, borders.right, borders.top, borders.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THIN\n header_bold = xlwt.easyxf(\n \"font: bold on, height 250; pattern: pattern solid, fore_colour gray25;alignment: horizontal center ,vertical center\")\n header_bold.borders = header_border\n body_style = xlwt.easyxf(\"font: height 200; alignment: horizontal center\")\n style = xlwt.easyxf(\n \"font: height 210, bold True; alignment: horizontal center,vertical center;borders: top medium,right medium,bottom medium,left medium\")\n body_style.borders = borders\n\n header_title = xlwt.easyxf(\n \"font: bold on, height 315; pattern: pattern solid, fore_colour ice_blue;alignment: horizontal center ,vertical center\")\n header_title.borders = header_title_border\n\n xlwt.add_palette_colour(\"light_blue_21\", 0x25)\n workbook.set_colour_RGB(0x25, 179, 255, 240)\n cell_string_style = xlwt.easyxf(\n \"font: height 200, name Arial; align: horiz left, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n\n xlwt.add_palette_colour(\"light_blue_21\", 0x25)\n workbook.set_colour_RGB(0x25, 179, 255, 240)\n cell_number_style = xlwt.easyxf(\n \"font: height 200, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n return workbook, header_bold, body_style, style, header_title, cell_string_style, cell_number_style",
"def generate_xlsx_report(self, workbook, data, parts):\n # add the worksheet\n worksheet = workbook.add_worksheet(\"product\")\n worksheet.set_column(0, 0, 10)\n worksheet.set_column(1, 1, 15)\n worksheet.set_column(2, 2, 10)\n worksheet.set_column(3, 3, 10)\n worksheet.set_column(4, 4, 9)\n worksheet.set_column(5, 5, 12)\n worksheet.set_column(6, 6, 10)\n worksheet.set_column(7, 7, 15)\n worksheet.set_column(8, 8, 10)\n worksheet.set_column(9, 9, 9)\n worksheet.set_column(10, 10, 9)\n worksheet.set_column(11, 11, 18)\n worksheet.set_column(12, 12, 15)\n worksheet.set_column(13, 13, 12)\n worksheet.set_column(14, 14, 12)\n worksheet.set_column(15, 15, 12)\n bold = workbook.add_format(\n {\"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot.set_bg_color(\"gray\")\n row = 0\n for pr in parts:\n row += 1\n row += 1\n worksheet.write(row, 3, \" General Parts Listing \", bold)\n row += 3\n worksheet.write(row, 0, \"No.\", tot)\n worksheet.write(row, 1, \"Part No:\", tot)\n worksheet.write(row, 2, \"Part Name\", tot)\n worksheet.write(row, 3, \"Vehicle Make\", tot)\n worksheet.write(row, 4, \"Location \", tot)\n worksheet.write(row, 5, \"Unit Type\", tot)\n worksheet.write(row, 6, \"Qty \", tot)\n worksheet.write(row, 7, \"Incomming \", tot)\n worksheet.write(row, 8, \"Outgoing\", tot)\n worksheet.write(row, 9, \"Ending Balance\", tot)\n worksheet.write(row, 10, \"Reorder point\", tot)\n worksheet.write(row, 11, \"Reorder Qty\", tot)\n row += 2\n counter = 1\n for line in pr:\n worksheet.write(row, 0, counter, bold)\n worksheet.write(row, 1, line.default_code or \"\")\n worksheet.write(row, 2, line.name or \"\")\n worksheet.write(\n row, 3, line.vehicle_make_id and line.vehicle_make_id.name or \"\"\n )\n worksheet.write(row, 4, \"Location\")\n worksheet.write(row, 5, line.uom_id and line.uom_id.name or \"\")\n worksheet.write(row, 6, line.qty_available or 0.0)\n worksheet.write(row, 7, line.incoming_qty or 0.0)\n worksheet.write(row, 8, line.outgoing_qty or 0.0)\n worksheet.write(row, 9, line.virtual_available or 0.0)\n worksheet.write(row, 10, line.re_order_point or 0.0)\n worksheet.write(row, 11, line.re_order_qty or 0.0)\n counter += 1\n row += 8",
"def _initialize_sheet(self, sheet_name):\n \n # Creates the sheet\n write_name = sheet_name[:31] if (len(sheet_name) > 31) else sheet_name\n self.sheets[sheet_name] = self.wb.add_worksheet(write_name)\n \n # Widens the first column\n self.sheets[sheet_name].set_column('A:A', 19)\n \n # Sets the date row format\n self.sheets[sheet_name].set_row(self.date_row, cell_format=self.dateformat)\n \n # Sets the week number row format\n self.sheets[sheet_name].set_row(self.week_row, cell_format=self.weeknumformat)\n \n # Sets the series header and row format\n row = self.top_write_row\n for series_name, _ in self.series_names:\n self.sheets[sheet_name].set_row(row, cell_format=self.itemrowformat)\n self.sheets[sheet_name].write(row, 0, series_name, self.itemnameformat)\n row += 2\n \n # Sets the total header and row format\n self.sheets[sheet_name].write(row, 0, TOTAL, self.totalnameformat)\n self.sheets[sheet_name].set_row(row, cell_format=self.totalrowformat)\n \n return self.sheets[sheet_name]",
"def create_sheet(self, name, rows=1, cols=1):\n self.spread.add_worksheet(name, rows, cols)\n self._refresh_sheets()\n self.open_sheet(name)",
"def create_worksheet(worksheet):\n try:\n local('cl work {}'.format(worksheet), capture=True)\n except BaseException:\n # create new worksheet\n local('cl new {}'.format(worksheet))\n\n # give it a title\n local('cl wedit -t {w} {w}'.format(w=worksheet))\n\n # link to worksheet from home page\n local('cl add worksheet {} wge'.format(worksheet))\n\n # grant permissions\n local('cl wperm {} wge all'.format(worksheet))\n\n print 'Created worksheet: {}'.format(worksheet)",
"def create_example_xl():\n if XL_FILE.exists(): # Don't need to recreate it\n return\n\n df = pd.DataFrame(\n {\n \"tracking\": [\"F12\", \"U23\", \"F34\", \"U45\"],\n \"invoice\": [\"I120\", \"I230\", \"I340\", \"I450\"],\n }\n )\n df.to_excel(XL_FILE, index=False)",
"def generate_pending_repairs_xlsx_report(self, res, fleet_pending):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"fleet_pending\")\n worksheet.col(0).width = 6000\n worksheet.col(1).width = 6000\n worksheet.col(2).width = 7500\n worksheet.col(3).width = 12500\n worksheet.col(4).width = 5500\n worksheet.col(5).width = 6000\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 5000\n worksheet.col(8).width = 2500\n font = xlwt.Font()\n # borders = xlwt.Borders()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n tot = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n style1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n # border = xlwt.easyxf('font: name 1; font: height 200')\n format1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n\n row = 0\n row += 1\n worksheet.write(row, 2, \"Fleet With Pending Repairs\", format1)\n row += 2\n for obj in fleet_pending:\n if obj.pending_repair_type_ids:\n row += 3\n worksheet.write(row, 0, \"Vehicle Information :\", format1)\n row += 2\n worksheet.write(row, 2, \"Kilometer :\", format1)\n worksheet.write(row, 3, obj.odometer or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Vehicle ID :\", format1)\n worksheet.write(row, 3, obj.name or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Type :\", format1)\n worksheet.write(\n row,\n 3,\n obj.vechical_type_id and obj.vechical_type_id.name or \"\",\n tot,\n )\n row += 1\n worksheet.write(row, 2, \"VIN :\", format1)\n worksheet.write(row, 3, obj.vin_sn or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Color :\", format1)\n worksheet.write(\n row,\n 3,\n obj.vehical_color_id and obj.vehical_color_id.name or \"\",\n tot,\n )\n row += 1\n worksheet.write(row, 2, \"Driver :\", format1)\n worksheet.write(row, 3, obj.driver_id and obj.driver_id.name or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Driver Contact :\", format1)\n worksheet.write(row, 3, obj.driver_contact_no or \"\", tot)\n row += 4\n worksheet.write(row, 0, \"Repair Types :\", format1)\n row += 2\n worksheet.write(row, 1, \"No. :\", format1)\n worksheet.write(row, 2, \"Ref. 
WO# :\", format1)\n worksheet.write(row, 3, \"Repair Type :\", format1)\n worksheet.write(row, 4, \"Category :\", format1)\n worksheet.write(row, 5, \"Actual Date Issued :\", format1)\n row += 1\n counter = 1\n for line in obj.pending_repair_type_ids:\n worksheet.write(row, 1, counter, tot)\n worksheet.write(row, 2, line.name or \"\", tot)\n worksheet.write(\n row,\n 3,\n line.repair_type_id and line.repair_type_id.name or \"\",\n tot,\n )\n worksheet.write(\n row, 4, line.categ_id and line.categ_id.name or \"\", tot\n )\n\n date = \"\"\n if line.issue_date:\n date = format_date(\n self.env,\n line.issue_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(row, 5, date or \"\", style1)\n row += 1\n counter += 1\n row += 3\n worksheet.write(row, 0, \"**************************\")\n worksheet.write(row, 1, \"**************************\")\n worksheet.write(row, 2, \"**************************\")\n worksheet.write(row, 3, \"**************************\")\n worksheet.write(row, 4, \"**************************\")\n worksheet.write(row, 5, \"**************************\")\n worksheet.write(row, 6, \"**************************\")\n row += 1\n worksheet.write(row, 0, \"**************************\")\n worksheet.write(row, 1, \"**************************\")\n worksheet.write(row, 2, \"**************************\")\n worksheet.write(row, 3, \"**************************\")\n worksheet.write(row, 4, \"**************************\")\n worksheet.write(row, 5, \"**************************\")\n worksheet.write(row, 6, \"**************************\")\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res",
"def add_headings(self, categories_or_warehouses, workbook, header_bold):\n row_data = {}\n sheet_data = {}\n\n for record in categories_or_warehouses:\n record.new_workbook = workbook.add_sheet(\" %s\" % (record.display_name), cell_overwrite_ok=True)\n record.new_workbook.write_merge(0, 0, 0, 11, \"Top Growing Products\", header_bold)\n record.new_workbook.row(0).height_mismatch = True\n record.new_workbook.row(0).height = 350\n record.new_workbook.row(1).height_mismatch = True\n record.new_workbook.row(1).height = 350\n record.new_workbook.col(0).width = 1000\n record.new_workbook.col(1).width = 4000\n record.new_workbook.col(2).width = 8000\n record.new_workbook.col(3).width = 5500\n record.new_workbook.col(4).width = 3500\n record.new_workbook.col(5).width = 3500\n record.new_workbook.col(6).width = 3500\n record.new_workbook.col(7).width = 3500\n record.new_workbook.col(8).width = 3500\n record.new_workbook.col(9).width = 3500\n record.new_workbook.col(10).width = 3500\n record.new_workbook.col(11).width = 3500\n\n record.new_workbook.write(1, 0, 'No', header_bold)\n record.new_workbook.write(1, 1, 'SKU', header_bold)\n record.new_workbook.write(1, 2, 'Name', header_bold)\n record.new_workbook.write(1, 3, 'Category', header_bold)\n record.new_workbook.write(1, 4, 'Average Sale Price', header_bold)\n record.new_workbook.col(4).width = (len('Average Sale Price') * 367)\n record.new_workbook.write(1, 5, 'Average Cost Price', header_bold)\n record.new_workbook.col(5).width = (len('Average Cost Price') * 367)\n record.new_workbook.write(1, 6, 'Current Stock', header_bold)\n record.new_workbook.col(6).width = (len('Current Stock') * 367)\n record.new_workbook.write(1, 8, 'Total purchase', header_bold)\n record.new_workbook.col(8).width = (len('Total purchase') * 367)\n record.new_workbook.write(1, 7, 'Total sales', header_bold)\n record.new_workbook.col(7).width = (len('Total sales') * 367)\n record.new_workbook.write(1, 9, 'Last Period Sales', header_bold)\n record.new_workbook.col(9).width = (len('Last Period Sales') * 367)\n record.new_workbook.write(1, 10, 'Selected Period Sales', header_bold)\n record.new_workbook.col(10).width = (len('Selected Period Sales') * 367)\n record.new_workbook.write(1, 11, 'Growth Ratio', header_bold)\n record.new_workbook.col(11).width = (len('Growth Ratio') * 367)\n record.new_workbook.write(1, 12, 'Rack Location', header_bold)\n record.new_workbook.col(12).width = (len('Rack Location') * 367)\n record.new_workbook.set_panes_frozen(True)\n record.new_workbook.set_horz_split_pos(2)\n # #Get categories wise worksheet\n sheet_data.update({record.id: record.new_workbook})\n row_data.update({record.new_workbook: 2})\n return workbook, sheet_data, row_data",
"def create_horizontal_line():\n d = Drawing(100, 1)\n d.add(Line(0, 0, 1000, 0))\n return d",
"def create_xlsx(request):\n\n date_dict = spending_date_parser(request)\n\n individual_spending_history = create_spending_history_individual \\\n (user=date_dict['user_id'],\n start_date=date_dict['start_date'],\n finish_date=date_dict['finish_date'],\n utc_difference=date_dict['utc_difference'])\n group_spending_history = create_spending_history_for_admin \\\n (user=date_dict['user_id'],\n start_date=date_dict['start_date'],\n finish_date=date_dict['finish_date'],\n utc_difference=date_dict['utc_difference'])\n\n output, worksheet, workbook, formats_dict = creating_empty_xlsx_file()\n\n row, col = 2, 1\n if individual_spending_history:\n worksheet.write(row - 1, col, 'Individual spending', formats_dict['head_format'])\n for key in individual_spending_history[0]['history'][0]:\n if key != 'Delete':\n worksheet.write(row - 1, col + 1, key, formats_dict['head_format'])\n col += 1\n\n col = 1\n for spending_dicts in individual_spending_history:\n for history_dict in spending_dicts['history']:\n worksheet.write(row, col, spending_dicts['spending'], formats_dict['cell_format'])\n worksheet.write_number \\\n (row, col + 1, history_dict['value'], formats_dict['value_format'])\n worksheet.write(row, col + 2, history_dict['date'], formats_dict['date_format'])\n worksheet.write(row, col + 3, history_dict['fund'], formats_dict['cell_format'])\n row += 1\n if group_spending_history:\n row = row + 1\n worksheet.write(row, col, 'Group spending', formats_dict['head_format'])\n for key in group_spending_history[0]['history'][0]:\n if key == 'member':\n worksheet.write(row, col - 1, 'Member', formats_dict['head_format'])\n elif key != 'Delete':\n worksheet.write(row, col + 1, key, formats_dict['head_format'])\n col += 1\n\n row, col = row + 1, 1\n for spending_dicts in group_spending_history:\n for history_dict in spending_dicts['history']:\n worksheet.write(row, col - 1, history_dict['member'], formats_dict['cell_format'])\n worksheet.write(row, col, spending_dicts['spending'], formats_dict['cell_format'])\n worksheet.write_number \\\n (row, col + 1, history_dict['value'], formats_dict['value_format'])\n worksheet.write(row, col + 2, history_dict['date'], formats_dict['date_format'])\n worksheet.write(row, col + 3, history_dict['fund'], formats_dict['cell_format'])\n row += 1\n\n workbook.close()\n\n response = file_streaming_response \\\n ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'spending_history.xlsx', output)\n return response",
"def case_writer(BusinessDocument, workbook):\r\n # Styling stuff\r\n font = xlwt.Font()\r\n font.bold = True\r\n title_text = xlwt.easyxf(\"align: vert top, horiz left\")\r\n title_text.font = font\r\n style_text = xlwt.easyxf(\"align: wrap on, vert top, horiz left\")\r\n\r\n #Create the worksheets for the test cases.\r\n worksheet = workbook.add_sheet(\"Dashboard\")\r\n\r\n # for worksheet in [worksheet_ext, worksheet_int]:\r\n # Write the Header row \r\n worksheet.write(0, 0, \"Test Type\", title_text)\r\n worksheet.write(0, 1, \"HPQC Test Plan Folder name\", title_text)\r\n worksheet.write(0, 2, \"Your User ID\", title_text)\r\n worksheet.write(0, 3, \"Residence Type\", title_text)\r\n worksheet.write(0, 4, \"Test Case ID(Business ReqID+ BR/DFS/SC+screen Name)\", title_text) \r\n worksheet.write(0, 5, \"Test Case Description\", title_text)\r\n worksheet.write(0, 6, \"Pre-Condition\", title_text)\r\n worksheet.write(0, 7, \"Execution\", title_text)\r\n worksheet.write(0, 8, \"Expected Result\", title_text)\r\n worksheet.write(0, 9, \"Release\", title_text)\r\n worksheet.write(0, 10, \"Created by(optional)\", title_text)\r\n worksheet.write(0, 11, \"Creation Date(optional)\", title_text)\r\n\r\n # Size adjustments \r\n worksheet.col(0).width = 30*70\r\n worksheet.col(1).width = 30*200\r\n worksheet.col(2).width = 30*70\r\n worksheet.col(3).width = 30*70\r\n worksheet.col(4).width = 30*180\r\n worksheet.col(5).width = 30*270\r\n worksheet.col(6).width = 30*70\r\n worksheet.col(7).width = 30*256\r\n worksheet.col(8).width = 30*220\r\n worksheet.col(9).width = 30*70\r\n worksheet.col(10).width = 30*70\r\n worksheet.col(11).width = 30*100\r\n\r\n \r\n # # The first sheet is revision history so we don't need it.\r\n # business_sheets = BusinessDocument.sheets()[1:]\r\n\r\n # We're going to write to both the external and internal sheets simultaneously\r\n # a will serve as the counter for external, and b for internal\r\n \r\n c = 1\r\n # Iterate through the business documents, first iteration would be CRA, next YJ etc\r\n\r\n \r\n RuleSheet = BusinessDocument.sheets()[0]\r\n \r\n \r\n for i in range (1, RuleSheet.nrows):\r\n # easy references\r\n Role = RuleSheet.row_values(i)[0]\r\n Table = RuleSheet.row_values(i)[1]\r\n Section = RuleSheet.row_values(i)[2]\r\n col_list = []\r\n for column in range(3,11):\r\n col_list.append(RuleSheet.row_values(i)[column])\r\n \r\n \r\n \r\n # Setting up the content for the cells\r\n Description = (\"Verify for the \" + Role + \", in the table named:\\n\\n\" +\r\n Table + \"\\n\\nUnder the section:\\n\\n\" + Section + \"\\n\\nThe columns are oganized correctly.\")\r\n columnstring = \"\"\r\n counter = 1\r\n for entry in col_list:\r\n if entry == \"\":\r\n break #Nothing to do if its blank\r\n \r\n columnstring += \"\\nColumn \" + str(counter) + \": \" + entry\r\n counter +=1\r\n\r\n \r\n Expected_Result = \"The columns are organized as follows:\" + columnstring\r\n \r\n \r\n\r\n Execution = (\"1) Login as \" + Role \r\n + \"\\n2) View the dashboard.\" \r\n + \"\\n3) Verify the table, section, and column labels are correct.\"\r\n + \"\\n4) Verify that the columns are organized correctly.\")\r\n \r\n case_id = \"SO_Dashboard_Roles_\" + str(c)\r\n\r\n \r\n # Now that we have all the information, we can just write it to the correct sheet\r\n worksheet.write(c, 0, \"Manual\", style_text)\r\n worksheet.write(c, 1, \"*****_SO_Dashboard\", style_text)\r\n worksheet.write(c, 2, \"*****\", style_text)\r\n worksheet.write(c, 3, \"All\", style_text)\r\n 
worksheet.write(c, 4, case_id, style_text)\r\n worksheet.write(c, 5, Description, style_text)\r\n worksheet.write(c, 6, \"N/A\", style_text)\r\n worksheet.write(c, 7, Execution, style_text)\r\n worksheet.write(c, 8, Expected_Result, style_text)\r\n worksheet.write(c, 9, \"2.0\", style_text)\r\n worksheet.write(c, 10, \"Sagar\", style_text)\r\n worksheet.write(c, 11, now.strftime(\"%m-%d-%Y\"), style_text)\r\n worksheet.write\r\n c+=1\r\n\r\n \r\n workbook.save(\"DashboardRoles_test_Cases.xls\")",
"def add_content_xl_styles():\n def apply_styles_to_rows():\n def swap_version_row_color():\n if cur_version_style == light_version_row_style:\n return dark_version_row_style\n else:\n return light_version_row_style\n\n cur_version_style = light_version_row_style\n veh_col_letter = utils.get_column_letter(xl(self.POSITION['vehicle_col']))\n prod_mdl_yr_col_letter = utils.get_column_letter(xl(self.POSITION['prod_model_year_col']))\n\n for row in range(self.POSITION['first_sample_row'], xl(last_row_index)):\n if self.matrix[row][self.vehicle_desc_mark_up_col] == 'v': # version row\n if self.matrix[row][self.POSITION['vehicle_col']] != '':\n cur_version_style = swap_version_row_color()\n cur_style = cur_version_style\n elif self.matrix[row][self.vehicle_desc_mark_up_col] == 'm': # model row\n cur_style = model_row_style\n else: # make row\n cur_style = make_row_style\n\n self.ws['{}{}'.format(veh_col_letter, xl(row))].style = cur_style\n self.ws['{}{}'.format(prod_mdl_yr_col_letter, xl(row))].style = cur_style\n sample_headers_amount = len(self.sample_headers)\n for sample_date_index in range(len(self.sample_dates)):\n for sample_header in self.sample_headers:\n cell = '{}{}'.format(utils.get_column_letter(xl(\n self.POSITION['first_sample_col']\n + sample_headers_amount * sample_date_index\n + sample_header.offset)), xl(row))\n self.ws[cell].style = cur_style\n self.ws[cell].number_format = sample_header.number_format\n\n make_row_style = styles.NamedStyle(name='make_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('F1F2F2')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('000000')))\n model_row_style = styles.NamedStyle(name='model_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('000000')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('939598')))\n light_version_row_style = styles.NamedStyle(name='light_version_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('000000')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('F1F2F2')))\n dark_version_row_style = styles.NamedStyle(name='dark_version_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('000000')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('DCDDDE')))\n apply_styles_to_rows()",
"def create_new_sale_order(self, cr, uid, order, lines):\n\n order_data = {\n 'partner_id': order.partner_id.id,\n 'partner_invoice_id': order.partner_invoice_id.id,\n 'partner_order_id': order.partner_order_id.id,\n 'partner_shipping_id': order.partner_shipping_id.id,\n 'shop_id': order.shop_id.id,\n 'client_order_ref': order.client_order_ref,\n 'incoterm': order.incoterm.id,\n 'picking_policy': order.picking_policy,\n 'order_policy': order.order_policy,\n 'pricelist_id': order.pricelist_id.id,\n 'project_id': order.project_id.id,\n 'note': order.note,\n 'invoice_quantity': order.invoice_quantity,\n 'payment_term': order.payment_term.id,\n 'fiscal_position': order.fiscal_position.id,\n 'order_line': [],\n 'origin' : order.name,\n 'state': 'manual',\n }\n\n today = date.today()\n subscription_start_date = date_helper.get_first_day_next_month(today)\n subscription_end_date = date_helper.get_last_day_month(subscription_start_date)\n\n for line in lines:\n line_data = {\n 'name': line.name,\n 'delay': line.delay,\n 'product_id': line.product_id.id,\n 'price_unit': line.price_unit,\n 'tax_id': line.tax_id,\n 'type': line.type,\n 'address_allotment_id': line.address_allotment_id.id,\n 'product_uom_qty': line.product_uom_qty,\n 'product_uom': line.product_uom.id,\n 'product_uos_qty': line.product_uos_qty,\n 'product_uos': line.product_uos.id,\n 'product_packaging': line.product_packaging.id,\n 'notes': line.notes,\n 'discount': line.discount,\n 'subscription_end_date': subscription_end_date,\n 'subscription_start_date': subscription_start_date,\n }\n order_data['order_line'].append((0, 0, line_data))\n\n sale_order_object = self.pool.get('sale.order')\n new_order_id = sale_order_object.create(cr, uid, order_data)",
"def _make_xlsx(self, report_type, report_data, report_day):\r\n filename = \"{}_{}.xlsx\".format(report_type, report_day)\r\n path = os.path.join(self.report_path, filename)\r\n logger.debug(\"Reporter: creating {}\".format(filename))\r\n\r\n workbook = xlsxwriter.Workbook(path)\r\n\r\n cell_format = {\r\n \"hat\": workbook.add_format({\r\n 'bold': True,\r\n 'text_wrap': True,\r\n 'font_size': 14,\r\n }),\r\n \"header\": workbook.add_format({\r\n 'bold': True,\r\n 'align': 'center',\r\n 'valign': 'vcenter',\r\n 'text_wrap': True,\r\n 'border': 1,\r\n }),\r\n \"text\": workbook.add_format({\r\n 'valign': 'vcenter',\r\n 'align': 'center',\r\n 'border': 1,\r\n }),\r\n \"black\": workbook.add_format({\r\n 'font_color': 'black',\r\n }),\r\n \"grey\": workbook.add_format({\r\n 'font_color': '#b2b2b2',\r\n }),\r\n \"date\": workbook.add_format({\r\n 'num_format': 'yyyy.mm.dd',\r\n 'align': 'center',\r\n 'valign': 'vcenter',\r\n 'border': 1,\r\n }),\r\n \"time\": workbook.add_format({\r\n 'num_format': 'hh:mm:ss',\r\n 'align': 'center',\r\n 'valign': 'vcenter',\r\n 'border': 1,\r\n }),\r\n }\r\n\r\n for day in sorted(report_data.keys()):\r\n data = report_data[day]\r\n\r\n worksheet = workbook.add_worksheet(day)\r\n worksheet.set_column(0, 0, 5)\r\n worksheet.set_column(1, 2, 12)\r\n worksheet.set_column(3, 3, 25)\r\n worksheet.set_column(4, 5, 15)\r\n\r\n if data:\r\n worksheet.merge_range(0, 0, 0, 5,\r\n u'Количество записей в отчете: {}'.format(len(data)),\r\n cell_format[\"hat\"]\r\n )\r\n\r\n worksheet.write(1, 0, u\"№\", cell_format[\"header\"])\r\n worksheet.write(1, 1, u\"Дата\", cell_format[\"header\"])\r\n worksheet.write(1, 2, u\"Время\", cell_format[\"header\"])\r\n worksheet.write(1, 3, u\"Название канала\", cell_format[\"header\"])\r\n worksheet.write(1, 4, u\"Название зоны\", cell_format[\"header\"])\r\n worksheet.write(1, 5, u\"Кол-во людей\", cell_format[\"header\"])\r\n\r\n else:\r\n worksheet.merge_range(0, 0, 0, 5,\r\n u'Днные отсутствуют',\r\n cell_format[\"hat\"])\r\n\r\n for idx, row in enumerate(data, 2):\r\n\r\n dt = self.ts_to_datetime(row[-1])\r\n\r\n worksheet.write(idx, 0, idx - 1, cell_format['text'])\r\n worksheet.write(idx, 1, dt, cell_format['date'])\r\n worksheet.write(idx, 2, dt, cell_format['time'])\r\n worksheet.write(idx, 3, row[1], cell_format['text'])\r\n worksheet.write(idx, 4, row[2], cell_format['text'])\r\n worksheet.write(idx, 5, row[3], cell_format['text'])\r\n\r\n workbook.close()\r\n\r\n return path",
"def get_or_create_new(worksheet_title):\n for worksheet in worksheets:\n if worksheet.title == worksheet_title:\n return worksheet\n return sh.add_worksheet(title=worksheet_title, rows=str(num_students), cols=\"26\")",
"def create_worksheets(workbook: openpyxl.Workbook):\r\n general_utils.check_wb_obj(workbook)\r\n\r\n # Remove any sheets that have the name 'Sheet' (default sheet names)\r\n for name in workbook.sheetnames:\r\n if \"Sheet\" in name:\r\n workbook.remove(workbook[name])\r\n\r\n new_worksheet_names = [\r\n \"Table of Contents\",\r\n \"Components\",\r\n \"Treatment Schedules\",\r\n \"Plan Attributes\",\r\n \"Review questions\",\r\n ]\r\n\r\n for name in new_worksheet_names:\r\n workbook.create_sheet(title=name)\r\n\r\n return workbook",
"def _finalize_sheet(self, sheet, final_col):\n \n # Adds week number to the top of the sheet\n sheet.write('A1', 'Number of Weeks:', self.regformat)\n sheet.write('B1', final_col, self.regformat)\n \n # Resizes the widths of the columns\n sheet.set_column(1, final_col, 4)\n \n # Obtains the row of the total count and writes Total on the right\n row = self.top_write_row + (2 * len(self.series_names))\n sheet.write(row, final_col + 2, TOTAL, self.totalnameformat)\n \n # Writes the description of each item on the right of the counts\n for _, desc in reversed(self.series_names):\n row -= 2\n sheet.write(row, final_col + 2, desc, self.itemnameformat)",
"def _create_chart(self, sheet, final_col, chart_name, issue_type='Issue', \n insertion_row=None, weeks=None, fill_map=None, min_y=None):\n \n # Initializes x-axis parameters\n x_axis = {\n 'name': 'Dates',\n 'name_font': {'size': 12, 'bold': True, 'font': 'Arial'},\n 'num_font': {'italic': True, 'font': 'Arial'},\n 'date_axis': True, 'num_format': 'mm/dd/yyyy'\n }\n \n # Updates chart name if weeks is given\n if (weeks):\n if (weeks == 26):\n chart_name += ' (past 6 months)'\n elif (weeks == 9):\n chart_name += ' (past 2 months)'\n else:\n chart_name += ' (past %d weeks)' % weeks\n \n # Separate code changing x-axis information based on week range\n if (weeks <= 26):\n x_axis.update({\n 'major_unit': 7, 'major_unit_type': 'days', \n 'minor_unit': 1, 'minor_unit_type': 'days'\n })\n \n # Creates the chart\n chart = self.wb.add_chart({'type':'area', 'subtype':'stacked', 'name':chart_name})\n chart.set_size({'width': 1200, 'height': 600})\n chart.set_title({\n 'name': chart_name,\n 'name_font': {'name': 'Arial'}\n })\n chart.set_y_axis({\n 'name': 'Number of %ss' % issue_type,\n 'name_font': {'size': 12, 'bold': True, 'font': 'Arial'},\n 'min' : min_y\n })\n chart.set_x_axis(x_axis)\n \n # Determines starting column of range\n first_col = 1 if (not weeks or final_col <= weeks) else (final_col - weeks)\n \n # Sets insertion row for chart, if it has not been set\n if (not insertion_row):\n insertion_row = 8 + (2 * len(self.series_names))\n \n # Iterates through each item being counted\n row = 4 + (2 * len(self.series_names))\n for series_name, _ in self.series_names:\n # Adds series to the chart based on the current row of data\n chart.add_series({\n 'name' : \"='%s'!$A$%d\" % (sheet.name, row+1),\n 'categories': [\"'%s'\" % sheet.name, 3, first_col, 3, final_col],\n 'values': [\"'%s'\" % sheet.name, row, first_col, row, final_col],\n 'fill': None if (not fill_map) else fill_map[series_name],\n })\n \n row -= 2\n \n # Insert chart into the sheet below the data\n sheet.insert_chart(insertion_row, 1, chart)",
"def print_wo_xlsx_report(self):\n for vehicle in self:\n wo_obj = self.env[\"fleet.vehicle.log.services\"]\n records = wo_obj.search([])\n if vehicle.vehicle_ids:\n records = wo_obj.search([(\"vehicle_id\", \"in\", vehicle.vehicle_ids.ids)])\n if vehicle.select_report == \"wo_month_sum_rep\":\n wo_obj = self.env[\n \"report.fleet_operations.workorder.monthly.summary.xls\"\n ]\n file = wo_obj.generate_xlsx_report(records)\n vehicle.write(\n {\"name\": \"WorkOrder Monthly Summary Report.xls\", \"file\": file}\n )\n return {\n \"view_type\": \"form\",\n \"view_mode\": \"form\",\n \"res_model\": \"work.order.reports\",\n \"type\": \"ir.actions.act_window\",\n \"target\": \"new\",\n \"res_id\": vehicle.id,\n }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This is an initialization routine for the app. The app needs to know the range of purchase dates in order to distinguish prehistory from days with zero sales. Sets a timeseries config dict (ts_config) to hold these min and max dates.
|
def db_get_ts_config():
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
db_cursor.execute("select * from dbo.vTransactionStats") # Application needs to know, minimally, first and last overall transaction dates
result = db_cursor.fetchone()
ts_config["minPurchaseDate"] = result.minPurchaseDate
ts_config["maxPurchaseDate"] = result.maxPurchaseDate # Assumes the most recent PurchaseDate applies to all products, so zeros can be filled in appropriately for trending
db_connection.close()
del(db_cursor)
del(db_connection)
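A minimal sketch of the module-level globals this routine mutates, since they are not shown here. Only the "Database" key is confirmed by the source (main() assigns db_config["Database"]); the other ODBC-style keys and values are hypothetical placeholders, not the author's configuration.
db_config = {
    "Driver": "{SQL Server}",        # hypothetical ODBC driver name
    "Server": "localhost",           # hypothetical server
    "Database": "Profile",           # overwritten by main(db) at startup
    "Trusted_Connection": "yes",     # hypothetical auth setting
}
ts_config = {}  # filled with "minPurchaseDate" / "maxPurchaseDate" by db_get_ts_config()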
|
[
"def _create_init_time_series(self, ts_data):\n\n # Avoid changing ts_data outside function\n ts_data_used = ts_data.copy()\n\n if self.model_name == '1_region':\n expected_columns = {'demand', 'wind'}\n elif self.model_name == '6_region':\n expected_columns = {'demand_region2', 'demand_region4',\n 'demand_region5', 'wind_region2',\n 'wind_region5', 'wind_region6'}\n if not expected_columns.issubset(ts_data.columns):\n raise AttributeError('Input time series: incorrect columns')\n\n # Detect missing leap days -- reset index if so\n if detect_missing_leap_days(ts_data_used):\n logging.warning('Missing leap days detected in input time series.'\n 'Time series index reset to start in 2020.')\n ts_data_used.index = pd.date_range(start='2020-01-01',\n periods=self.num_timesteps,\n freq='h')\n\n # Demand must be negative for Calliope\n ts_data_used.loc[:, ts_data.columns.str.contains('demand')] = (\n -ts_data_used.loc[:, ts_data.columns.str.contains('demand')]\n )\n\n return ts_data_used",
"def __init__(self):\n\n # Isolating date range for the target restructuring week\n self.restr_week_start = (config.setf_restructure_trigger['week']\n - 1) * 7\n self.restr_week_end = (config.setf_restructure_trigger['week']) * 7\n\n # Isolating date range for the target rebalancing week\n self.reb_week_start = (config.rebalance_trigger['week'] - 1) * 7\n self.reb_week_end = (config.rebalance_trigger['week']) * 7\n\n # Flags for keeping track of monthly triggers (to enable day wildcards)\n self.last_month_restructure = 0\n self.last_month_rebalance = 0",
"def init_app(self, app):\n\n self.app = app\n self.app.apscheduler = self\n\n self.__load_config()\n self.__load_jobs()\n\n if self.__views_enabled:\n self.__load_views()",
"def startup(self, app):\n self.scheduler.add_job(\n fetch_news,\n 'interval',\n seconds=self.TEN_MINUTES_SECS,\n misfire_grace_time=10,\n next_run_time=datetime.now())\n app.logger.info('Starting news service...')\n self.scheduler.start()",
"def set_date_range(self, begin_date, end_date):\r\n pass",
"def __init__(self, portfolio_report_config, daily):\n self._config = portfolio_report_config\n self._daily = daily",
"def __init__(\n self,\n satellite=config[\"timerange\"].get(\"satellite\"),\n product=config[\"timerange\"].get(\"product\"),\n domain=config[\"timerange\"].get(\"domain\"),\n bands=None,\n channel=None,\n ):\n self.satellite = satellite\n self.product = product\n self.domain = domain\n\n if channel is not None:\n self.bands = channel\n else:\n self.bands = bands\n\n if self.product.startswith(\"ABI\") and self.product in _product:\n # Sometimes the user might inavertantly give the domain\n # in the product name.\n self.domain = self.product[-1]\n self.product = self.product[:-1]\n\n self._check_satellite()\n self._check_product()",
"def init(self):\n self.ne_range = np.concatenate((np.array([-1]),np.linspace(0, self.awi.n_lines+1, self.needs_res)))\n \n self._best_selling, self._best_buying = 0.0, float(\"inf\")\n self._best_acc_selling, self._best_acc_buying = 0.0, float(\"inf\")\n self._best_opp_selling = defaultdict(float)\n self._best_opp_buying = defaultdict(lambda: float(\"inf\"))\n self._best_opp_acc_selling = defaultdict(float)\n self._best_opp_acc_buying = defaultdict(lambda: float(\"inf\"))\n self._sales = self._supplies = 0\n \n self.online_balance = self.awi.current_balance",
"def __init__(self, fig=None, *args, **kwargs):\n # Retrieve the series ...................\n _series = kwargs.pop('series',None)\n Subplot.__init__(self,fig,*args,**kwargs)\n# # Force fig to be defined .....\n# if fig is None:\n# fig = TSFigure(_series)\n # Process options .......................\n if _series is not None:\n assert hasattr(_series, \"dates\")\n self._series = _series.ravel()\n self.xdata = _series.dates\n self.freq = _series.dates.freq\n self.xaxis.set_major_locator\n\n else:\n self._series = None\n self.xdata = None\n self.freq = None\n self._austoscale = False\n # Get the data to plot\n self.legendsymbols = []\n self.legendlabels = []",
"def initializeDailyHistoryDatabase(self, daily, history):\n\n if daily['enable']:\n if not history['enable']:\n print (\"Daily history cannot be enabled because 'history' is not enabled\")\n return\n\n self.dbDailyHistoryHeader = {'date' : 'TEXT'}\n self.dbDailyHistoryHeader.update({key + 'MAX': value for key,value in self.sensorTypes.items()})\n self.dbDailyHistoryHeader.update({key + 'MIN': value for key,value in self.sensorTypes.items()})\n self.dbDailyHistoryHeader.update({key + 'AVG': value for key,value in self.sensorTypes.items()})\n\n self.dailyHistoryDataName = daily['name']\n self.dbc.createContainer(self.dailyHistoryDataName, self.dbDailyHistoryHeader)\n\n self.alarms.addDaily('updateDailyHistory', self.updateDailyHistoryDatabase)\n\n self.dbc.upsert('nextUpdates', 'name', {'name':'dailyHistory','nextUpdate': self.alarms.getNextUpdateStr('updateDailyHistory')})",
"def main(db):\n db_config[\"Database\"] = db\n # Load queue file\n db_get_ts_config()\n \n # Load Product Table on initialization\n productIDs = db_get_productlist()\n \n for productID in productIDs:\n timeseries(productID)\n print()\n \n \n \n #print(ts_config[\"productIDList\"][0:3]) ",
"def __init__(self, **kwargs):\n super(CoNLL_2013Config, self).__init__(**kwargs)",
"def __init__(self, config):\n self.apiKey = config['apiKey']\n self.urlBase = config['urlBase']\n self.endpoints = config['endpoints']\n self.limits = config['rateLimits']\n self.authHeader = {\n 'authorization': f'ApiKey {self.apiKey}'\n }\n self.callsMade = {\n 'second': 0,\n 'minute': 0,\n 'hour': 0,\n 'day': 0,\n 'month': 0,\n }\n self.callsRemain = self.limits.copy()\n\n ms = self.milliseconds()\n\n self.lastCall = ms\n\n timers = {\n 'second': {\n },\n 'minute': {\n },\n 'hour': {\n },\n 'day': {\n },\n 'month': {\n }\n }\n self.timers = timers\n self.start_timers()",
"def __init__(self, customDatetime):\n \n super(self.__class__, self).__init__()\n \n self.setupUi(self)\n\n self.purgeOldFiles()\n\n self.setDate(customDatetime)\n\n with open(os.path.abspath('Resources/GUI/PyForecastStyle.qss'), 'r') as styleFile:\n self.setStyleSheet(styleFile.read())\n \n self.show()\n\n self.connectEventsMenuBar()\n self.initDirectory()\n self.connectEventsSummaryTab()\n self.connectEventsStationsTab()\n self.connectEventsDataTab() \n self.connectEventsForecastOptionsTab()\n self.connectEventsRegressionTab()\n self.connectEventsDensityTab()\n\n self.threadPool = QtCore.QThreadPool()\n writeConfig('savefilename','')\n\n return",
"def __init__(self, m_and_y, sales):\r\n self.month_and_year = m_and_y\r\n self.total_sales = sales",
"def __init__(self, **kwargs):\n \n Base.__init__(self, **kwargs)\n \n if \"creationDate\" not in kwargs:\n self.creationDate = datetime.datetime.today()",
"def __init__(self, year: int, start_m: int = 0, end_m: int = 11):\n self._year = year\n self._first = year_starts_on(year)\n self._start_m = min(start_m, end_m)\n self._end_m = max(start_m, end_m)",
"def on_combo_box_start_year_activated(self) -> None:\n self.set_date_range()",
"def __init__(self):\n self.min_time = 6.0*60.0*60.0\n self.min_temp = -10.0\n self.max_temp = 10.0\n self.period = 60.0*60.0*24.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Connects to an existing database view containing the distinct ProductIDs for a given client, and returns those IDs as a list. This is highly suboptimal but works as a proof of concept.
|
def db_get_productlist():
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
productIDs = []
db_cursor.execute("exec TimeSeriesQueueGet") # Expects a table or view containing distinct ProductIDs in a 'ProductID' int field
for row in db_cursor.fetchall():
productIDs.append(row[0])
db_connection.commit()
db_connection.close()
return productIDs # Return result as a list of integers
|
[
"def _get_product_ids(prefix):\n from accelpy._application import Application\n return Application.list(prefix)",
"def get_affected_products_by_cve(self, cve):\n assert self.cursor is not None, 'DB connection not set!'\n LOGGER.debug('Looking for affected products: cve={}.'.format(cve))\n \n self.cursor.execute(\"\"\"SELECT product_id FROM affected WHERE cve = ?\"\"\", (cve,))\n \n return [id[0] for id in self.cursor.fetchall()]",
"def _scg_get_vios_ids(context, scg_pk_id, transaction=None):\n vioses = vios_find_all(context, transaction=transaction)\n session = Transaction_sqla.find_session(transaction,\n create_if_not_found=True)\n query = model_query(\n context, pvc_models.ScgViosAssociationDTO, session=session)\n assocs = query.filter_by(_scg_pk_id=scg_pk_id).all()\n vios_pk_ids = [assoc._vios_pk_id for assoc in assocs]\n return [vios['id'] for vios in vioses if vios['_pk_id'] in vios_pk_ids]",
"def get_all() -> list:\n clientes = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM clientes\")\n for row in cursor:\n cliente = Cliente(row[1], row[2], row[3], row[4], row[5], row[0])\n clientes.append(cliente)\n if debug:\n print(str(cliente))\n conn.close()\n return clientes",
"def get_all_customer_ids_from_table(table):\n\n all_id = set()\n for row in table: \n all_id.add(str(row[-1]))\n ui.print_result(all_id, \"All customers ID: \") \n return all_id",
"def get_ids():",
"def get_clients_info(clients):\n\n ids = [c.id for c in clients]\n groups = {c.id: c.group for c in clients}\n num_samples = {c.id: c.num_samples for c in clients}\n return ids, groups, num_samples",
"def system_clients_list(self, args, var_d):\n arg_d = [\n '--uuid'\n ]\n return self.check_args(args, arg_d, var_d)",
"def get_all_customer_ids_from_table(table):\n customer_ids = set()\n for row in table:\n id_customer = str(row[0])\n customer_ids.add(id_customer)\n\n return customer_ids # sales_comtroller print the table of this set",
"def sample_clients(self):\n id_list = [i + 1 for i in range(self.client_num_in_total)]\n selection = random.sample(id_list, self.client_num_per_round)\n return selection",
"def getAllClientes(self):\n database = self.database\n sql = f\"SELECT * FROM hermes.clientes;\"\n data = database.executeQuery(sql)\n lista = {}\n final = []\n if len(data) > 0:\n for x in data:\n lista = self.convertTuplaToDicc(x, True)\n final.append(lista)\n return final",
"def fetch_iup_ids(cls):\n with CursorFromConnectionPool() as cursor:\n \"\"\"\n Open and close the connection --> calling connection_pool.getconn() and after committing and closing the\n connection calling the connection_pool.putconn(self.connection) to put the connection in the pool\n \"\"\"\n inf_user_ids = []\n try:\n cursor.execute(\"SELECT iup.id FROM iup;\")\n data = cursor.fetchall()\n for infected_user_id in data:\n inf_user_ids.append(infected_user_id[0])\n return inf_user_ids\n except:\n print(\"Failed to read the table contents ...\")",
"def build_index_of_products_by_id(products):\n\n print (\"{:40}\".format(inspect.stack()[0][3]), end='', flush=True)\n products_by_id = {}\n for product in products:\n products_by_id[product[\"id\"]] = product\n return products_by_id",
"def getActiveIds(self):\n ret = []\n for client in self.clients:\n if self.isClientActive(self.getClient(client)):\n ret.append(client)\n return ret",
"def add_product_ids(self, driver, productlist):\n frame = driver.find_element_by_id(\"searchResults\")\n wrappers = frame.find_elements_by_class_name(\"productBrand\")\n for i in wrappers:\n productlist.append(i.find_element_by_xpath('.//a').get_attribute('href').split(\"skuId=\")[-1])",
"def get_all_ids(conn, table_name):\r\n try:\r\n c = conn.cursor()\r\n c.execute(\"SELECT Student_ID FROM {tn}\".format(tn=table_name))\r\n ids = c.fetchall()\r\n id_vals = [str(tup[0]) for tup in ids]\r\n print(id_vals)\r\n return id_vals\r\n except Exception as e:\r\n print(\"Something went wrong with getting to db\")\r\n print(e)",
"def get_client_list(self, globs):\n return self._expand_globs(globs, self.core.metadata.clients)",
"def view_client():\n\n\tclient_id = request.args.get('client_id')\n\tclient_result = Client.query.filter_by(id=client_id).first()\n\tinteraction_results = db.session.query(Interaction).filter_by(client_id=client_id)[:-30:-1]\n\tnote_results = db.session.query(ClientNote).filter_by(client_id=client_id)[:-30:-1]\n\n\treturn render_template('specific_client.html', client=client_result, \n\t\t\t\t\t\t interactions=interaction_results, yearsList = years, \n\t\t\t\t\t\t notes=note_results, pretty_date=pretty_date)",
"def load_customer_ids(data_main):\n data_main = data_main.dropna()\n return data_main['customer_id'].drop_duplicates().to_list()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Accepts a single ProductID. Queries the profile database to get the DAILY sales counts for that single ProductID. This is then converted into a clean time series, bounded by the min and max purchase dates held in ts_config, with all missing dates filled in with zero sales. Returns a pandas Series indexed by date for further processing.
|
def db_get_trx_series(productID):
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
db_cursor.execute("select * from dbo.fxTransactionSeries(?)", productID)
result = db_cursor.fetchsarray()
db_connection.close()
ts_idx = pd.date_range(ts_config["minPurchaseDate"], ts_config["maxPurchaseDate"])
df = pd.DataFrame(result)
df.set_index("PurchaseDate", drop=True, append=False, inplace=True, verify_integrity=False) # Set Pandas index to the date column
ts = pd.Series(df["Purchases"])
ts.index = pd.DatetimeIndex(ts.index)
ts = ts.reindex(ts_idx, fill_value=0)
return ts # Returns a Series indexed by Date, no missing dates and all zeros filled
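A self-contained sketch (with made-up dates and counts) of the reindex-with-zero-fill step above, showing how gaps in the raw sales history become explicit zero-sale days:
import pandas as pd

# Hypothetical raw query result: two sale days with a one-day gap between them
raw = pd.DataFrame({"PurchaseDate": ["2015-01-01", "2015-01-03"], "Purchases": [5, 2]})
raw["PurchaseDate"] = pd.to_datetime(raw["PurchaseDate"])

# Reindex onto the full client-wide date range so the missing day reads as 0
full_range = pd.date_range("2015-01-01", "2015-01-04")   # stands in for the ts_config bounds
ts = raw.set_index("PurchaseDate")["Purchases"].reindex(full_range, fill_value=0)
print(ts)   # 2015-01-01 -> 5, 2015-01-02 -> 0, 2015-01-03 -> 2, 2015-01-04 -> 0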
|
[
"def __get_product_ts(self, product_id):\n d_range_s = FROM\n d_range_e = TO\n resample = \"W\"\n new_ts = self._ts[self._ts[\"IDPRODUCTO\"] == product_id][\"#UNIDADES\"]\n ts = new_ts[d_range_s:d_range_e].resample(resample).mean().fillna(1)\n ts = np.log(ts)\n ts[ts == -inf] = 0\n return ts",
"def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)",
"def get_sales(self):\n return torch.from_numpy(self.sales_df.iloc[:, 5:].values).type(torch.get_default_dtype())",
"def get_count_timeseries():\n\n\n count_data = (db.session.query(CountItem)\n .join(UserCountType)\n .join(UserCondition)\n .filter(UserCountType.is_tracked==True,\n CountItem.count > 0,\n UserCondition.user_id==session['userid'])\n .order_by(CountItem.count_date)\n .order_by(UserCountType.usercond_id)\n .all())\n \n count_data_list = []\n for count_item in count_data:\n user_count_name = count_item.user_count_type.count_type.count_name\n count_data_list.append({\"name\": user_count_name,\n \"date\": str(count_item.count_date.date()),\n \"count\": count_item.count})\n\n return(jsonify(count_data_list))",
"def timeseries(productID):\n ts = db_get_trx_series(productID) # Get a Time-Series vector for a specific product #1587\n ts_values = {}\n \n # Compute exponentially weighted moving averages (EWMAs) for specific time periods\n ewma7 = pd.Series(pd.ewma(ts, span=7, freq=\"D\"))\n ewma14 = pd.Series(pd.ewma(ts, span=14, freq=\"D\"))\n ewma30 = pd.Series(pd.ewma(ts, span=30, freq=\"D\"))\n \n # Compute moving average convergence-divergence to identify strength and direction of trend\n # ASSUMES no partial days are provided; transaction counts are for a full day\n macd = pd.Series(ewma14 - ewma30)\n \n # Get the tail value or last value we observed from each of the EWMA calculations\n ts_values[\"macd\"] = get_single_value(macd, 1)\n ts_values[\"ewma7\"] = get_single_value(ewma7, 1)\n ts_values[\"ewma14\"] = get_single_value(ewma14, 1)\n ts_values[\"ewma30\"] = get_single_value(ewma30, 1)\n \n try:\n # Apply Hodrick-Prescott filter to separate out seasonality (ts_cycle) from overall linear trend (ts_trend)\n ts_cycle, ts_trend = sm.tsa.filters.hpfilter(ts.resample(\"M\", how=\"sum\"), 129600)\n \n except ValueError:\n #print(\"Skipping ValueError (sparse matrix) for ProductID=\" + str(productID)) \n ts_values[\"ts_cycle\"] = 0\n ts_values[\"ts_cycle_z\"] = 0\n print(productID, \"***********************************ERROR -- Time Series\")\n \n else:\n ts_cycle_z = (ts_cycle - ts_cycle.mean()) / ts_cycle.std()\n #ts_trend_z = (ts_trend - ts_trend.mean()) / ts_trend.std()\n ts_values[\"ts_cycle\"] = get_single_value(ts_cycle, 13) \n ts_values[\"ts_cycle_z\"] = get_single_value(ts_cycle_z, 13)\n #print(\"OK\", productID, ts_values[\"ts_cycle\"])\n \n print(productID, \"-- Time Series Completed\")\n db_update_weights(productID, ts_values)",
"def get_production_data():\n\n f_start = time.time()\n\n # read in production data\n prod_df = pd.DataFrame()\n for sheet in [ 'Monthly_PROD1_OFM', 'Monthly_PROD2_OFM', 'Monthly_PROD3_OFM', 'Monthly_PROD4_OFM' ]:\n sheet_df = pd.read_excel( xlsx, sheetname=sheet )\n prod_df = pd.concat( [ prod_df, sheet_df ], ignore_index=True )\n\n\n # drop missing values\n start = time.time()\n prod_df.dropna( how='all', inplace=True ) # 2208 rows in PROD2 and 617 rows in PROD3 are completely blank\n prod_df.dropna( subset=[ 'OIL [bbl]', 'GAS [Mcf]', 'WATER [bbl]' ], how='all', inplace=True ) # 12 rows in PROD2, 9 rows in PROD3, and 10 rows in PROD4 have no prod values for rows with an assigned UNIQUEID & date\n utils.print_log( 'drop missing: ' + utils.secs_to_hms( time.time() - start ) )\n\n # resample to ensure monthly increments\n start = time.time()\n prod_df = prod_df.set_index( 'Date' ).groupby( 'UNIQUEID' ).resample( '1M' ).sum().reset_index()\n prod_df.loc[ ~prod_df[ 'DAYS [days]' ].isnull(), 'Well_Open' ] = 1\n prod_df[ 'Well_Open' ].fillna( 0, inplace=True )\n prod_df[ 'OIL [bbl]' ].fillna( 0, inplace=True )\n prod_df[ 'GAS [Mcf]' ].fillna( 0, inplace=True )\n prod_df[ 'WATER [bbl]' ].fillna( 0, inplace=True )\n prod_df.drop( 'DAYS [days]', axis=1, inplace=True )\n utils.print_log( 'resample: ' + utils.secs_to_hms( time.time() - start ) )\n\n # add counter for months of production history\n start = time.time()\n prod_df[ 'Well_Age' ] = prod_df.groupby( 'UNIQUEID' ).cumcount() + 1\n utils.print_log( 'age counter: ' + utils.secs_to_hms( time.time() - start ) )\n\n utils.print_log( 'production prepped: ' + utils.secs_to_hms( time.time() - f_start ) )\n return prod_df",
"def all_sales():\n return [\n {\n \"sale_id\": 1,\n \"product\": \"Samsung Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 4500000\n },\n {\n \"sale_id\": 2,\n \"product\": \"Toshiba Flatscreen Tv\",\n \"quantity\": 6,\n \"price\": 9000000\n },\n {\n \"sale_id\": 3,\n \"product\": \"LG Flatscreen Tv\",\n \"quantity\": 12,\n \"price\": 1500000\n },\n {\n \"sale_id\": 4,\n \"product\": \"Sony Flatscreen Tv\",\n \"quantity\": 1,\n \"price\": 500000\n },\n {\n \"sale_id\": 5,\n \"product\": \"Hisense Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 800000\n },\n ]",
"def get_sale_by_prod_id(self, prod_id):\n self.cursor.execute(\"Select * from sales where product_id = %s\",\n (prod_id,))\n sales = self.cursor.fetchall()\n if sales:\n return sales",
"def getDailyReturns(self, startDate, endDate):\n self.startDate = startDate\n self.endDate = endDate\n \n price = yf.download(stock,startDate,endDate)\n self.dReturns = pd.DataFrame(np.log(price)-np.log(price).shift(1),index=price.index)\n self.dReturns.columns = self.tickers\n self.dReturns.dropna(inplace = True)",
"def get_sales_from_date(date):\n \n return Sale.query.filter(Sale.date==date).all().order_by(\"date\")",
"def get_total_sales_daywise(self, type_of_plot):\r\n # Add Total Sales\r\n best_selling_day = self.all_data.groupby(self._day_of_week)[self._gross_sale].sum().reindex(self._ordered_day)\r\n\r\n # Reset Index\r\n best_selling_day = best_selling_day.reset_index()\r\n\r\n # Plot\r\n self.plot_data(type_of_plot, self._day_of_week, self._gross_sale, best_selling_day, \"Best Selling Day For The Year 2018\")",
"def get_user_product_sales(self, current_user):\n try:\n conn = open_connection()\n cur = conn.cursor()\n cur.execute(\"SELECT products.product_id, products.product_name,\\\n products.product_model, sales.quantity, sales.total_price FROM \\\n products INNER JOIN sales ON products.product_id = \\\n sales.product_id WHERE sales.created_by = %s\", (current_user,))\n product_sales = cur.fetchall()\n close_connection(conn)\n return product_sales\n except Exception as e:\n print(e)",
"def daily_counts(self, account):\n Memo = apps.get_model(\"diary\", \"Memo\")\n memo = Memo.objects.filter(date=date.today(), author=account.user).first()\n\n # If there is no memo object, we cannot create the daily_counts - no error\n if not memo:\n return\n\n # Associate the article counts with the memo\n if not hasattr(memo, \"article_counts\"):\n counts = self.create(memo=memo)\n else:\n counts = memo.article_counts\n\n # Compute the number of articles read today\n counts.read = memo.articles.count()\n\n # Create a queryset for the instapaper account\n Article = apps.get_model(\"reading\", \"Article\")\n qs = Article.instapaper.account(account)\n\n # Add the current unread count for today\n counts.unread = qs.unread().count()\n\n # Filter for year to date counts\n qs = qs.ytd(memo.date.year)\n counts.archived= qs.archived().count()\n counts.starred = qs.starred().count()\n counts.save()\n\n return counts",
"def find_product_mentions_time_series(product_url, start_date):\n from debra.models import ProductModelShelfMap\n import datetime\n pmsm = ProductModelShelfMap.objects.all()\n\n pmsm = pmsm.filter(product_model__prod_url__icontains=product_url)\n\n pmsm = pmsm.filter(added_datetime__gte=start_date)\n\n pmsm = pmsm.prefetch_related('product_model__brand')\n pmsm = pmsm.filter(shelf__name__iexact='Products from my blog')\n print(\"* Starting for %s since %s\" % (product_url, start_date))\n month = timedelta(days=30)\n tod = datetime.date.today()\n start = start_date\n while start <= tod:\n\n next = start + month\n pmsm_range = pmsm.filter(added_datetime__gte=start).filter(added_datetime__lte=next)\n print(\"[%s]\\t[%s]\\t%d\\t%d\\t%d\" % (start, next, pmsm_range.count(), pmsm_range.distinct('post').count(), pmsm_range.distinct('post__influencer').count()))\n start = next",
"def get_daily_returns():\n portfolio = request.get_json(force=True)\n start_date = parse_date(request.args.get('start'))\n end_date = parse_date(request.args.get('end'))\n prices_df = prepare_dataframe(portfolio, start_date, end_date)\n performance = compute_daily_returns(prices_df)\n return performance.to_json(orient='index')",
"def toSeries(self):\n x = np.linspace(self._domain[0], self._domain[1], self._xbins)\n y = self._mass\n return pd.DataFrame (cbind(x,y), columns=['x','density'])",
"def _collect_price_time_series(self):\n r = requests.get(self.GRAPH_URL)\n #dictionary of 2 dictionaries, \"daily\" and \"average\"\n response = r.json()\n daily_series = TimeSeries.from_dictionary(response[\"daily\"])\n average_series = TimeSeries.from_dictionary(response[\"average\"])\n return (daily_series, average_series)",
"def index_sales(sale_count):\r\n data['index'] = list(range(sale_count))\r\n \r\n date = 0 \r\n price = 1\r\n \r\n for i in data['index']:\r\n sales['sale_' + str(i)] = [data['sales'][date], data['sales'][price]]\r\n date += 2\r\n price += 2",
"def get_prices(self, fillna=0.):\n x = torch.from_numpy(self.prices_df.values).type(torch.get_default_dtype())\n x[torch.isnan(x)] = fillna\n x = x.repeat_interleave(7, dim=-1)[:, :self.calendar_df.shape[0]]\n assert x.shape == (self.num_timeseries, self.num_days)\n return x"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Accepts a single ProductID as a parameter. Retrieves a timeseries vector for that product and creates several moving averages (e.g., ewma7) from that data to identify upward/downward trends. Plucks the last value from each of those moving averages and writes it to a ts_values dict. Attempts to separate seasonality from trend into two values (ts_cycle, ts_trend) and writes those to the ts_values dict as well. Loads all resulting weights to the DB for that ProductID.
|
def timeseries(productID):
ts = db_get_trx_series(productID) # Get a Time-Series vector for a specific product #1587
ts_values = {}
# Compute exponentially weighted moving averages (EWMAs) for specific time periods
ewma7 = pd.Series(pd.ewma(ts, span=7, freq="D"))
ewma14 = pd.Series(pd.ewma(ts, span=14, freq="D"))
ewma30 = pd.Series(pd.ewma(ts, span=30, freq="D"))
# Compute moving average convergence-divergence to identify strength and direction of trend
# ASSUMES no partial days are provided; transaction counts are for a full day
macd = pd.Series(ewma14 - ewma30)
# Get the tail value or last value we observed from each of the EWMA calculations
ts_values["macd"] = get_single_value(macd, 1)
ts_values["ewma7"] = get_single_value(ewma7, 1)
ts_values["ewma14"] = get_single_value(ewma14, 1)
ts_values["ewma30"] = get_single_value(ewma30, 1)
try:
# Apply Hodrick-Prescott filter to separate out seasonality (ts_cycle) from overall linear trend (ts_trend)
ts_cycle, ts_trend = sm.tsa.filters.hpfilter(ts.resample("M", how="sum"), 129600)
except ValueError:
#print("Skipping ValueError (sparse matrix) for ProductID=" + str(productID))
ts_values["ts_cycle"] = 0
ts_values["ts_cycle_z"] = 0
print(productID, "***********************************ERROR -- Time Series")
else:
ts_cycle_z = (ts_cycle - ts_cycle.mean()) / ts_cycle.std()
#ts_trend_z = (ts_trend - ts_trend.mean()) / ts_trend.std()
ts_values["ts_cycle"] = get_single_value(ts_cycle, 13)
ts_values["ts_cycle_z"] = get_single_value(ts_cycle_z, 13)
#print("OK", productID, ts_values["ts_cycle"])
print(productID, "-- Time Series Completed")
db_update_weights(productID, ts_values)
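Note that pd.ewma(...) and resample("M", how="sum") are old pandas idioms that were removed after the 0.x releases. A hedged sketch of the equivalent calls on current pandas/statsmodels, assuming ts is the daily Series returned by db_get_trx_series (which makes the old freq="D" argument redundant):
import statsmodels.api as sm   # as used by the original code for hpfilter

# ts: daily pandas Series of purchase counts for one ProductID
ewma7 = ts.ewm(span=7).mean()      # replaces pd.ewma(ts, span=7, freq="D")
ewma14 = ts.ewm(span=14).mean()
ewma30 = ts.ewm(span=30).mean()
macd = ewma14 - ewma30

monthly = ts.resample("M").sum()   # replaces ts.resample("M", how="sum")
ts_cycle, ts_trend = sm.tsa.filters.hpfilter(monthly, lamb=129600)  # same call, smoothing parameter named explicitly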
|
[
"def predict_product(self, product_id):\n product_ts = self.__get_product_ts(product_id)\n\n model = SARIMAX(product_ts, order=(0,1,2),\n time_varying_regression=True,\n mle_regression=False,\n trend='n',\n seasonal_order=(1,1,1,11)).fit()\n steps = PREDICTION_TIME * 4\n forecast = model.get_forecast(steps=steps, dynamic=True)\n history = product_ts[(product_ts.index > \"2015\") & (product_ts.index < \"2016\")]\n history = history.fillna(0)\n # Output\n predicted_mean = forecast.predicted_mean\n conf_int = forecast.conf_int()\n return np.exp(history), np.exp(predicted_mean), np.exp(conf_int)\n # return history, predicted_mean, conf_int",
"def __get_product_ts(self, product_id):\n d_range_s = FROM\n d_range_e = TO\n resample = \"W\"\n new_ts = self._ts[self._ts[\"IDPRODUCTO\"] == product_id][\"#UNIDADES\"]\n ts = new_ts[d_range_s:d_range_e].resample(resample).mean().fillna(1)\n ts = np.log(ts)\n ts[ts == -inf] = 0\n return ts",
"def db_get_trx_series(productID):\n db_connection = iopro.connect(**db_config)\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"select * from dbo.fxTransactionSeries(?)\", productID)\n result = db_cursor.fetchsarray()\n db_connection.close()\n \n ts_idx = pd.date_range(ts_config[\"minPurchaseDate\"], ts_config[\"maxPurchaseDate\"])\n df = pd.DataFrame(result)\n df.set_index(\"PurchaseDate\", drop=True, append=False, inplace=True, verify_integrity=False) # Set Pandas index to the date column\n ts = pd.Series(df[\"Purchases\"])\n ts.index = pd.DatetimeIndex(ts.index)\n ts = ts.reindex(ts_idx, fill_value=0)\n \n return ts # Returns a Series indexed by Date, no missing dates and all zeros filled",
"def main(db):\n db_config[\"Database\"] = db\n # Load queue file\n db_get_ts_config()\n \n # Load Product Table on initialization\n productIDs = db_get_productlist()\n \n for productID in productIDs:\n timeseries(productID)\n print()\n \n \n \n #print(ts_config[\"productIDList\"][0:3]) ",
"def db_update_weights(productID, weights_dict):\n db_connection = iopro.connect(**db_config) \n db_cursor = db_connection.cursor()\n \n for k, v in weights_dict.items():\n db_cursor.execute(\"insert into dbo.TimeSeriesWeights_TMP values (?,?,?)\", productID, k, v)\n \n db_connection.commit()\n db_connection.close()\n print(productID, \"-- Loading Weights...\")",
"def generate_station_timeseries(self, hoverData, rows):\n df_rows = Utility.to_dataframe(rows)\n\n try:\n if hoverData[\"points\"][0][\"customdata\"] != \"\":\n station_post_code = hoverData[\"points\"][0][\"customdata\"]\n else:\n station_post_code = df_rows[\"Post Code\"].iloc[0] # [13]\n except (KeyError, TypeError):\n station_post_code = df_rows[\"Post Code\"].iloc[0] # [13]\n\n df = self.get_station_data(station_post_code)\n brand = df.iloc[0][\"Brand\"] # [13]\n station_post_code = df.iloc[0][\"PostCode\"] # [13]\n if df[\"1-Day Prediction Confidence\"].iloc[0] > 200: # [13]\n hoverData = {\"points\": [{\"customdata\": \"\"}]}\n df = self.get_station_data(df_rows[\"Post Code\"].iloc[0]) # [13]\n\n db = DatabaseModel()\n master = db.get_master()\n prediction = Processor(\n df.iloc[0][\"Brand\"],\n df.iloc[0][\"Town\"],\n df.iloc[0][\"County\"],\n df.iloc[0][\"PostCode\"],\n df.iloc[0][\"FuelType\"],\n df.iloc[0][\"Price\"],\n df.iloc[0][\"SearchPostCode\"],\n master,\n ) # [13]\n prediction = prediction.get_predictions()\n df1, predicted_df = prediction[\"df\"], prediction[\"prediction\"]\n df1.set_index(\"Date\", inplace=True) # [22]\n df1.rename(columns={\"Price\": \"Prediction\"}, inplace=True) # [8]\n df = pd.concat([df1, predicted_df]) # [23]\n data = {\"df\": df, \"brand\": brand, \"station_post_code\": station_post_code}\n return data",
"def _collect_price_time_series(self):\n r = requests.get(self.GRAPH_URL)\n #dictionary of 2 dictionaries, \"daily\" and \"average\"\n response = r.json()\n daily_series = TimeSeries.from_dictionary(response[\"daily\"])\n average_series = TimeSeries.from_dictionary(response[\"average\"])\n return (daily_series, average_series)",
"def calculateAverageProductData( self, style, indent = '', **kwargs ) :\n\n self.__product.calculateAverageProductData( style, indent = indent, **kwargs )",
"def create_transit_all_lightcurves_dataproduct(self) -> DataProduct:\n\n print(\n f\"Transit processing {len(self.data_products)} data products: {self.data_products}\"\n )\n\n return self.extract_and_save_transit_all_light_curves()",
"def derive_variables(self, window, freq):\r\n \r\n length = len(self.price)\r\n window = window # time window for FFM regression model\r\n freq = freq # frequency of regression calibration\r\n \r\n sp = pd.Series(-1, index=self.price.index)\r\n # sp: Equals 1 when the slope of price trend is significantly positive\r\n sn = pd.Series(-1, index=self.price.index)\r\n # sn: Equals 1 when the slope of price trend is significantly negative \r\n c_f = pd.Series(0.0, index=self.price.index)\r\n # c_f: forecast close from linear model using previous 14 close\r\n fo = pd.Series(0.0, index=self.price.index)\r\n # fo: forecast oscillator\r\n ma3 = pd.Series(0.0, index=self.price.index)\r\n # 3-day mover average of the forecast oscillator\r\n lu = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the oscillator crosses upward over its ma3\r\n ld = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the oscillator crosses downward over its ma3\r\n \r\n up_moment = pd.Series(0.0, index=self.price.index)\r\n # up-day moment, equal |close_t - close_t-1| if close_t > close_t-1 o.w. 0\r\n down_moment = pd.Series(0.0, index=self.price.index)\r\n # down-day moment, equal |close_t - close_t-1| if close_t < close_t-1 o.w. 0\r\n ud = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the closing price of the index is up at the present day\r\n aud = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the closing prices are either up or down consecutively \r\n # for at least 3 days\r\n \r\n upd = pd.Series(0, index=self.price.index)\r\n # equals 1 when the closing price of next day exceeds present day\r\n dnd = pd.Series(0, index=self.price.index)\r\n # equals 1 when the closing price of next day is less than present day\r\n \r\n sd = pd.Series(0.0, index=self.price.index)\r\n # up-day moment over 14-days\r\n su = pd.Series(0.0, index=self.price.index)\r\n # down-day moment over 14-days\r\n rsi = pd.Series(0.0, index=self.price.index)\r\n # relative strength index\r\n rsi_h = pd.Series(0.0, index=self.price.index)\r\n # highest RSI over past 14 days (incl. current)\r\n rsi_l = pd.Series(0.0, index=self.price.index)\r\n # lowest RSI over past 14 days (incl. 
current)\r\n stoch_rsi = pd.Series(0.0, index=self.price.index)\r\n # stochastic RSI\r\n \r\n rsi1 = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the stochastic RSI falls from 100\r\n rsi2 = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the stochastic RSI rises from 0\r\n rsi3 = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the stochastic RSI is greater than 90\r\n rsi4 = pd.Series(-1, index=self.price.index)\r\n # equals 1 when the stochastic RSI is less than 10\r\n \r\n x = sm.add_constant(range(1, window+1)) # prepare x for regression\r\n \r\n # below variables start at index window, since regression takes window data points to start\r\n for t in range(window, length):\r\n if t % freq == 0:\r\n y = self.price[(t - window):t].values\r\n # run regression and evaluate beta and p-value\r\n model = regression.linear_model.OLS(y, x).fit()\r\n if model.params[1] > 0 and model.pvalues[1] < 0.05:\r\n sp[t] = 1 \r\n elif model.params[1] < 0 and model.pvalues[1] < 0.05:\r\n sn[t] = 1 \r\n x1 = (1, window+1) # prepare X for one-step forecast\r\n c_f[t] = np.dot(x1, model.params) # forecast price using regression\r\n fo[t] = 100*(self.price[t] - c_f[t])/self.price[t]\r\n\r\n # below variables start at index window+2, since ma3 takes another 2 data points to start\r\n for t in range(window + 2, length):\r\n ma3[t] = (fo[t] + fo[t-1] + fo[t-2])/3\r\n if fo[t-1] < ma3[t-1] and fo[t] > ma3[t]: \r\n lu[t] = 1 # fo cross upward over ma3\r\n elif fo[t-1] > ma3[t-1] and fo[t] < ma3[t]:\r\n ld[t] = 1 # fo cross downward over ma3\r\n \r\n # below variables start at index 1\r\n for t in range(1, length):\r\n if self.price[t] > self.price[t-1]:\r\n up_moment[t] = abs(self.price[t] - self.price[t-1])\r\n ud[t] = 1\r\n elif self.price[t] < self.price[t-1]:\r\n down_moment[t] = abs(self.price[t] - self.price[t-1])\r\n\r\n # below variables start at index 3\r\n for t in range(3, length):\r\n if ((self.price[t] > self.price[t-1] > self.price[t-2] > self.price[t-3]) or\r\n (self.price[t] < self.price[t-1] < self.price[t-2] < self.price[t-3])):\r\n aud[t] = 1\r\n \r\n # below variables start at index 0 till index length - 1\r\n for t in range(0, length - 1):\r\n if self.price[t+1] > self.price[t]:\r\n upd[t] = 1 # equals 0 otherwise\r\n elif self.price[t+1] < self.price[t]:\r\n dnd[t] = 1 # equals 0 otherwise\r\n \r\n # below variables start at index window, since up_moment & down_moment takes\r\n # 1 data point to start, and RSI takes (window-1) to start\r\n # All three include time t value\r\n for t in range(window, length):\r\n su[t] = up_moment[t - window + 1:t + 1].sum()\r\n sd[t] = down_moment[t - window + 1:t + 1].sum()\r\n rsi[t] = 100 * su[t] / (su[t] + sd[t])\r\n '''corrected RSI formula from original paper'''\r\n \r\n # below variables start at index 2*window-1, since rsi_h and rsi_l take\r\n # another (window-1) data points to start\r\n # All three include time t value\r\n for t in range(2*window - 1, length):\r\n rsi_h[t] = max(rsi[t - window + 1:t + 1])\r\n rsi_l[t] = min(rsi[t - window + 1:t + 1])\r\n stoch_rsi[t] = (100 * (rsi[t] - rsi_l[t]) / (rsi_h[t] - rsi_l[t]))\r\n \r\n # below variables start at index 2*window-1, since stoch_rsi takes 2*window-1 data points to start\r\n for t in range(2*window - 1, length):\r\n if stoch_rsi[t-1] == 100.0 and stoch_rsi[t] < 100.0:\r\n rsi1[t] = 1\r\n elif stoch_rsi[t-1] == 0.0 and stoch_rsi[t] > 0.0:\r\n rsi2[t] = 1\r\n if stoch_rsi[t] > 90.0:\r\n rsi3[t] = 1\r\n elif stoch_rsi[t] < 10.0:\r\n rsi4[t] = 1\r\n \r\n # append 
calculated variables to price and define data frames\r\n self.intermediate_vars = pd.concat([self.price, c_f, fo, ma3, up_moment,\r\n down_moment, su, sd, rsi, rsi_h, rsi_l,\r\n stoch_rsi], axis=1).iloc[2*window - 1:, ]\r\n self.intermediate_vars.columns = [\"close\", \"forec_close\", \"forecast_oscillator\",\r\n \"ma3\", \"up_moment\", \"down_moment\", \"su\", \"sd\",\r\n \"rsi\", \"rsi_h\", \"rsi_l\", \"stoch_rsi\"]\r\n self.sample = pd.concat([self.price, sp, sn, lu, ld, ud, aud, upd, dnd, \r\n rsi1, rsi2, rsi3, rsi4], axis=1).iloc[2*window - 1:, ]\r\n self.sample.columns = [\"close\", \"sp\", \"sn\", \"lu\", \"ld\", \"ud\", \"aud\",\r\n \"upd\", \"dnd\", \"rsi1\", \"rsi2\", \"rsi3\", \"rsi4\"]\r\n \r\n return self.sample",
"async def tp_trend(self, ctx, *, item: str):\n\t\tuser = ctx.message.author\n\t\tcolor = self.getColor(user)\n\t\tchoice = await self.itemname_to_id(item, user)\n\t\ttry:\n\t\t\tcommerce = 'commerce/prices/'\n\t\t\tchoiceid = str(choice[\"_id\"])\n\t\t\tshinies_endpoint = 'history/' + choiceid\n\t\t\thistory = await self.call_shiniesapi(shinies_endpoint)\n\t\texcept ShinyAPIError as e:\n\t\t\tawait self.bot.say(\"{0.mention}, API has responded with the following error: \"\n\t\t\t\t\t\t\t \"`{1}`\".format(user, e))\n\t\t\treturn\n\t\t\n\t\ttime_now = int(time.time())\n\t\t\n\t\t# Select 96 entries, each (usually) spaced 15 minutes apart.\n\t\tlast_week = history[:96]\n\t\t\n\t\t# No data returned?\n\t\tif not last_week:\n\t\t\tawait self.bot.say(\"{0.mention}, there was no historical data found.\".format(user))\n\t\t\treturn\n\t\t\n\t\tbuy_avg = 0\n\t\tsell_avg = 0\n\t\tbuy_min = float(\"inf\")\n\t\tsell_min = float(\"inf\")\n\t\tbuy_max = 0\n\t\tsell_max = 0\n\t\t\n\t\t# Get average from 96 most recent entries\n\t\tfor record in last_week:\n\t\t\tbuy = int(record[\"buy\"])\n\t\t\tsell = int(record[\"sell\"])\n\t\t\tbuy_avg += buy\n\t\t\tsell_avg += sell\n\t\t\tbuy_min = min(buy_min, buy)\n\t\t\tsell_min = min(sell_min, sell)\n\t\t\tbuy_max = max(buy_max, buy)\n\t\t\tsell_max = max(sell_max, sell)\n\t\t\n\t\tbuy_avg /= len(last_week)\n\t\tsell_avg /= len(last_week)\n\t\t\n\t\t# Display data\n\t\tdata = discord.Embed(title=\"Daily average of id \" + choiceid, colour=color)\n\t\tdata.add_field(name=\"Average Buy\",value=self.gold_to_coins(buy_avg))\n\t\tdata.add_field(name=\"Minimum Buy\",value=self.gold_to_coins(buy_min))\n\t\tdata.add_field(name=\"Maximum Buy\",value=self.gold_to_coins(buy_max))\n\t\tdata.add_field(name=\"Average Sell\",value=self.gold_to_coins(sell_avg))\n\t\tdata.add_field(name=\"Minimum Sell\",value=self.gold_to_coins(sell_min))\n\t\tdata.add_field(name=\"Maximum Sell\",value=self.gold_to_coins(sell_max))\n\t\t\n\t\ttry:\n\t\t\tawait self.bot.say(embed=data)\n\t\texcept discord.HTTPException:\n\t\t\tawait self.bot.say(\"Issue embedding data into discord\")",
"def calculateAverageProductData( self, style, indent = '', **kwargs ) :\n\n if( not( isinstance( style, stylesModule.averageProductData ) ) ) : raise TypeError( 'Invalid style' )\n\n verbosity = kwargs.get( 'verbosity', 0 )\n kwargs['verbosity'] = verbosity\n\n incrementalIndent = kwargs.get( 'incrementalIndent', ' ' )\n kwargs['incrementalIndent'] = incrementalIndent\n indent2 = indent + incrementalIndent\n\n logFile = kwargs.get( 'logFile', nullDevice( ) )\n kwargs['logFile'] = logFile\n\n energyAccuracy = kwargs.get( 'energyAccuracy', 1e-5 )\n kwargs['energyAccuracy'] = energyAccuracy\n momentumAccuracy = kwargs.get( 'momentumAccuracy', 1e-3 )\n kwargs['momentumAccuracy'] = momentumAccuracy\n\n kwargs['incidentEnergyUnit'] = self.reactions[0].crossSection.domainUnit\n kwargs['momentumDepositionUnit'] = kwargs['incidentEnergyUnit'] + '/c'\n kwargs['massUnit'] = kwargs['incidentEnergyUnit'] + '/c**2'\n kwargs['projectileMass'] = self.PoPs[self.projectile].getMass( kwargs['massUnit'] )\n kwargs['targetMass'] = self.PoPs[self.target].getMass( kwargs['massUnit'] )\n\n if( verbosity > 0 ) : print '%s%s' % ( indent, self.inputParticlesToReactionString( suffix = \" -->\" ) )\n\n kwargs['reactionSuite'] = self\n for reaction in self.reactions :\n reaction.calculateAverageProductData( style, indent = indent2, **kwargs )\n for reaction in self.orphanProducts :\n reaction.calculateAverageProductData( style, indent = indent2, **kwargs )",
"def _sensor(product_id):\n sid = product_id[:2]\n if sid == 'LC':\n return 'OLI_TIRS'\n elif sid == 'LO':\n return 'OLI'\n elif sid == 'LE':\n return 'ETM'\n elif sid == 'LT':\n return 'TM'\n elif sid == 'LM':\n return 'MSS'",
"def get_moving_average():\n portfolio = request.get_json(force=True)\n try:\n window = int(request.args.get('window'))\n except:\n pass\n start_date = parse_date(request.args.get('start'))\n end_date = parse_date(request.args.get('end'))\n print(start_date)\n print(end_date)\n\n prices_df = prepare_dataframe(portfolio, start_date, end_date)\n performance = compute_moving_average(prices_df, window)\n return performance.to_json(orient='index')",
"def _smooth_price_data(self, sigma):\n self.High = features.gaussian_filter(self.High_raw, sigma)\n self.Low = features.gaussian_filter(self.Low_raw, sigma)\n self.Close = features.gaussian_filter(self.Close_raw, sigma)\n self.Open = features.gaussian_filter(self.Open_raw, sigma)\n self.Volume = features.gaussian_filter(self.Volume_raw, sigma)",
"def find_product_mentions_time_series(product_url, start_date):\n from debra.models import ProductModelShelfMap\n import datetime\n pmsm = ProductModelShelfMap.objects.all()\n\n pmsm = pmsm.filter(product_model__prod_url__icontains=product_url)\n\n pmsm = pmsm.filter(added_datetime__gte=start_date)\n\n pmsm = pmsm.prefetch_related('product_model__brand')\n pmsm = pmsm.filter(shelf__name__iexact='Products from my blog')\n print(\"* Starting for %s since %s\" % (product_url, start_date))\n month = timedelta(days=30)\n tod = datetime.date.today()\n start = start_date\n while start <= tod:\n\n next = start + month\n pmsm_range = pmsm.filter(added_datetime__gte=start).filter(added_datetime__lte=next)\n print(\"[%s]\\t[%s]\\t%d\\t%d\\t%d\" % (start, next, pmsm_range.count(), pmsm_range.distinct('post').count(), pmsm_range.distinct('post__influencer').count()))\n start = next",
"def run(self):\n # get aggregate output\n print(\"------Called run function-------------------------------------\")\n record_distributions = (1, self.nb_of_timesteps - 1)\n for i in range(self.nb_of_timesteps):\n print(file_marker + \"Model {}: t {}/{}\".format(\n self.id, i, self.nb_of_timesteps), end=\"\\r\")\n self.t = i\n # 1. Update firm positions on the product space\n self.add_firm_positions()\n\n # 2. Compute the prices for all products on the product space\n self.add_prices() # If output=0, then delta is used as price\n\n # 3.1 Reset production statistics on product space:\n old_output_dict = nx.get_node_attributes(\n self.product_space, \"aggregate_output\")\n\n excess_nodes = \\\n {k: v for k, v in old_output_dict.items() \\\n if v > self.nominal_demand}\n assert len(excess_nodes.keys()) == 0, \\\n \"For some nodes production > nominal demand: {}\".format(\n excess_nodes)\n\n nx.set_node_attributes(self.product_space, 0.0, \"aggregate_output\")\n\n new_output_dict = {}\n capital_stock_dict = {}\n all_deposits = 0\n all_loans = 0\n individual_output_dict = {}\n total_output_dict = dict( # Starts with value 0 for each node\n zip(self.product_space.nodes(),\n self.parameters[\"number_of_products\"] * [0]))\n\n # 3.2 Compute output for each firm, save it,\n # add it to total_output_dict\n for firm in self.firm_list:\n price = self.product_space.nodes[ \\\n firm.product_space_position][\"price\"]\n capital_stock = firm.capital_stock[-1]\n output = Firm.output_and_capability_costs(\n firm, capital_stock, price)\n if math.isnan(output) == True:\n pdb.set_trace()\n total_output_dict[firm.product_space_position] += output\n individual_output_dict[firm] = output\n\n # 3.3 If total output for a product exceeds demand,\n # update actual outputs\n nodes_with_excess_demands = \\\n {k: v for k, v in total_output_dict.items() \\\n if v > self.nominal_demand}\n\n for k in nodes_with_excess_demands.keys():\n for firm in self.firm_list:\n price = self.product_space.nodes[ \\\n firm.product_space_position][\"price\"]\n if firm.product_space_position == k:\n possible_output = \\\n firm.output_and_capability_costs(\n firm.capital_stock[-1], price)\n adapted_output = \\\n possible_output / total_output_dict[k] \\\n * self.nominal_demand\n if math.isnan(adapted_output):\n pdb.set_trace()\n individual_output_dict[firm] = \\\n self.round_half_down(adapted_output, 2)\n\n # 4. 
Location decision, investment of the firms and\n # update capital stock\n for firm in self.firm_list:\n # 4.0 Save preparatory values\n output = individual_output_dict.get(firm)\n total_output_this_period = \\\n total_output_dict.get(firm.product_space_position)\n price = self.product_space.nodes[\n firm.product_space_position][\"price\"]\n total_output_prev_t = \\\n old_output_dict[firm.product_space_position]\n\n # 4.1 Write firm output into new_output_dict\n if firm.product_space_position in new_output_dict.keys():\n new_output_dict[firm.product_space_position] = \\\n int(new_output_dict[firm.product_space_position] +\n output)\n else:\n new_output_dict[firm.product_space_position] = output\n\n assert new_output_dict[firm.product_space_position] <= \\\n self.nominal_demand, \\\n \"new_output_dict value {} exceeds maximum {}\".format(\n new_output_dict[firm.product_space_position],\n self.nominal_demand)\n\n # 4.2 Firms choose new spot on the product space, compute actual\n # profits, make investment decisions demand money from banks \n # and update capital account\n capital_stock, account_firm = firm.update_firm(\n price=price,\n financial_regime_parameter=self.financial_regime_parameter,\n output=output,\n total_output_this_period=total_output_this_period)\n if account_firm > 0:\n all_deposits += account_firm\n else:\n all_loans += account_firm\n capital_stock_dict[firm] = capital_stock\n\n # 5. Update bank accounts\n account_bank = self.bank_list[0].update_bank(all_deposits,\n all_loans)\n assert account_bank == all_loans + all_deposits, \\\n \"ALERT: account_bank={}, all_loans + all_deposits={}+{}={}!\"\\\n .format(account_bank,\n all_loans,\n all_deposits,\n all_loans + all_deposits)\n\n # 6. Update the output for each product *on the product space*\n excess_nodes = \\\n {k: v for k, v in new_output_dict.items()\n if v > self.nominal_demand}\n assert len(excess_nodes.keys()) == 0, \\\n \"For some nodes production > nominal demand ({}): {}. \" \\\n \"Check also nodes_with_excess_demands: {}\".format(\n self.nominal_demand,\n excess_nodes,\n nodes_with_excess_demands)\n\n nx.set_node_attributes(self.product_space, new_output_dict,\n \"aggregate_output\")\n\n # 7. Compute relevant statistics \n # Get the complexities of those products actually produced:\n comp_prod_prdcts = \\\n [self.product_space.nodes[i][\"complexity\"] for i in \\\n self.product_space.nodes if \\\n self.product_space.nodes[i][\"aggregate_output\"] > 0.0]\n\n assert len(comp_prod_prdcts) > 0, \"NOTHING GETS PRODUCED!!!\"\n assert np.isnan(comp_prod_prdcts).any() == False, \\\n \"Nan values in comp_prod_prdcts ({})\".format(comp_prod_prdcts)\n\n # Get the prices of those products actually produced:\n price_prod_prdcts = \\\n [self.product_space.nodes[i][\"price\"] for i in \\\n self.product_space.nodes if \\\n self.product_space.nodes[i][\"aggregate_output\"] > 0.0]\n assert len(price_prod_prdcts) > 0, \"NOTHING HAS A PRICE!!!\"\n assert np.isnan(price_prod_prdcts).any() == False, \\\n \"Nan values in price_prod_prdcts ({})\".format(price_prod_prdcts)\n\n self.record_state_variables(\n time=i,\n new_output_dict=new_output_dict,\n comp_prod_prdcts=comp_prod_prdcts,\n capital_stock_dict=capital_stock_dict,\n price_prod_prdcts=price_prod_prdcts,\n t_record_distributions=record_distributions)",
"def test_product_product_strengths_incremental_with_new_impressions_two_new_products(self):\n # Saves two new, identical products. Initially, no users will have impressions on them.\n id_twin_product_1 = \"p_tec_TWIN_1\"\n id_twin_product_2 = \"p_tec_TWIN_2\"\n\n date = self.session_context.get_present_date() - dt.timedelta(days=2)\n\n twin_product_1 = {\"external_id\": id_twin_product_1,\n \"language\": \"english\",\n \"date\": date,\n \"resources\": {\"title\": \"Whatever Gets You Through The Night\"},\n \"full_content\": \"\"\"Begin. Technology. Technology. This is all we got. End.\"\"\",\n \"category\": \"Nonsense\"}\n\n twin_product_2 = {\"external_id\": id_twin_product_2,\n \"language\": \"english\",\n \"date\": date,\n \"resources\": {\"title\": \"Whatever Gets You Through The Night\"},\n \"full_content\": \"\"\"Begin. Technology. Technology. This is all we got. End.\"\"\",\n \"category\": \"Nonsense\"}\n\n self.db_proxy.insert_product(twin_product_1)\n self.db_proxy.insert_product(twin_product_2)\n\n user1 = \"u_eco_1\"\n user2 = \"u_eco_2\"\n activity_type = self.session_context.activities_by_rating[5][0]\n\n # Saves an impression on just one of the new products\n date = pytz.utc.localize(dateutil.parser.parse(\"1988-11-06 9:00:00\"))\n self.db_proxy.increment_impression_summary(user_id=user1, product_id=id_twin_product_1,\n date=date, anonymous=False)\n\n # Saves a couple of activities for another user using the new products\n\n activity = {\"external_user_id\": user2,\n \"external_product_id\": id_twin_product_1,\n \"activity\": activity_type,\n \"created_at\": self.session_context.get_present_date()}\n pt.update_templates(self.session_context, activity)\n tasks.update_summaries(self.session_context, activity)\n\n self.compare_incremental_vs_from_scratch()\n\n activity = {\"external_user_id\": user2,\n \"external_product_id\": id_twin_product_2,\n \"activity\": activity_type,\n \"created_at\": self.session_context.get_present_date()}\n pt.update_templates(self.session_context, activity)\n tasks.update_summaries(self.session_context, activity)\n\n self.compare_incremental_vs_from_scratch()",
"def get_moving_standard_deviation():\n portfolio = request.get_json(force=True)\n try:\n window = int(request.args.get('window'))\n except:\n pass\n start_date = parse_date(request.args.get('start'))\n end_date = parse_date(request.args.get('end'))\n prices_df = prepare_dataframe(portfolio, start_date, end_date)\n performance = compute_moving_standard_deviation(prices_df, window)\n return performance.to_json(orient='index')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads a set of weights into a time-series weights table in the DB. Could benefit from some connection pooling all around.
|
def db_update_weights(productID, weights_dict):
    # A fresh connection is opened per call (see the pooling note above).
    db_connection = iopro.connect(**db_config)
    db_cursor = db_connection.cursor()
    # One row per (productID, weight name, weight value) triple.
    for k, v in weights_dict.items():
        db_cursor.execute("insert into dbo.TimeSeriesWeights_TMP values (?,?,?)", productID, k, v)
    db_connection.commit()
    db_connection.close()
    print(productID, "-- Loading Weights...")
|
[
"def loadWeights():\r\n final = []\r\n f = open(\"data/weight.txt\", 'r')\r\n for line in f:\r\n final.append(float(line))\r\n f.close()\r\n return final",
"def _load_weights(self):\n\n if not self.load_weights_file_path:\n return\n beh_load_file_path = re.sub(r'\\.h5', r'_beh.h5', self.load_weights_file_path)\n self.beh_model.load_weights(beh_load_file_path)\n tar_load_file_path = re.sub(r'\\.h5', r'_tar.h5', self.load_weights_file_path)\n self.tar_model.load_weights(tar_load_file_path)\n # print(\"Load_weights\")",
"def ingest(self, session):\n print \"Ingesting atomic weights\"\n atomic_df = self.parser.prepare_atomic_dataframe()\n atomic_df = atomic_df[pd.notnull(atomic_df[AW_VAL_COL])]\n\n data_source = DataSource.as_unique(session, short_name=self.ds_short_name)\n\n for atom_num, row in atomic_df.iterrows():\n atom = session.query(Atom).filter(Atom.atomic_number==atom_num).one()\n atom.merge_quantity(session,\n AtomicWeight(data_source=data_source, value=row[AW_VAL_COL], std_dev=row[AW_SD_COL], unit=u.u))",
"def load_chick_weights():\n return stream.iter_csv(\n os.path.join(os.path.dirname(__file__), 'chick-weights.csv'),\n target_name='weight',\n converters={'time': int, 'weight': int, 'chick': int, 'diet': int}\n )",
"def load_weights(self, filepath):\n fbase, fext = splitext(filepath)\n for i, agent in enumerate(self.agents):\n agent.load_weights('%s%i%s' % (fbase,i,fext))",
"def _set_weights(self):\n cols = ['attr', 'weight']\n data = []\n for i in range(len(self.attrs_names)):\n data.append(\n [self.attrs_names[i],\n self.attrs_weight[i]]\n )\n self.weights_df = pd.DataFrame(data=data, columns=cols)",
"def load_weights(self):\n self.weight_file_name = './learn/q-function-weights/' \\\n 'Q_weight_n_msg_passing_1_mse_weight_1.0_optimizer_' \\\n 'adam_seed_%d_lr_0.0001_operator_two_arm_pick_two_arm_place_n_layers_2_n_hidden_32' \\\n '_top_k_1_num_train_5000_loss_%s.hdf5' % (self.config.seed, self.config.loss)\n print \"Loading weight\", self.weight_file_name\n self.loss_model.load_weights(self.weight_file_name)",
"def gen_load_from_daily_monthly(ML, DWL, DNWL, weight=0.5, year=2015):\n #TODO: refactor. Can i use disag_upsample() ?\n if not(np.isclose(DWL.sum(), 1) and np.isclose(DNWL.sum(), 1)):\n raise ValueError('Daily profiles should be normalized')\n #TODO: Normalize here?\n out = make_timeseries(year=year, length=8760, freq='H') # Create empty pandas with datetime index\n import calendar\n febdays = 29 if calendar.isleap(year) else 28\n Days = np.array([31, febdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n # Assumptions for non working days per month. Only weekends\n # TODO: Custom Calendars with holidays BDays\n DaysNW = countweekend_days_per_month(out.resample('d').mean())\n DaysW = Days - DaysNW\n for month in range(12):\n # Estimate total load for working and non working day\n TempW = (ML[month] * weight * DaysW[month] /\n (weight * DaysW[month] + (1 - weight) * DaysNW[month]) / DaysW[month] )\n TempNW = (ML[month] * (1 - weight) * DaysNW[month] /\n (weight * DaysW[month] + (1 - weight) * DaysNW[month]) / DaysNW[month])\n for hour in range(24):\n out.loc[(out.index.month == month + 1) & #months dont start from 0\n (out.index.weekday < 5) &\n (out.index.hour == hour)] = (TempW * DWL[hour])\n out.loc[(out.index.month == month + 1) &\n (out.index.weekday >= 5) &\n (out.index.hour == hour)] = (TempNW * DNWL[hour])\n return out",
"def setWeightsList(self, weights_list) -> None:\n ...",
"def set_weights(self, weights):\n self.actor_critic.load_state_dict(weights)\n self.alpha_optimizer.step()\n self.alpha = self.log_alpha.detach().exp()\n\n # Update target networks by polyak averaging.\n self.iter += 1\n self.update_target_networks()",
"def load_weights(self):\n if self.arg.weights:\n self.fileio.load_weights(self.model, self.arg.weights, self.arg.ignore_weights)",
"def load_weights(model, filepath):\n print('Loading', filepath, 'to', model.name)\n with h5py.File(filepath, mode='r') as f:\n # new file format\n layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]\n\n # we batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for name in layer_names:\n print(name)\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n if len(weight_names):\n weight_values = [g[weight_name] for weight_name in weight_names]\n try:\n layer = model.get_layer(name=name)\n except(Exception):\n layer = None\n if not layer:\n print('failed to find layer', name, 'in model')\n print('weights', ' '.join(str_shape(w) for w in weight_values))\n print('stopping to load all other layers')\n weight_values = [np.array(w) for w in weight_values]\n break\n symbolic_weights = layer.trainable_weights + layer.non_trainable_weights\n weight_value_tuples += zip(symbolic_weights, weight_values)\n weight_values = None\n K.batch_set_value(weight_value_tuples)\n return weight_values",
"def init_weights(listInst):\n for inst in listInst:\n inst.dblWeight = 1.0\n normalize_weights(listInst)",
"def assign_weights(model, weights):\n state_dict = model.state_dict(keep_vars=True)\n # The index keeps track of location of current weights that is being un-flattened.\n index = 0\n # just for safety, no grads should be transferred.\n with torch.no_grad():\n for param in state_dict.keys():\n # ignore batchnorm params\n if 'running_mean' in param or 'running_var' in param or 'num_batches_tracked' in param:\n continue\n param_count = state_dict[param].numel()\n param_shape = state_dict[param].shape\n state_dict[param] = nn.Parameter(torch.from_numpy(weights[index:index+param_count].reshape(param_shape)))\n index += param_count\n model.load_state_dict(state_dict)\n return model",
"def set_weights(self, weights):\n\n weight_index = 0\n for layer in self.NN:\n for node in layer:\n for i in range(len(node.weights)):\n #print(weight_index)\n try:\n node.weights[i] = weights[weight_index]\n except Exception as e:\n print(weight_index)\n print(len(weights))\n sys.exit()\n\n weight_index += 1",
"def set_weights(self, weights):\n self.weights = copy.deepcopy(weights)",
"def _weight_df(self) -> pd.DataFrame:\n # DataFrame: cols=tissue, normal_tissue, weight\n weights = []\n tissues = self.tumor.tissue\n for sample in os.listdir(self.sample_dir):\n sample_tissue = tissues.loc[sample]\n w = pd.read_csv(\n os.path.join(self.sample_dir, sample, \"weights.tsv\"), sep=\"\\t\"\n )\n w.columns = [\"normal_tissue\", \"Median\", \"std\"]\n w[\"tissue\"] = sample_tissue\n w[\"sample\"] = sample\n weights.append(w.drop(\"std\", axis=1))\n return pd.concat(weights).reset_index(drop=True)",
"def weights(self, weights):\n if isinstance(weights, dict) and weights == GET_WEIGHTS_FROM_CACHE:\n return\n if not self.sublayers:\n self._weights = weights\n else:\n # When having sublayers, self._weights just marks which are cached,\n # the actual weights are stored by sublayers.\n self._weights = []\n for w in weights:\n if isinstance(w, dict) and w == GET_WEIGHTS_FROM_CACHE:\n self._weights.append(w)\n else:\n self._weights.append(None)\n # Set sublayer weights.\n n_layers = len(self.sublayers)\n if len(weights) != n_layers:\n raise ValueError(\n f'Number of weight elements ({len(weights)}) does not equal the '\n f'number of sublayers ({n_layers}) in: {str(self)}.')\n for sublayer, sublayer_weights in zip(self.sublayers, weights):\n sublayer.weights = sublayer_weights",
"def add_weights(self, key, weights):\n if self.num_structures != len(weights):\n raise AttributeError(\n \"Length of weights must match number of structures \"\n f\"{len(weights)} != {self.num_structures}.\"\n )\n for weight, entry in zip(weights, self._entries):\n entry.data[\"weights\"][key] = weight"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Main program-flow logic. Sets the db_config parameter to the desired database, gets the required purchase-date parameters that apply to all ProductIDs, gets the list of all known ProductIDs, runs time-series extraction of daily sales totals for each ProductID (serially), and writes the resulting weights to a database.
|
def main(db):
    # Point the shared db_config at the requested database.
    db_config["Database"] = db
    # Load the purchase-date bounds that apply to every product's series.
    db_get_ts_config()
    # Load the Product Table on initialization.
    productIDs = db_get_productlist()
    # Run the time-series extraction for each ProductID serially and
    # write the resulting weights back to the database.
    for productID in productIDs:
        timeseries(productID)
        print()
    # Debug aid: print(ts_config["productIDList"][0:3])
|
[
"def db_update_weights(productID, weights_dict):\n db_connection = iopro.connect(**db_config) \n db_cursor = db_connection.cursor()\n \n for k, v in weights_dict.items():\n db_cursor.execute(\"insert into dbo.TimeSeriesWeights_TMP values (?,?,?)\", productID, k, v)\n \n db_connection.commit()\n db_connection.close()\n print(productID, \"-- Loading Weights...\")",
"def run(self):\n # get aggregate output\n print(\"------Called run function-------------------------------------\")\n record_distributions = (1, self.nb_of_timesteps - 1)\n for i in range(self.nb_of_timesteps):\n print(file_marker + \"Model {}: t {}/{}\".format(\n self.id, i, self.nb_of_timesteps), end=\"\\r\")\n self.t = i\n # 1. Update firm positions on the product space\n self.add_firm_positions()\n\n # 2. Compute the prices for all products on the product space\n self.add_prices() # If output=0, then delta is used as price\n\n # 3.1 Reset production statistics on product space:\n old_output_dict = nx.get_node_attributes(\n self.product_space, \"aggregate_output\")\n\n excess_nodes = \\\n {k: v for k, v in old_output_dict.items() \\\n if v > self.nominal_demand}\n assert len(excess_nodes.keys()) == 0, \\\n \"For some nodes production > nominal demand: {}\".format(\n excess_nodes)\n\n nx.set_node_attributes(self.product_space, 0.0, \"aggregate_output\")\n\n new_output_dict = {}\n capital_stock_dict = {}\n all_deposits = 0\n all_loans = 0\n individual_output_dict = {}\n total_output_dict = dict( # Starts with value 0 for each node\n zip(self.product_space.nodes(),\n self.parameters[\"number_of_products\"] * [0]))\n\n # 3.2 Compute output for each firm, save it,\n # add it to total_output_dict\n for firm in self.firm_list:\n price = self.product_space.nodes[ \\\n firm.product_space_position][\"price\"]\n capital_stock = firm.capital_stock[-1]\n output = Firm.output_and_capability_costs(\n firm, capital_stock, price)\n if math.isnan(output) == True:\n pdb.set_trace()\n total_output_dict[firm.product_space_position] += output\n individual_output_dict[firm] = output\n\n # 3.3 If total output for a product exceeds demand,\n # update actual outputs\n nodes_with_excess_demands = \\\n {k: v for k, v in total_output_dict.items() \\\n if v > self.nominal_demand}\n\n for k in nodes_with_excess_demands.keys():\n for firm in self.firm_list:\n price = self.product_space.nodes[ \\\n firm.product_space_position][\"price\"]\n if firm.product_space_position == k:\n possible_output = \\\n firm.output_and_capability_costs(\n firm.capital_stock[-1], price)\n adapted_output = \\\n possible_output / total_output_dict[k] \\\n * self.nominal_demand\n if math.isnan(adapted_output):\n pdb.set_trace()\n individual_output_dict[firm] = \\\n self.round_half_down(adapted_output, 2)\n\n # 4. 
Location decision, investment of the firms and\n # update capital stock\n for firm in self.firm_list:\n # 4.0 Save preparatory values\n output = individual_output_dict.get(firm)\n total_output_this_period = \\\n total_output_dict.get(firm.product_space_position)\n price = self.product_space.nodes[\n firm.product_space_position][\"price\"]\n total_output_prev_t = \\\n old_output_dict[firm.product_space_position]\n\n # 4.1 Write firm output into new_output_dict\n if firm.product_space_position in new_output_dict.keys():\n new_output_dict[firm.product_space_position] = \\\n int(new_output_dict[firm.product_space_position] +\n output)\n else:\n new_output_dict[firm.product_space_position] = output\n\n assert new_output_dict[firm.product_space_position] <= \\\n self.nominal_demand, \\\n \"new_output_dict value {} exceeds maximum {}\".format(\n new_output_dict[firm.product_space_position],\n self.nominal_demand)\n\n # 4.2 Firms choose new spot on the product space, compute actual\n # profits, make investment decisions demand money from banks \n # and update capital account\n capital_stock, account_firm = firm.update_firm(\n price=price,\n financial_regime_parameter=self.financial_regime_parameter,\n output=output,\n total_output_this_period=total_output_this_period)\n if account_firm > 0:\n all_deposits += account_firm\n else:\n all_loans += account_firm\n capital_stock_dict[firm] = capital_stock\n\n # 5. Update bank accounts\n account_bank = self.bank_list[0].update_bank(all_deposits,\n all_loans)\n assert account_bank == all_loans + all_deposits, \\\n \"ALERT: account_bank={}, all_loans + all_deposits={}+{}={}!\"\\\n .format(account_bank,\n all_loans,\n all_deposits,\n all_loans + all_deposits)\n\n # 6. Update the output for each product *on the product space*\n excess_nodes = \\\n {k: v for k, v in new_output_dict.items()\n if v > self.nominal_demand}\n assert len(excess_nodes.keys()) == 0, \\\n \"For some nodes production > nominal demand ({}): {}. \" \\\n \"Check also nodes_with_excess_demands: {}\".format(\n self.nominal_demand,\n excess_nodes,\n nodes_with_excess_demands)\n\n nx.set_node_attributes(self.product_space, new_output_dict,\n \"aggregate_output\")\n\n # 7. Compute relevant statistics \n # Get the complexities of those products actually produced:\n comp_prod_prdcts = \\\n [self.product_space.nodes[i][\"complexity\"] for i in \\\n self.product_space.nodes if \\\n self.product_space.nodes[i][\"aggregate_output\"] > 0.0]\n\n assert len(comp_prod_prdcts) > 0, \"NOTHING GETS PRODUCED!!!\"\n assert np.isnan(comp_prod_prdcts).any() == False, \\\n \"Nan values in comp_prod_prdcts ({})\".format(comp_prod_prdcts)\n\n # Get the prices of those products actually produced:\n price_prod_prdcts = \\\n [self.product_space.nodes[i][\"price\"] for i in \\\n self.product_space.nodes if \\\n self.product_space.nodes[i][\"aggregate_output\"] > 0.0]\n assert len(price_prod_prdcts) > 0, \"NOTHING HAS A PRICE!!!\"\n assert np.isnan(price_prod_prdcts).any() == False, \\\n \"Nan values in price_prod_prdcts ({})\".format(price_prod_prdcts)\n\n self.record_state_variables(\n time=i,\n new_output_dict=new_output_dict,\n comp_prod_prdcts=comp_prod_prdcts,\n capital_stock_dict=capital_stock_dict,\n price_prod_prdcts=price_prod_prdcts,\n t_record_distributions=record_distributions)",
"def initiateDatabase(ROLLING_WINDOW_SIZE, RISK_FREE_RATE, IV_TOLERENCE, path_from_main):\n # files required for initiating database, all config related data present in config.txt\n config = configparser.ConfigParser()\n config.readfp(open(r'config.txt'))\n path = config.get('Input Data Section', 'data_file_path') # if single file needs to be run we can provide that file name in config.txt\n if path_from_main != None:\n path = path_from_main # in case of automation.py we need to provide file name as agrument to main and pass it on here for deriving the folder name for output\n\n global data, current_directory, folder_name\n # derive folder for storage from path name (change accordingly)\n folder_name = path.split('_')[2].split('.')[0] # ****file name specific function (deriving folder name for storage under output from data file name)\n\n # for graphical data make folder for storing (floder name for storage is derived from data file name above so change if needed)\n current_directory = os.getcwd()\n Path(current_directory + '/output/{}/graphs/volatility'.format(folder_name)).mkdir(parents = True, exist_ok = True)\n Path(current_directory + '/output/{}/graphs/vega'.format(folder_name)).mkdir(parents = True, exist_ok = True)\n # remove preexisting files if present\n files = glob.glob(current_directory + '/output/{}/graphs/volatility/*'.format(folder_name))\n for f in files:\n os.remove(f)\n files = glob.glob(current_directory + '/output/{}/graphs/vega/*'.format(folder_name))\n for f in files:\n os.remove(f)\n \n # read data from file\n data = pd.read_csv(path)\n # functions specific to dataset currently in use\n datasetSpecificFunction() # any preprocessing that needs to be done according to structure of dataset if dataset structure changes this needs to be changed\n convertToNumeric() # convert all data to numeric\n STRIKE_PRICE = data.loc[0, 'strike'] # ****load strike from datasetfor using and returning to main function (change col name if you name strike price column as something else)\n # calculateAvgFuturePrice() # if future avg not calculated calculate future average price\n calculateImpliedVolatility(data.shape[0], STRIKE_PRICE, RISK_FREE_RATE, IV_TOLERENCE) # calculate the implied volatility and smoothen it on window of size 10 \n calculateHistoricalVolatility(data.shape[0], ROLLING_WINDOW_SIZE) # calculate the historical volatility on specified window size\n calculateVega(data.shape[0], STRIKE_PRICE, RISK_FREE_RATE) # calculate vega for the dataset, not required if already present\n plotHV_IV() # plot of Historical Volatility and Implied volatility v/s index stored in under output in specified folder name\n plotVega_x_diff() # plot of Vega * (IV - HV) v/s index stored in under output in specified folder name\n # return the required data to main\n return data.shape[0], STRIKE_PRICE, folder_name # returning folder name so as to create output folder with name same as data file name",
"def retrieve_from_db(state_code):\n\n # add parent directory to the path, so can import model.py\n # need model in order to update the database when this task is activated by cron\n import os\n parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.sys.path.insert(0,parentdir)\n\n import model\n s = model.connect()\n\n # retrive DECEMBER production data, for all turbines at all power plants in California\n CA_gen_dec13_obj = s.execute('SELECT plant_name, state, fuel_type, dec_mwh_gen FROM \"ProdGensDec2013\" WHERE state=\\'%s\\' ' % state_code)\n\n CA_gen_dec13_data = CA_gen_dec13_obj.fetchall()\n df_dec2013 = DataFrame(CA_gen_dec13_data)\n df_dec2013.columns = ['plant_name', 'state', 'fuel_type', 'dec_mwh_gen']\n\n # retrive JAN-NOV 2014 production data, for all turbines at all power plants in California\n CA_gen_2014_obj = s.execute('SELECT plant_name, state, fuel_type, jan_mwh_gen, feb_mwh_gen, mar_mwh_gen, apr_mwh_gen, may_mwh_gen, jun_mwh_gen, jul_mwh_gen, aug_mwh_gen, sep_mwh_gen, oct_mwh_gen, nov_mwh_gen FROM \"ProdGens\" WHERE state=\\'%s\\' ' % state_code)\n\n CA_gen_2014_data = CA_gen_2014_obj.fetchall()\n df_2014 = DataFrame(CA_gen_2014_data)\n df_2014.columns = ['plant_name', 'state', 'fuel_type', 'jan_mwh_gen', 'feb_mwh_gen', 'mar_mwh_gen', 'apr_mwh_gen', 'may_mwh_gen', 'jun_mwh_gen', 'jul_mwh_gen', 'aug_mwh_gen', 'sep_mwh_gen', 'oct_mwh_gen', 'nov_mwh_gen']\n\n # retrieve county name, assigned to each turbine at each plant in California\n CA_counties_obj = s.execute('SELECT plant_name, county FROM \"StatsGens\" WHERE state=\\'%s\\' GROUP BY plant_name, county' % state_code)\n\n CA_plant_counties = CA_counties_obj.fetchall()\n df_counties = DataFrame(CA_plant_counties)\n df_counties.columns = ['plant_name', 'county']\n # now convert into dict, so caan easily add county to other df.\n dict_counties={}\n for idx, row in enumerate(df_counties.values):\n plant_name, county = row\n # clean the county name\n county = unicodedata.normalize('NFKD', county).encode('ascii', 'ignore')\n county = county.lower().title()\n county = county.replace(\" County\", \"\")\n dict_counties[plant_name] = county\n\n\n return df_dec2013, df_2014, dict_counties",
"def get_products(config):\n\n basepath = os.path.dirname(__file__)\n products_path = os.path.join(basepath,config['input_path'],'scrape_products.csv')\n log_output = os.path.join(basepath,config['log_path'],'outstanding_products.csv')\n output_dir = os.path.join(basepath,config['output_path'],\"products\")\n products_df = pd.read_csv(products_path)\n\n freq = 200\n max_rows = products_df.shape[0] + 1\n for start_row in range(0, max_rows, freq):\n end_row = min(start_row + freq, max_rows)\n output_path = output_dir + '/products_info_{time}_{s_row}_{e_row}.csv'\\\n .format(s_row=start_row, e_row=end_row, time=datetime.now().strftime(\"%H%M%S\"))\n\n cmd = 'scrapy runspider ' + basepath + '/spiders/amazon_products.py -o {output_path} '\\\n '-a config=\"{products_path},{log_file},{s_row},{e_row},main\"'\\\n .format(output_path=output_path, log_file=log_output, products_path=products_path, s_row=start_row, e_row=end_row)\n call(cmd, shell=True)",
"def retrieve_from_db_usa():\n\n # add parent directory to the path, so can import model.py\n # need model in order to update the database when this task is activated by cron\n import os\n parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.sys.path.insert(0,parentdir)\n\n import model\n s = model.connect()\n\n # retrive DECEMBER production data, for all turbines at all power plants in California\n USA_gen_dec13_obj = s.execute('SELECT plant_name, state, fuel_type, dec_mwh_gen FROM \"ProdGensDec2013\" ')\n USA_gen_dec13_data = USA_gen_dec13_obj.fetchall()\n df_dec2013 = DataFrame(USA_gen_dec13_data)\n df_dec2013.columns = ['plant_name', 'state', 'fuel_type', 'dec_mwh_gen']\n\n # retrive JAN-NOV 2014 production data, for all turbines at all power plants in USA\n USA_gen_2014_obj = s.execute('SELECT plant_name, state, fuel_type, jan_mwh_gen, feb_mwh_gen, mar_mwh_gen, apr_mwh_gen, may_mwh_gen, jun_mwh_gen, jul_mwh_gen, aug_mwh_gen, sep_mwh_gen, oct_mwh_gen, nov_mwh_gen FROM \"ProdGens\" ')\n USA_gen_2014_data = USA_gen_2014_obj.fetchall()\n df_2014 = DataFrame(USA_gen_2014_data)\n df_2014.columns = ['plant_name', 'state', 'fuel_type', 'jan_mwh_gen', 'feb_mwh_gen', 'mar_mwh_gen', 'apr_mwh_gen', 'may_mwh_gen', 'jun_mwh_gen', 'jul_mwh_gen', 'aug_mwh_gen', 'sep_mwh_gen', 'oct_mwh_gen', 'nov_mwh_gen']\n\n return df_dec2013, df_2014",
"def main():\n hostname,dbname = \"127.0.0.1\",\"sparkifydb\"\n create_tables.main(hostname, dbname)\n\n process_song_data(hostname, dbname, \"./data/song_data/\")\n process_log_data(hostname, dbname, \"./data/log_data/\")",
"def run(self):\t\t\n\t\tconn,curs = db_connect(self.hostname, self.dbname,self.schema)\n\t\t\n\t\tself.gene_no2go_no = self.prepare_gene_no2go_no(curs)\n\t\tself.get_function_edge_matrix_data(curs, self.no_of_nas, self.table)\n\t\t\n\t\t#make a directory first\n\t\tif not os.path.isdir(self.output_dir):\n\t\t\tos.makedirs(self.output_dir)\n\t\t\n\t\tfor go_no, edge_data in self.go_no2edge_matrix_data.iteritems():\n\t\t\tif len(edge_data)>=self.min_no_of_edges:\n\t\t\t\tself.edge_data_output(self.output_dir, go_no, edge_data)\n\t\t\t\tself.go_no_qualified.append(go_no)",
"def main():\n #Get number of pages\n\n number_of_pages = get_number_of_pages()\n pages=list(np.arange(1,number_of_pages+1))\n timer_utils=TimerUtils()\n timer_utils.start(f'Start getting products information of {len(pages)} pages')\n\n # Split to different = number of process\n buckets = np.array_split(pages, NUMBER_OF_PROCESS)\n pool =Pool(NUMBER_OF_PROCESS)\n jobs=[]\n index\t=0\n\n now=dt.now()\n dt_string=now.strftime(\"%d/%m/%Y %H:%M:%S\")\n logging.info(f'Start\t{NUMBER_OF_PROCESS} workers\tat {dt_string}')\n\n #Create a global variable.\n while index\t<\tlen(buckets):\n process_id\t=index\n pages\t= buckets[index]\n process\t=pool.apply_async(get_products_df,\targs=(process_id,\tpages,))\n jobs.append(process)\n index\t+=1\n\n \t#C1ose the pool\n pool.close()\n\n \t#wait\tuntil\tfinishing\tall\tprocess\n results=[job.get() for job in\tjobs]\n timer_utils.stop(f'End getting products\tinformation\tof total {len(pages)} pages')",
"def db_get_ts_config():\n db_connection = iopro.connect(**db_config)\n db_cursor = db_connection.cursor()\n \n db_cursor.execute(\"select * from dbo.vTransactionStats\") # Application needs to know, minimally, first and last overall transaction dates\n result = db_cursor.fetchone()\n ts_config[\"minPurchaseDate\"] = result.minPurchaseDate\n ts_config[\"maxPurchaseDate\"] = result.maxPurchaseDate # Assumes the most recent PurchaseDate applies to all products, so zeros can be filled in appropriately for trending\n db_connection.close()\n del(db_cursor)\n del(db_connection)",
"def main():\n \n logger.info('---[ Create Tables ]---')\n mylib.log_timestamp()\n print(\"Logfile : \" + mylib.get_log_file_name())\n\n # read config parameters for database connection string\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n try:\n conn_string = \"host={} dbname={} user={} password={} port={}\"\n conn_string = conn_string.format(*config['CLUSTER'].values())\n conn = psycopg2.connect( conn_string )\n cur = conn.cursor()\n\n print(conn_string)\n logger.info('DB connection : open')\n\n except Exception as e:\n logger.info(\"Error : Could not make connection to the sparkify DB\")\n print(e)\n\n # Drop (if exists) and create new tables for sparkify database\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()\n logger.info('DB connection : closed')",
"def run():\n # Initialize db\n connection = init_db()\n sql_helper.nuke_tables(connection)\n\n logger.info(\"Populating probabilistic database...\")\n number_of_elements = 1000000\n generator.run(connection, size=number_of_elements)\n logger.info(\"Populating complete!\")\n\n # Let the benchmark test the database\n benchmark_results = benchmark.runBenchmark(connection, logger)\n\n # Clear the database\n logger.info(\"Clearing the database...\")\n sql_helper.nuke_tables(connection)\n logger.info(\"Clear complete\")\n\n # Close the db connection.\n connection.close()\n logger.info(\"Database connection ended.\")\n\n # Save the results to a file\n date_time = datetime.now().strftime(\"%Y%m%d-%H%M\")\n export_results(results=benchmark_results, filename=\"{}_{}-elements_maybms-benchmark-result.csv\".format(date_time, number_of_elements))\n\n logger.info(\"Bye!\")",
"def process():\n db = DataParser.get_connection()\n cursor = db.cursor()\n DataParser.set_up_database(cursor)\n config = DataParser.get_config()\n cursor.execute(\"use %s\" % config[\"database\"][\"database_name\"])\n DataParser.import_articles(cursor)\n DataParser.import_citations(cursor)\n DataParser.import_words(cursor)\n DataParser.import_users(cursor)\n DataParser.clean_up(db, cursor)",
"def ingest_data() -> None:\n secrets = sts.get_secrets()\n DB_NAME = secrets[\"DB_NAME\"]\n HOST = secrets[\"HOST\"]\n PORT_NAME = secrets[\"PORT_NAME\"]\n USER = secrets[\"USER\"]\n PASSWORD = secrets[\"PASSWORD\"]\n\n try:\n logging.debug(\"Connecting to redshift warehouse.\")\n wh_conn = psycopg2.connect(\n dbname=DB_NAME, host=HOST, port=PORT_NAME, user=USER, password=PASSWORD\n )\n wh_conn.autocommit = True\n cur = wh_conn.cursor()\n\n try:\n logging.debug(\n \"Executing ingest_tweets_aapl_sentiments_minute_sum_query query.\"\n )\n cur.execute(staq.ingest_tweets_aapl_sentiments_minute_sum_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_aapl_minute_avg_query query.\")\n cur.execute(staq.ingest_tickers_aapl_minute_avg_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_aapl_query query.\")\n cur.execute(staq.ingest_tickers_aapl_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\n \"Executing ingest_tweets_goog_sentiments_minute_sum_query query.\"\n )\n cur.execute(staq.ingest_tweets_goog_sentiments_minute_sum_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_goog_minute_avg_query query.\")\n cur.execute(staq.ingest_tickers_goog_minute_avg_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_goog_query query.\")\n cur.execute(staq.ingest_tickers_goog_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\n \"Executing ingest_tweets_amzn_sentiments_minute_sum_query query.\"\n )\n cur.execute(staq.ingest_tweets_amzn_sentiments_minute_sum_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_amzn_minute_avg_query query.\")\n cur.execute(staq.ingest_tickers_amzn_minute_avg_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_amzn_query query.\")\n cur.execute(staq.ingest_tickers_amzn_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n logging.debug(\"Commiting cursor execution.\")\n wh_conn.commit()\n wh_conn.close()\n cur.close()\n except psycopg2.DatabaseError as error:\n logging.error(error)\n raise (error)",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # load staging tables\n #load_staging_tables(cur, conn)\n \n # load analytics tables\n insert_tables(cur, conn)\n\n # Check for and remove duplicate rows\n quality_check_data(cur, conn, tablename='songplays', idcol='songplay_id')\n quality_check_data(cur, conn, tablename='songs', idcol='song_id')\n quality_check_data(cur, conn, tablename='artists', idcol='artist_id')\n quality_check_data(cur, conn, tablename='time', idcol='start_time')\n quality_check_data(cur, conn, tablename='users', idcol='user_id')\n \n # Check that the unique user_ids in the user and songplay table match\n cur.execute(\"SELECT DISTINCT(user_id) FROM songplays;\")\n uid_songplays = [r[0] for r in cur.fetchall()]\n print(\"{} unique user_ids in songplays table\".format(len(uid_songplays)))\n cur.execute(\"SELECT DISTINCT(user_id) FROM users;\")\n uid_users = [r[0] for r in cur.fetchall()]\n print(\"{} unique user_ids in users table\".format(len(uid_users)))\n \n # Return the user_ids in users table with no \"NextSong\" clicks\n diff = np.setdiff1d(uid_users, uid_songplays)\n print(\"The following users have no 'NextSong' clicks: user ids {}\".format(str(diff)))\n \n # Check that the unique times in the time and songplays table match\n cur.execute(\"SELECT DISTINCT(start_time) FROM songplays;\")\n ts_songplays = [r[0] for r in cur.fetchall()]\n print(\"{} unique times in songplays table\".format(len(ts_songplays)))\n cur.execute(\"SELECT DISTINCT(start_time) FROM time;\")\n ts_time = [r[0] for r in cur.fetchall()]\n print(\"{} unique times in time table\".format(len(ts_time)))\n \n conn.close()",
"def build_sc_trustee(accdb):\n\n os.chdir('/home/nate/dropbox-caeser/Data/MIDT/Data_Warehouse/sc_trustee')\n table_names = subprocess.Popen(['mdb-tables','-1', accdb],\n stdout=subprocess.PIPE).communicate()[0]\n tables = table_names.split('\\n') \n df = pd.DataFrame(columns={'startyr':np.int,\n 'parid':np.str,\n 'sumrecv':np.float,\n 'sumdue':np.float,\n 'status':np.str})\n cols = {'MinOfTownCntlYearYY':'startyr',\n 'Assr Parcel':'parid',\n 'SumOfReceivTaxDue':'sumrecv',\n 'SumOfTotalDue':'sumdue'}\n\n status = set(['Active', 'Redemption', 'Eligible'])\n\n for table in tables:\n if 'Assr' in table:\n rows = subprocess.Popen(['mdb-export', accdb, table],\n stdout=subprocess.PIPE).communicate()[0]\n print table\n print len(rows.split('\\n'))\n df_tbl = pd.read_table(StringIO.StringIO(rows), sep=',', \n header=0, quotechar='\"', lineterminator='\\n',\n usecols=cols.keys())\n df_tbl = df_tbl.rename(columns=cols)\n df_tbl['status'] = status.intersection(table.split(' ')).pop()\n df = df.append(df_tbl, ignore_index=True)\n \n today = datetime.today()\n df['load_date'] = '{0}-{1}-{2}'.format(today.year, \n today.month,today.day)\n df.to_sql('sc_trustee', engine, if_exists='append')\n #delete rows that contain tax deliq to only show new records\n clean_tax = \"\"\"update combined_table \\\n set startyr = NULL,\n sumdue = NULL,\n sumrecv = NULL,\n status = NULL,\n load_date = current_date\n where startyr is not NULL;\"\"\"\n conn.execute(clean_tax)\n #update new tax information\n update_tax = \"\"\"update combined_table\n set startyr = tax.startyr, sumdue = tax.sumdue, \n sumrecv = tax.sumrecv, \n status = tax.status,\n load_date = tax.load_date\n from (select parid, min(startyr) startyr, sum(sumdue) sumdue, \n sum(sumrecv) sumrecv, max(load_date) load_date,\n status\n from sc_trustee where load_date = current_date\n group by parid, status) tax\n where combined_table.parid = tax.parid\"\"\"\n conn.execute(update_tax)",
"def store_results(transactions):\r\n\r\n server='LAPTOP-N3JOPONO'\r\n database='TD_Ameritrade'\r\n data_connection=pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};\\\r\n SERVER=' + server + ';\\\r\n DATABASE=' + database + ';\\\r\n Trusted_Connection=yes;')\r\n\r\n data_cursor=data_connection.cursor()\r\n\r\n symbol = transactions['Stock Symbol']\r\n #Add buy history to SQL table\r\n for (orderID,price,quantity,placed_time,filled_time) in transactions['Buy History']:\r\n\r\n #Insert query to insert new data into Buy_Orders table\r\n insert_query_buy = '''INSERT INTO Buy_Orders(Buy_Order_ID,Stock_Ticker,Price,Quantity,Time_Placed,Time_Filled)\r\n VALUES(?,?,?,?,?,?);'''\r\n\r\n #Information on buy transactions\r\n values_buy=(orderID,symbol,price,quantity,placed_time,filled_time)\r\n data_cursor.execute(insert_query_buy,values_buy)\r\n\r\n #Add sell history to SQL Table\r\n for (orderID,price,quantity,placed_time,filled_time,parentID) in transactions['Sell History']:\r\n\r\n #Insert query to insert new data into Sell_Orders table\r\n insert_query_sell = '''INSERT INTO Sell_Orders(Sell_Order_ID,Stock_Ticker,Price,Quantity,Time_Placed,Time_Filled,Buy_Order_ID_link)\r\n VALUES(?,?,?,?,?,?,?);'''\r\n\r\n #Information on sell transactions\r\n values_sell=(orderID,symbol,price,quantity,placed_time,filled_time,parentID)\r\n data_cursor.execute(insert_query_sell,values_sell)\r\n\r\n #Add current open sell orders to SQL Table\r\n for (orderID,price,parentID) in transactions['Limit Sells']:\r\n\r\n #Insert query to insert new data into Open_Sell_Orders table\r\n insert_query_sell_open = '''INSERT INTO Open_Sell_Orders(Sell_Order_ID,Stock_Ticker,Price,Date,Buy_Order_ID_link)\r\n VALUES(?,?,?,?,?);'''\r\n\r\n #Information on sell transactions\r\n values_sell_open=(orderID,symbol,price,datetime.datetime.now().date(),parentID)\r\n data_cursor.execute(insert_query_sell_open,values_sell_open)\r\n\r\n\r\n data_connection.commit()\r\n data_cursor.close()\r\n data_connection.close()",
"def direct_results():\n\n countries = load_yaml_file(\"data/countries.yaml\")\n\n dic_uuids = {}\n\n #for country in [\"CH\",]:\n for country in countries:\n job_id = str(uuid.uuid1())\n\n dic_uuids[job_id] = country\n\n # Add task to db\n task = Task(\n id=job_id,\n progress=0,\n )\n db.session.add(task)\n db.session.commit()\n\n # fetch current year\n year = int(datetime.datetime.now().year)\n\n params_dict = {\n (\"Functional unit\",): {\n \"powertrain\": [\"ICEV-p\", \"ICEV-d\", \"ICEV-g\", \"BEV\"],\n \"year\": [year],\n \"size\": [\"Medium\"],\n \"fu\": {\"unit\": \"vkm\", \"quantity\": 1},\n },\n (\"Driving cycle\",): \"WLTC\",\n (\"Background\",): {\n \"country\": country,\n \"energy storage\": {\n \"electric\": {\"Medium\": {\"type\": \"NMC-622\", \"origin\": \"CN\"}}\n },\n },\n (\"Foreground\",): {\n (\"Glider\", \"all\", \"all\", \"average passengers\", \"none\"): {\n (year, \"loc\"): 1.5\n },\n (\"Glider\", \"all\", \"all\", \"cargo mass\", \"none\"): {(year, \"loc\"): 20.0},\n (\"Driving\", \"all\", \"all\", \"lifetime kilometers\", \"none\"): {\n (year, \"loc\"): 200000.0\n },\n (\"Driving\", \"all\", \"all\", \"kilometers per year\", \"none\"): {\n (year, \"loc\"): 12000.0\n },\n },\n }\n data, i = app.calc.process_results(params_dict, \"en\", job_id)\n data = json.loads(data)\n data.append(job_id)\n\n with open(\n f\"data/precalculated results/quick_results_{country}.pickle\", \"wb\"\n ) as f:\n pickle.dump(data, f)\n\n # generate inventories\n for software in [\"brightway2\", \"simapro\"]:\n for ecoinvent_version in [\n #\"3.6\",\n #\"3.7\",\n \"3.8\"\n ]:\n if software == \"brightway2\" or (\n software == \"simapro\" and ecoinvent_version == \"3.7\"\n ):\n data = i.export_lci(\n ecoinvent_version=ecoinvent_version,\n software=software,\n format=\"string\",\n )\n\n with open(\n f\"data/inventories/quick_inventory_{country}_{software}_{ecoinvent_version}.pickle\",\n \"wb\",\n ) as f:\n pickle.dump(data, f)\n\n with open(\"data/precalculated results/quick_results_job_ids.pickle\", \"wb\") as f:\n pickle.dump(dic_uuids, f)\n\n res = make_response(jsonify({\"job id\": 0}), 200)\n return res",
"def main():\n write_to_db(parse_arguments(),\n ALDER_TOPICS + ELM_TOPICS + MAPLE_TOPICS + MERCER_TOPICS)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a set of CCD-level FITS headers from the specified FITS template file, which is expected to implement the FITS standard for sensors (LCA10140).
|
def fits_headers(template=template_file):
headers = OrderedDict()
hdr = fits.header.Header()
with open(template) as fd:
for line in fd:
# Skip comments and whitespace lines.
if line[0] == '#' or len(line.strip()) == 0:
continue
if line[:3] == 'END':
if len(headers) == 0:
# First hdu must be the Primary HDU.
headers['PRIMARY'] = hdr
else:
# Subsequent ones must be extensions with an EXTNAME
headers[hdr['EXTNAME']] = hdr
hdr = fits.header.Header()
continue
data = line.split('=')
key, value = data[0].strip(), '='.join(data[1:]).strip()
data = value.split('/')
value, comment = data[0].strip(), '/'.join(data[1:]).strip()
hdr[key] = (_cast(value), comment)
return headers
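
# The _cast() helper referenced above is not shown in this snippet. A plausible
# sketch (an assumption, not the project's actual implementation) converts the
# template's value string into a native Python type and falls back to the raw
# string when nothing else matches:
def _cast_sketch(value):
    """Illustrative stand-in for the _cast() helper used by fits_headers()."""
    value = value.strip()
    # FITS template booleans are usually bare T/F tokens.
    if value == 'T':
        return True
    if value == 'F':
        return False
    # Quoted values stay as strings, with the surrounding quotes stripped.
    if value.startswith("'") and value.endswith("'"):
        return value.strip("'").strip()
    # Otherwise try numeric conversion, ints before floats.
    for converter in (int, float):
        try:
            return converter(value)
        except ValueError:
            pass
    return value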
|
[
"def _getFITSHeader(self, hdulist, options):\n JWInstrument._getFITSHeader(self,hdulist, options)\n\n hdulist[0].header.update('MODULE',self.module, 'NIRCam module: A or B')\n hdulist[0].header.update('CHANNEL', 'Short' if self.pixelscale == self._pixelscale_short else 'Long', 'NIRCam channel: long or short')\n # filter, pupil added by calcPSF header code\n hdulist[0].header.update('PILIN', 'False', 'Pupil imaging lens in optical path: T/F')",
"def make_header(self):\n cards = [fits.Card(\"TELESCOP\", \"GLAST\"),\n fits.Card(\"INSTRUME\", \"LAT\"),\n fits.Card(self._conv.coordsys, self._coordsys),\n fits.Card(\"PIXTYPE\", \"HEALPIX\"),\n fits.Card(\"ORDERING\", self.ordering),\n fits.Card(\"ORDER\", self._order),\n fits.Card(\"NSIDE\", self._nside),\n fits.Card(\"FIRSTPIX\", 0),\n fits.Card(\"LASTPIX\", self._maxpix - 1),\n fits.Card(\"HPX_CONV\", self._conv.convname)]\n\n if self._coordsys == \"CEL\":\n cards.append(fits.Card(\"EQUINOX\", 2000.0,\n \"Equinox of RA & DEC specifications\"))\n\n if self._region is not None:\n cards.append(fits.Card(\"HPX_REG\", self._region))\n cards.append(fits.Card(\"INDXSCHM\", \"PARTIAL\"))\n elif self._ipix is not None:\n cards.append(fits.Card(\"INDXSCHM\", \"EXPLICIT\")) \n else:\n if self._conv.convname in ['FGST_SRCMAP_SPARSE']:\n cards.append(fits.Card(\"INDXSCHM\", \"SPARSE\"))\n else:\n cards.append(fits.Card(\"INDXSCHM\", \"IMPLICIT\"))\n\n header = fits.Header(cards)\n return header",
"def read_header(fits_file):\n\n head = {}\n F = pf.open(fits_file)\n H = F[0].header\n head['Ntot'] = H['N_TOT']\n head['Nmu'] = H['N_MU']\n head['Nsig'] = H['N_SIGMA']\n head['Nv'] = H['N_VOIGT']\n head['Ncoef'] = H['N_COEF']\n head['Nspa'] = H['N_SPARSE']\n head['mu'] = [H['MU1'], H['MU2']]\n head['sig'] = [H['SIGMA1'], H['SIGMA2']]\n head['z'] = F[1].data.field('redshift')\n F.close()\n return head",
"def _getFITSHeader(self, hdulist, options):\n JWInstrument._getFITSHeader(self, hdulist, options)\n hdulist[0].header.update('GRATING', 'None', 'NIRSpec grating element name')\n hdulist[0].header.update('APERTURE', str(self.image_mask), 'NIRSpec slit aperture name')",
"def createHeader (self, inFileDIR):\n\n # Change to file directory (can work with relative paths)\n os.chdir(inFileDIR)\n\n # Get name for all expected files\n inHHFileList = glob.glob('*HH')\n inHVFileList = glob.glob('*HV')\n inDateFileList = glob.glob('*_date')\n inIncFileList = glob.glob('*_linci')\n inMaskFileList = glob.glob('*_mask')\n inHeaderFileList = glob.glob('KC*.hdr')\n\n if len(inHeaderFileList) == 1:\n inHeaderFile = inHeaderFileList[0]\n else:\n raise Exception('Could not find header.')\n\n # Open JAXA header file for reading (r)\n inHeader = open(inHeaderFile, 'r')\n\n inULat = ''\n inULon = ''\n\n # Get upper left coordinates from header\n i = 1\n for line in inHeader:\n if i == 13:\n inULat = line.strip()\n elif i == 14:\n inULon = line.strip()\n i+=1\n\n # Convert degrees to minutes\n inULat = str(int(inULat) * 3600)\n inULon = str(int(inULon) * 3600)\n\n # Close header file\n inHeader.close()\n\n # Set up string with header information for 16-bit image\n headerText = '''ENVI\ndescription = {\n %s}\nsamples = 4500\nlines = 4500\nbands = 1\nheader offset = 0\nfile type = ENVI Standard\ndata type = 12\ninterleave = bsq\nsensor type = Unknown\nbyte order = 0\nmap info = {Geographic Lat/Lon, 1.0000, 1.0000, %s, %s, 8.0000000000e-01, 8.0000000000e-01, WGS-84, units=Seconds}\nwavelength units = Unknown\n''' %(inHeaderFile, inULon, inULat)\n\n # Set up string with header information for 8-bit ancillary files\n headerTextByte = '''ENVI\ndescription = {\n %s}\nsamples = 4500\nlines = 4500\nbands = 1\nheader offset = 0\nfile type = ENVI Standard\ndata type = 1\ninterleave = bsq\nsensor type = Unknown\nbyte order = 0\nmap info = {Geographic Lat/Lon, 1.0000, 1.0000, %s, %s, 8.0000000000e-01, 8.0000000000e-01, WGS-84, units=Seconds}\nwavelength units = Unknown\n''' % (inHeaderFile, inULon, inULat)\n\n # Initialise variables to be returned\n inHHFile = None\n inHVFile = None\n\n # Check if files were found\n # Write header to text file if they were\n if len(inHHFileList) == 1:\n inHHFile = inHHFileList[0]\n inHHHeaderFile = inHHFile + '.hdr'\n inHHHeader = open(inHHHeaderFile, 'w')\n inHHHeader.write(headerText)\n inHHHeader.close()\n\n if len(inHVFileList) == 1:\n inHVFile = inHVFileList[0]\n inHVHeaderFile = inHVFile + '.hdr'\n inHVHeader = open(inHVHeaderFile, 'w')\n inHVHeader.write(headerText)\n inHVHeader.close()\n\n if len(inDateFileList) == 1:\n inDateFile = inDateFileList[0]\n inDateHeaderFile = inDateFile + '.hdr'\n inDateHeader = open(inDateHeaderFile, 'w')\n inDateHeader.write(headerText)\n inDateHeader.close()\n\n if len(inIncFileList) == 1:\n inIncFile = inIncFileList[0]\n inIncHeaderFile = inIncFile + '.hdr'\n inIncHeader = open(inIncHeaderFile, 'w')\n inIncHeader.write(headerText)\n inIncHeader.close()\n\n if len(inMaskFileList) == 1:\n inMaskFile = inMaskFileList[0]\n inMaskHeaderFile = inMaskFile + '.hdr'\n inMaskHeader = open(inMaskHeaderFile, 'w')\n inMaskHeader.write(headerText)\n inMaskHeader.close()\n\n # Return names of HH and HV files\n return inHHFile, inHVFile",
"def _getFITSHeader(self, hdulist, options):\n JWInstrument._getFITSHeader(self, hdulist, options)\n\n if self.image_mask is not None:\n hdulist[0].header.update('CORONPOS', self.image_mask, 'NIRISS coronagraph spot location')\n hdulist[0].header.update('FOCUSPOS',0,'NIRISS focus mechanism not yet modeled.')",
"def _det_header(self,):\n from astropy.io import fits\n from astropy import units\n coef = \"\"\"XTENSION= 'IMAGE ' / IMAGE extension \nBITPIX = -32 / number of bits per data pixel \nNAXIS = 2 / number of data axes \nNAXIS1 = 1987 / length of data axis 1 \nNAXIS2 = 2046 / length of data axis 2 \nPCOUNT = 0 / required keyword; must = 0 \nGCOUNT = 1 / required keyword; must = 1 \nCRPIX1S = 1448.000000 \nCRPIX2S = 703.000000 \nCRVAL1S = 136.204166175583 \nCRVAL2S = -32.4930169210235 \nCDELT1S = -0.000156666785871793 \nCDELT2S = 0.000156666785871793 \nPC1_1S = 0.755670245086613 \nPC1_2S = -0.654951085758962 \nPC2_1S = 0.654952042271387 \nPC2_2S = 0.755671475100696 \nCTYPE1S = 'RA---TAN-SIP' \nCTYPE2S = 'DEC--TAN-SIP' \nCUNIT1S = 'deg ' / X coordinate units \nCUNIT2S = 'deg ' / Y coordinate units \nCRPIX1 = 996.5 \nCRPIX2 = 1021.5 \nCRVAL1 = 0. \nCRVAL2 = 0. \nCDELT1 = 0.009075 \nCDELT2 = 0.009075 \nCTYPE1 = 'DETX ' / X coordinate type \nCTYPE2 = 'DETY ' / Y coordinate type \nCUNIT1 = 'mm ' / X coordinate units \nCUNIT2 = 'mm ' / Y coordinate units \nA_ORDER = 3 \nB_ORDER = 3 \nA_1_0 = -0.00125153527908 \nA_2_0 = -1.21308092203E-05 \nA_1_1 = 3.57697489791E-06 \nA_0_2 = -4.98655501953E-06 \nA_3_0 = -2.23440999701E-10 \nA_2_1 = 2.81157465077E-10 \nA_1_2 = 1.07794901513E-09 \nA_0_3 = 1.81850672672E-09 \nB_0_1 = -0.0119355520972 \nB_2_0 = 1.29190114841E-06 \nB_1_1 = -6.22446958796E-06 \nB_0_2 = 6.50166571708E-06 \nB_3_0 = 1.5607230673E-09 \nB_2_1 = 3.10676603198E-09 \nB_1_2 = 1.83793386146E-09 \nB_0_3 = 3.0412214095E-12 \nAP_ORDER= 3 / Polynomial order, axis 1, detector to sky \nBP_ORDER= 3 / Polynomial order, axis 2, detector to sky \nAP_1_0 = 0.00125480395117 \nAP_0_1 = -1.36411236372E-07 \nAP_2_0 = 1.2138698679E-05 \nAP_1_1 = -3.57720222046E-06 \nAP_0_2 = 5.12067402118E-06 \nAP_3_0 = 5.04857662962E-10 \nAP_2_1 = -4.41525720641E-10 \nAP_1_2 = -8.91001063794E-10 \nAP_0_3 = -2.06470726234E-09 \nBP_1_0 = 4.40624953378E-07 \nBP_0_1 = 0.0121093187715 \nBP_2_0 = -1.42450854484E-06 \nBP_1_1 = 6.34534204537E-06 \nBP_0_2 = -6.67738246399E-06 \nBP_3_0 = -1.675660935E-09 \nBP_2_1 = -3.07108005097E-09 \nBP_1_2 = -2.02039013787E-09 \nBP_0_3 = 8.68667185361E-11 \n \"\"\"\n hdr = fits.Header.fromstring(coef,'\\n') \n hdr['CRVAL1S'] = self.pointing.ra.deg\n hdr['CRVAL2S'] = self.pointing.dec.deg\n hdr['CRPIX1S'], hdr['CRPIX2S'] = self.grism_boresight(order=0) # this is in IMG coordinate\n x = self.PA(self.roll.to(units.deg)).to(units.rad).value\n hdr['PC1_1S'] = np.cos(x)\n hdr['PC1_2S'] = np.sin(x)\n hdr['PC2_1S'] = -np.sin(x)\n hdr['PC2_2S'] = np.cos(x)\n return hdr",
"def readHeader(self, filename):\n f = Usrxxx.readHeader(self, filename)\n\n for _ in range(1000):\n # Header\n data = fortran.read(f)\n if data is None:\n break\n size = len(data)\n\n if size != 50:\n if not f.closed:\n f.close()\n raise IOError(\"Invalid TRACK file\")\n\n # Parse header\n # see http://www.fluka.org/flair/ustsuw.f for reference\n header = struct.unpack(\"=i10siiififfif\", data)\n\n bin_det = Detector()\n bin_det.nb = header[0] # mtc\n bin_det.name = header[1].strip() # TITUTC\n bin_det.type = header[2] # ITUSTC\n bin_det.region = header[4] # IDUSTC\n bin_det.volume = header[5] # VUSRTC\n bin_det.low_en_neutr_sc = header[6] # LLNUTC\n bin_det.elow = header[7] # ETCLOW minimum energy\n bin_det.ehigh = header[8] # ETCHGH maximum energy\n bin_det.ne = header[9] # NETCBN number of energy intervals\n bin_det.de = header[10] # DETCBN energy bin width\n\n bin_det.xlow = bin_det.elow\n bin_det.xhigh = bin_det.ehigh\n bin_det.nx = header[6]\n bin_det.nx = bin_det.ne\n\n bin_det.ylow = 0.0\n bin_det.yhigh = 0.0\n bin_det.ny = 1\n\n bin_det.zlow = 0.0\n bin_det.zhigh = 0.0\n bin_det.nz = 1\n\n self.detector.append(bin_det)\n\n if bin_det.low_en_neutr_sc:\n data = fortran.read(f)\n bin_det.ngroup = struct.unpack(\"=i\", data[:4])[0]\n bin_det.egroup = struct.unpack(\"=%df\" % (bin_det.ngroup + 1), data[4:])\n else:\n bin_det.ngroup = 0\n bin_det.egroup = []\n\n size = (bin_det.ngroup + bin_det.ne) * 4\n if size != fortran.skip(f):\n raise IOError(\"Invalid USRTRACK file\")\n f.close()",
"def build_header(fai_fn):\n\n new_header = []\n new_header.append(\"##fileformat=VCFv4.1\")\n tid_pre = \"##contig=<ID=\"\n with open(fai_fn, 'r') as f:\n for line in f:\n fields = line.split(\"\\t\")\n ctig = fields[0]\n ctig_len = fields[1]\n hdr_str = tid_pre + ctig + \",length=\" + ctig_len + \">\"\n new_header.append(hdr_str)\n \n cols = [\"#CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\\n\"]\n new_header.append(\"\\t\".join(cols))\n hdr = \"\\n\".join(new_header)\n return hdr",
"def make_header_table(fitsdir, search_string='*fl?.fits'):\n headers = {}\n fitslist = list(glob.glob(os.path.join(fitsdir, search_string)))\n if len(fitslist) == 0: \n raise Exception('No fits files found in {}!'.format(fitsdir))\n # get headers from each image\n for fitsfile in fitslist:\n fitsname = fitsfile.split('/')[-1]\n head = dict(fits.getheader(fitsfile, 0, ignore_missing_end=True).items())\n try:\n photplam = fits.getval(fitsfile, 'PHOTPLAM', ext=0)\n except KeyError:\n photplam = fits.getval(fitsfile, 'PHOTPLAM', ext=1)\n head['PHOTPLAM'] = float(photplam)\n headers.update({fitsname:head})\n # construct dataframe\n df = pd.DataFrame(columns=['DETECTOR','FILTER','FILTER1','FILTER2','PHOTPLAM'])\n for fitsname, head in headers.items():\n row = pd.Series(dict(head.items()))\n df.loc[fitsname.split('.fits')[0]] = row.T\n lamfunc = lambda x: ''.join(x[~(x.str.startswith('CLEAR')|x.str.startswith('nan'))])\n filters = df.filter(regex='FILTER').astype(str).apply(lamfunc, axis=1)\n df.loc[:,'FILTER'] = filters\n df.drop(['FILTER1','FILTER2'], axis=1, inplace=True)\n df.sort_values(by='PHOTPLAM', inplace=True)\n return fitslist, df",
"def test_read_header(self):\n self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')\n filename = os.path.join(self.test_dir, 'test_array.fits')\n\n data0 = np.zeros(10, dtype=np.int32)\n data1 = np.zeros(10, dtype=np.float64)\n header = healsparse.fits_shim._make_header({'AA': 0,\n 'BB': 1.0,\n 'CC': 'test'})\n self.write_testfile(filename, data0, data1, header)\n\n with HealSparseFits(filename) as fits:\n exts = [0, 1, 'COV', 'SPARSE']\n for ext in exts:\n header_test = fits.read_ext_header(ext)\n for key in header:\n self.assertEqual(header_test[key], header[key])",
"def init_SDFITS(DSS,tablesize,time_column=False):\n # create the primary HDU and extension headers\n prihdu = pyfits.PrimaryHDU()\n hdr = pyfits.CardList()\n cols = make_basic_columns(tablesize,time_column)\n \n # add telescope location data to the table header\n logger.debug(\"DSS: %s\", DSS)\n if type(DSS) == list:\n # This may seem odd but in the most general case there could be two or\n # more antennas, like in an interferometer. In that case, however,\n # \"single dish\" FITS format doesn't apply. We'll just assume a list of\n # length 1.\n dss = DSS[0]\n else:\n dss = DSS\n if dss !=0 :\n hdr.append(pyfits.Card('telescop', dss.name))\n hdr.append(pyfits.Card('sitelong', dss['longitude']))\n hdr.append(pyfits.Card('sitelat', dss['latitude']))\n hdr.append(pyfits.Card('siteelev', dss['elevation']))\n hdr.append(pyfits.Card('obsgeo-x', dss['geo-x']))\n hdr.append(pyfits.Card('obsgeo-y', dss['geo-y']))\n hdr.append(pyfits.Card('obsgeo-z', dss['geo-z']))\n hdr.append(pyfits.Card('TIMESYS', 'UTC'))\n \n # there will always be four axes in the data array\n hdr.append(pyfits.Card('MAXIS',4))\n # we will always have the first data axis with frequency in the\n # from of the observatory, or time-delay for correlation functions\n # (cannot set MAXIS1 until we know the size of the spectrum)\n # hdr.append(pyfits.Card('MAXIS1',?))\n hdr.append(pyfits.Card('CTYPE1','FREQ-OBS'))\n \n # the second and third axes will be right ascension and declination\n hdr.append(pyfits.Card('MAXIS2',1))\n hdr.append(pyfits.Card('CTYPE2','RA---GLS'))\n \n hdr.append(pyfits.Card('MAXIS3',1))\n hdr.append(pyfits.Card('CTYPE3','DEC--GLS'))\n\n # the fourth axis is polarization. As a default\n hdr.append(pyfits.Card('MAXIS4',1))\n hdr.append(pyfits.Card('CTYPE4','STOKES'))\n\n if time_column:\n # the optional fifth data axis will be time\n # (cannot set MAXIS5 until we know the number of spectra)\n # hdr.append(pyfits.Card('MAXIS4',?))\n hdr.append(pyfits.Card('CTYPE5','TIME'))\n \n return prihdu, hdr, cols",
"def make_table_HDU(self, extname, FITSrecords, columns, header):\n # get the number of rows used\n nrows = len(FITSrecords['SCAN'].nonzero()[0])\n # create a new FITS record array with the right number of rows\n newFITSrec = pyfits.FITS_rec.from_columns(columns, nrows=nrows)\n # copy rows to the new record array\n for row in range(nrows):\n newFITSrec[row] = FITSrecords[row]\n # create the HDU\n tabhdu = pyfits.BinTableHDU(data=newFITSrec, header=header, name=extname)\n return tabhdu",
"def mkHeaders(phdr, events_header, extver=1):\n\n headers = [phdr]\n # This is a reference, not a copy. Keywords will be updated (in other\n # functions) in headers[1], and the output corrtag header as well as the\n # flt and counts headers will contain the updated values.\n headers.append(events_header)\n\n err_hdr = fits.Header()\n dq_hdr = fits.Header()\n err_hdr[\"extname\"] = (\"ERR\", \"extension name\")\n dq_hdr[\"extname\"] = (\"DQ\", \"extension name\")\n err_hdr[\"extver\"] = (extver, \"extension version number\")\n dq_hdr[\"extver\"] = (extver, \"extension version number\")\n if \"rootname\" in events_header:\n rootname = events_header[\"rootname\"]\n err_hdr[\"rootname\"] = (rootname, \"rootname of the observation set\")\n dq_hdr[\"rootname\"] = (rootname, \"rootname of the observation set\")\n if \"expname\" in events_header:\n expname = events_header[\"expname\"]\n err_hdr[\"expname\"] = (expname, \"exposure identifier\")\n dq_hdr[\"expname\"] = (expname, \"exposure identifier\")\n if \"ra_aper\" in events_header:\n err_hdr[\"ra_aper\"] = (events_header[\"ra_aper\"],\n \"RA of reference aperture center\")\n if \"dec_aper\" in events_header:\n err_hdr[\"dec_aper\"] = (events_header[\"dec_aper\"],\n \"Declination of reference aperture center\")\n if \"pa_aper\" in events_header:\n err_hdr[\"pa_aper\"] = (events_header[\"pa_aper\"],\n \"Position Angle of reference aperture center (de\")\n if \"dispaxis\" in events_header:\n err_hdr[\"dispaxis\"] = (events_header[\"dispaxis\"],\n \"dispersion axis; 1 = axis 1, 2 = axis 2, none\")\n if \"ngoodpix\" in events_header:\n err_hdr[\"ngoodpix\"] = (-999, \"number of good pixels\")\n if \"goodmean\" in events_header:\n err_hdr[\"goodmean\"] = (-999., \"mean value of good pixels\")\n if \"goodmax\" in events_header:\n err_hdr[\"goodmax\"] = (-999., \"maximum value of good pixels\")\n\n headers.append(err_hdr)\n headers.append(dq_hdr)\n\n return headers",
"def new_fits(outfile, **kwargs):\n # Fake data\n sci_data = numpy.arange(10000, dtype='float').reshape(100,100)\n err_data = numpy.sqrt(sci_data) # Poisson error\n dq_data = numpy.zeros(sci_data.shape, dtype='int16') # No bad pixel\n\n # Create individual extensions\n hdu_hdr = pyfits.PrimaryHDU()\n hdu_sci = pyfits.ImageHDU(sci_data)\n hdu_err = pyfits.ImageHDU(err_data)\n hdu_dq = pyfits.ImageHDU(dq_data)\n\n # Modify headers\n \n hdu_hdr.header['FILENAME'] = outfile\n hdu_hdr.header['NEXTEND'] = 3\n \n hdu_sci.header['BUNIT'] = 'COUNTS'\n hdu_sci.header['EXTNAME'] = 'SCI'\n hdu_sci.header['EXTVER'] = 1\n\n hdu_err.header['BUNIT'] = 'COUNTS'\n hdu_err.header['EXTNAME'] = 'ERR'\n hdu_err.header['EXTVER'] = 1\n\n hdu_dq.header['BUNIT'] = 'UNITLESS'\n hdu_dq.header['EXTNAME'] = 'DQ'\n hdu_dq.header['EXTVER'] = 1\n\n # Create multi-extension FITS\n hduList = pyfits.HDUList([hdu_hdr])\n hduList.append(hdu_sci)\n hduList.append(hdu_err)\n hduList.append(hdu_dq)\n\n # Write to file\n hduList.writeto(outfile, **kwargs)",
"def testFitsHeader(self):\n # getPixelOrigin() returns origin in lsst coordinates, so need to add 1 to\n # compare to values stored in fits headers\n parentCrpix = self.parent.getWcs().getPixelOrigin()\n\n # Make a sub-image\n x0, y0 = 20, 30\n llc = lsst.geom.Point2I(x0, y0)\n bbox = lsst.geom.Box2I(llc, lsst.geom.Extent2I(60, 50))\n deep = False\n subImg = afwImage.ExposureF(self.parent, bbox, afwImage.LOCAL, deep)\n\n with lsst.utils.tests.getTempFilePath(\".fits\") as outFile:\n subImg.writeFits(outFile)\n hdr = readMetadata(outFile)\n\n def checkLtvHeader(hdr, name, value):\n # Per DM-4133, LTVn headers are required to be floating point\n self.assertTrue(hdr.exists(name), name + \" not saved to FITS header\")\n self.assertIsInstance(\n hdr.getScalar(name), numbers.Real, name + \" is not numeric\")\n self.assertNotIsInstance(\n hdr.getScalar(name), numbers.Integral, name + \" is an int\")\n self.assertEqual(hdr.getScalar(name), value, name + \" has wrong value\")\n\n checkLtvHeader(hdr, \"LTV1\", -1*x0)\n checkLtvHeader(hdr, \"LTV2\", -1*y0)\n\n self.assertTrue(hdr.exists(\"CRPIX1\"), \"CRPIX1 not saved to fits header\")\n self.assertTrue(hdr.exists(\"CRPIX2\"), \"CRPIX2 not saved to fits header\")\n\n fitsCrpix = [hdr.getScalar(\"CRPIX1\"), hdr.getScalar(\"CRPIX2\")]\n self.assertAlmostEqual(\n fitsCrpix[0] - hdr.getScalar(\"LTV1\"), parentCrpix[0] + 1, 6, \"CRPIX1 saved wrong\")\n self.assertAlmostEqual(\n fitsCrpix[1] - hdr.getScalar(\"LTV2\"), parentCrpix[1] + 1, 6, \"CRPIX2 saved wrong\")",
"def write_head(self,suff=''):\n try:\n hdr=self.hdr\n except:\n print(\"Build header first!\")\n raise ValueError\n\n out_fname = 'input.magn_header'\n if self.devnam=='TCV' and suff=='':\n out_fname += '_'+self.infile[6:18]\n else:\n out_fname += '_'+suff\n\t\t\t\n print('OUT header '+out_fname)\n outfile = open(out_fname, 'w')\n \n \n #outfile.write('{:d} (R,z) wall points & divertor flag (1 = divertor, 0 = wall)\\n'.format(len(lines)))\n # shot info\n outfile.write('{:8d} {:10f} {:2d}\\n'.format(hdr['nSHOT'], hdr['tSHOT'], hdr['modflg']))\n #device name \n outfile.write(hdr['devnam'] +'\\n')\n # something + plasma current \n outfile.write('{:4d} {:10f}\\n'.format(hdr['FPPkat'], hdr['IpiFPP']))\n outfile.write('{:4d}\\n'.format(len(hdr['PFxx'])))\n # Write the special points\n for j in range(len(hdr['PFxx'])):\n # poloidal flux\n outfile.write('{:8.6f} '.format(hdr['PFxx'][j]))\n outfile.write(' \\n')\n\n for j in range(len(hdr['PFxx'])):\n # R\n outfile.write('{:8.6f} '.format(hdr['RPFx'][j]))\n outfile.write(' \\n')\n \n for j in range(len(hdr['PFxx'])):\n # z\n outfile.write('{:8.6f} '.format(hdr['zPFx'][j]))\n outfile.write(' \\n')\n \n #SSQ\n for i in range(0,len(hdr['SSQ']),4):\n tmp_str = ['{:8.6f} '.format(j) for j in hdr['SSQ'][i:i+4]]\n outfile.write(\" \".join(tmp_str))\n outfile.write(\"\\n\")\n \n #print rhoPF \n outfile.write(str(hdr['rhoPF'])+'\\n')\n # other arrays\n \n for arr_name in ('PFL','Vol','Area','Qpl'):\n print(\"Writing \", arr_name)\n arr = hdr[arr_name]\n for i in range(0,len(arr),4):\n tmp_str = ['{:18.10f}'.format(j) for j in arr[i:i+4]]\n outfile.write(\" \".join(tmp_str))\n outfile.write(\"\\n\")\n outfile.close()",
"def read_in_1d_fits(path):\n data_arr = pf.open(path)\n hdf = data_arr[0].header\n hde = data_arr[0].header\n F = data_arr[0].data\n E = data_arr[1].data\n W = (hdf['CRVAL1'] + (hdf['CRPIX1'] - 1 + np.arange(hdf['NAXIS1']))*hdf['CDELT1'])*10 \n return W, F, E, hdf, hde",
"def read_all_headers(self, buf):\n\n def header_read(buf, begin=0):\n \"\"\"\n This function reads from a single header in a given .fcs file\n Values of header are recorded in a list \"listvar\"\n Position of next header is extracted by looking at where DATA ends\n and adding +1 to that\n\n Parameters\n ----------\n buf : a buffer like data opened in read binary mode\n begin : byte offset. The default is 0.\n\n Returns\n -------\n Next offset - position of a next header\n List of [TEXT BEGIN, TEXT SIZE, DATA BEGIN, DATA SIZE] in bytes\n \"\"\"\n buf.seek(begin) # starting at the given offset\n stringvar = str(buf.read(56)) # reading header\n listvar = stringvar.split() # spliting header\n listvar.pop(0) # first element of header is \"FCS\" and it's useless\n while len(listvar) > 4: # listvar needs only 4 elements, and elements are removed from\n listvar.pop() # the tail until list is 4 elements long\n # offsets are converted into string\n listvar = [int(x) for x in listvar]\n next_offset = listvar[-1]+1 # next offset is calculated\n text_begin = listvar[0]\n # the difference of BEGIN and END gives size-1\n text_size = listvar[1]-listvar[0]\n data_begin = listvar[2]\n # the difference of BEGIN and END gives size-1\n data_size = listvar[3]-listvar[2]\n listvar = [text_begin, text_size, data_begin, data_size]\n return(next_offset, listvar)\n n = 0\n offsets = [n]\n list_of_lists = []\n\n while True: # this loop ensures that entire .fcs file is read\n try:\n # begining of the .fcs is 0 bytes\n next_off, listvar = header_read(buf, n)\n n = next_off+n # offsets are summed together\n offsets.append(n) # and put in a list\n list_of_lists.append(listvar)\n except ValueError:\n break # ends the loop\n\n header = pd.DataFrame(list_of_lists, columns=[\n \"text begin\", \"text size\", \"data begin\", \"data size\"]) # header is loaded into dataframe\n offsets.pop() # last offset is removed, as it is unnecessary\n offsets = np.array(offsets)\n # adding offsets to begin is necessary because\n header[\"text begin\"] = header[\"text begin\"]+offsets\n # MUSE does not have proper $NEXTDATA start\n header[\"data begin\"] = header[\"data begin\"]+offsets\n self.header = header"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the keywords from the specified FITS header template file are present in the input file. The default template is based on the FITS standard document for sensors, LCA10140. Returns a dictionary of missing keywords, keyed by header extension name.
|
def check_keywords(infile, template=template_file, verbose=True):
prototype_headers = fits_headers(template=template)
input = fits.open(infile)
report = []
missing_keys = {}
missing_headers = []
#
for i, extname in enumerate(prototype_headers):
prototype = prototype_headers[extname]
if i < 17:
# Check the first 17 input headers (PHDU + 16 image
# extensions) by index i, since EXTNAME is often not set in
# the image extensions.
try:
input_hdu = input[i]
except IndexError:
missing_headers.append(extname)
continue
else:
# Check for remaining prototype headers by extension name.
try:
input_hdu = input[extname]
except KeyError:
missing_headers.append(extname)
continue
# Check for required keywords.
missing_keys[extname] = [keyword for keyword in list(prototype.keys())
if keyword not in list(input_hdu.header.keys())]
if missing_keys[extname]:
report.append("Checking HDU #%i, '%s'. Missing keywords:"
% (i, input_hdu.name))
for key in missing_keys[extname]:
report.append(" %s" % key)
if missing_headers:
report.append("Missing headers:")
for item in missing_headers:
report.append(" %s" % item)
if verbose:
if report:
for line in report:
print(line)
else:
print("No missing keywords or extensions")
return missing_keys
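
# A brief usage sketch (assumptions: the module above is importable and
# 'sensor_exposure.fits' is a placeholder for a real sensor FITS file):
missing = check_keywords('sensor_exposure.fits', verbose=False)
for ext, keys in missing.items():
    if keys:
        print("%s is missing: %s" % (ext, ', '.join(keys)))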
|
[
"def fits_checkkeyword(fitsfile, keyword, ext=0, silent=False):\n import astropy.io.fits as pf\n\n fh = pf.open(fitsfile)\n try:\n return fh[ext].header[keyword]\n except KeyError as e:\n if silent:\n return None\n else:\n print('The specified extension or keyword is not found.')\n raise e",
"def fits_headers(template=template_file):\n headers = OrderedDict()\n hdr = fits.header.Header()\n with open(template) as fd:\n for line in fd:\n # Skip comments and whitespace lines.\n if line[0] == '#' or len(line.strip()) == 0:\n continue\n if line[:3] == 'END':\n if len(headers) == 0:\n # First hdu must be the Primary HDU.\n headers['PRIMARY'] = hdr\n else:\n # Subsequent ones must be extensions with an EXTNAME\n headers[hdr['EXTNAME']] = hdr\n hdr = fits.header.Header()\n continue\n data = line.split('=')\n key, value = data[0].strip(), '='.join(data[1:]).strip()\n data = value.split('/')\n value, comment = data[0].strip(), '/'.join(data[1:]).strip()\n hdr[key] = (_cast(value), comment)\n return headers",
"def check_headers(self: ProjectUpdater) -> None:\n for header_file_raw in self.header_files:\n assert header_file_raw[0] == '/'\n header_file = f'src/ballistica{header_file_raw}'\n if header_file.endswith('.h'):\n _check_header(self, header_file)",
"def testFitsHeaderKeywords(self):\n photParams = PhotometricParameters()\n gsdet = GalSimDetector(self.camera[0].getName(),\n GalSimCameraWrapper(self.camera),\n self.obs, self.epoch,\n photParams=photParams)\n self.assertEqual(gsdet.wcs.fitsHeader.getScalar('MJD-OBS'),\n self.obs.mjd.TAI)\n self.assertEqual(gsdet.wcs.fitsHeader.getScalar('EXPTIME'),\n photParams.nexp*photParams.exptime)\n self.assertEqual(gsdet.wcs.fitsHeader.getScalar('RATEL'),\n self.obs.pointingRA)\n self.assertEqual(gsdet.wcs.fitsHeader.getScalar('DECTEL'),\n self.obs.pointingDec)\n self.assertEqual(gsdet.wcs.fitsHeader.getScalar('ROTANGLE'),\n self.obs.rotSkyPos)",
"def validate_header(header, keywords, dripconf=False):\n if not isinstance(header, fits.header.Header):\n log.error(\"Header %s is not %s\" %\n (type(header), fits.header.Header))\n return False\n elif not isinstance(keywords, DataFrame):\n log.error(\"Keywords %s is not %s\" % (type(keywords), DataFrame))\n return False\n return all([*map(lambda x:\n validate_keyrow(header, x[1], dripconf=dripconf),\n keywords.iterrows())])",
"def checkFileHeader():\n # Get the header of the file\n cFile = open(sys.argv[1], \"rb\")\n encHeader = cFile.read(MAX_BYTES_TO_READ)\n cFile.close()\n\n print(\"Checking against hard coded signatures...\")\n\n for fileType, infoTuple in SIGNATURE_TABLE.items():\n keylst = []\n bytesToExam, matchBytes = infoTuple\n for i in range (bytesToExam):\n cByte = encHeader[i]\n ch = cByte >> 4\n cl = cByte & 15\n\n pByte = matchBytes[i]\n ph = pByte >> 4\n pl = pByte & 15\n\n k = getKeyFromPlainAndCipher(mapping, ph, pl, ch, cl)\n keylst.append(k)\n\n sys.stdout.write(\"{0}: \".format(fileType))\n first = True\n for byte in keylst:\n if first:\n sys.stdout.write(\"[{0}\".format(format(byte, '02x')))\n first = False\n else:\n sys.stdout.write(\", {0}\".format(format(byte, '02x')))\n sys.stdout.write(\"]\\n\")\n KEY_TABLE[fileType] = (bytesToExam, keylst)\n\n print(\"All signatures checked!\")",
"def verify_data(header_data: dict):\n if \"song\" not in header_data:\n raise ValueError(\"File must include a song title, but no <song> tag was found.\")\n if \"major_minor\" not in header_data:\n raise ValueError(\"File must include a key, but no <key> tag was found.\")\n return True",
"def test_read_header(self):\n self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')\n filename = os.path.join(self.test_dir, 'test_array.fits')\n\n data0 = np.zeros(10, dtype=np.int32)\n data1 = np.zeros(10, dtype=np.float64)\n header = healsparse.fits_shim._make_header({'AA': 0,\n 'BB': 1.0,\n 'CC': 'test'})\n self.write_testfile(filename, data0, data1, header)\n\n with HealSparseFits(filename) as fits:\n exts = [0, 1, 'COV', 'SPARSE']\n for ext in exts:\n header_test = fits.read_ext_header(ext)\n for key in header:\n self.assertEqual(header_test[key], header[key])",
"def determine_headers(self, *patterns):\n\n #prepare file names\n general_include_dir_name=self.prefix\n\n sf_header_list=[]\n for pattern in patterns:\n general_include_file_name=os.path.join(general_include_dir_name, \"gsl\", pattern)\n\n #test if exists\n if not os.path.isfile(general_include_file_name):\n raise Exception,\"could not find general header file '%s'\" % (general_include_file_name,)\n\n #parse file for includes\n general_include_file=file(general_include_file_name,\"r\")\n include_pattern=re.compile(\"#\\s*include\\s+[\\\"<](.+)[\\\">].*\")\n a_line=general_include_file.readline()\n while a_line:\n # if line is matching\n include_match=include_pattern.match(a_line)\n if include_match:\n new_header_name=os.path.join(general_include_dir_name,include_match.group(1))\n if os.path.isfile(new_header_name):\n sf_header_list.append(new_header_name)\n else:\n print (\"could not find header %s\",new_header_name)\n\n a_line=general_include_file.readline()\n general_include_file.close()\n sf_header_list.append(general_include_file_name)\n return sf_header_list",
"def check_for_keys(fname, *keys):\n with h5.File(fname, 'r') as ifi:\n all_keys = list(ifi.keys())\n for key in keys:\n if key not in all_keys:\n sys.stderr.write(\"Error, key {} not in hdf5 file {}\\n\".format(\n key, fname))\n raise KeyError",
"def _check_header_measurements_keys(\n self,\n headerdata: list[str],\n measurements: dict[str, typing.Any],\n verboseprint: typing.Callable[..., typing.Any],\n ) -> bool:\n counter_constant = 3 # Not sure, maybe for md with units. <Exc, Ems, F>\n meas = [line.split(\":\")[0].replace(\"Meas\", \"\") for line in headerdata]\n b = {k for k, v in collections.Counter(meas).items() if v == counter_constant}\n a = set(measurements.keys())\n verboseprint(\"check header and measurements.keys()\", a == b, a, b)\n return a == b",
"def _headers_exist(repository_ctx, path):\n for h in _INFERENCE_ENGINE_HEADERS:\n if not repository_ctx.path(\"%s/%s\" % (path, h)).exists:\n return False\n return True",
"def test_validate_file_headings():\n good_headings = \"NAME,ICAO,Latitude,Longitude\"\n # using typo's in heading names: NAMEE, Lattitude\n bad_headings = \"NAMEE,ICAO,Lattitude,Longitude\"\n\n assert validate_file_headings(good_headings)\n\n with pytest.raises(InvalidFileHeadingError):\n validate_file_headings(bad_headings)",
"def check_header(install_path):\n\n print(\n \":ggd:check-recipe: Checking that the final files have headers if appropriate\\n\"\n )\n\n installed_files = os.listdir(install_path)\n\n for file_name in [\n x for x in installed_files if os.path.isfile(os.path.join(install_path, x))\n ]:\n\n f_path = os.path.join(install_path, file_name)\n\n ## Check for an index file\n\n if file_name.strip().split(\".\")[-1] in set(\n [\"tbi\", \"bai\", \"crai\", \"fai\", \"tar\", \"bz2\", \"bw\", \"csi\", \"gzi\"]\n ):\n\n continue\n\n ## Skip fasta or fastq files\n if any(x in file_name for x in [\".fasta\", \".fa\", \".fastq\", \".fq\"]):\n continue\n\n ## Check for sam/bam/cram files\n if any(x in file_name for x in [\".sam\", \".bam\", \".cram\"]):\n import pysam\n\n try:\n samfile = pysam.AlignmentFile(f_path, check_sq=False)\n header = samfile.header\n if any(header.lengths):\n print(\n \":ggd:check-recipe: Header found in file {name}\\n\".format(\n name=file_name\n )\n )\n print(\"Head of file:\")\n print(\"---------------------------\")\n print(str(header).strip())\n for i, read in enumerate(samfile):\n print(read)\n if i >= 4:\n break\n print(\"---------------------------\\n\")\n\n else:\n print(\n \":ggd:check-recipe: !!ERROR!! No header found for file {name}\\n\".format(\n name=file_name\n )\n )\n print(\n \":ggd:check-recipe: !!ERROR!! A header is required for sam/bam/cram files\\n\"\n )\n return False\n\n except (ValueError, IOError, Exception) as e:\n print(str(e))\n print(\n \":ggd:check-recipe: !!ERROR!! No header found for file {name}\\n\".format(\n name=file_name\n )\n )\n print(\n \":ggd:check-recipe: !!ERROR!! A header is required for sam/bam/cram files\\n\"\n )\n return False\n\n ## Check vcf/bcf files\n elif any(x in file_name for x in [\".vcf\", \".bcf\"]):\n from cyvcf2 import VCF\n\n try:\n vcffile = VCF(f_path)\n header = str(vcffile.raw_header)\n\n if header:\n print(\n \":ggd:check-recipe: Header found in file {name}\\n\".format(\n name=file_name\n )\n )\n print(\"Head of file:\")\n print(\"---------------------------\")\n print(str(header).strip())\n for i, var in enumerate(vcffile):\n print(var)\n if i >= 4:\n break\n print(\"---------------------------\\n\")\n\n else:\n print(\n \":ggd:check-recipe: !!ERROR!! No header found for file {name}\\n\".format(\n name=file_name\n )\n )\n print(\n \":ggd:check-recipe: !!ERROR!! A header is required for vcf/bcf files\\n\"\n )\n return False\n\n except IOError as e:\n print(str(e))\n print(\n \":ggd:check-recipe: !!ERROR!! No header found for file {name}\\n\".format(\n name=file_name\n )\n )\n print(\n \":ggd:check-recipe: !!ERROR!! 
A header is required for vcf/bcf files\\n\"\n )\n return False\n\n ## Check other files\n else:\n import gzip\n\n try:\n file_handler = (\n gzip.open(f_path) if f_path.endswith(\".gz\") else open(f_path)\n )\n header = []\n body = []\n try:\n for line in file_handler:\n\n if type(line) != str:\n line = line.strip().decode(\"utf-8\")\n\n if len(line) > 0 and str(line)[0] in set([\"#\", \"!\", \"^\"]):\n\n header.append(str(line).strip())\n\n else:\n body.append(str(line).strip())\n\n if len(body) > 4:\n break\n\n except UnicodeDecodeError:\n print(\n \":ggd:check-recipe: Cannot decode file contents into unicode.\\n\"\n )\n pass\n\n if header:\n print(\n \":ggd:check-recipe: Header found in file {name}\\n\".format(\n name=file_name\n )\n )\n print(\"Head of file:\")\n print(\"---------------------------\")\n print(\"\\n\".join(header))\n print(\"\\n\".join(body))\n print(\"---------------------------\\n\")\n elif any(\n x in file_name\n for x in [\n \".gtf\",\n \".gff\",\n \".gff3\",\n \".bed\",\n \".bedGraph\",\n \".csv\",\n \".txt\",\n ]\n ):\n print(\n \":ggd:check-recipe: !!ERROR!! No header found for file {name}\\n\".format(\n name=file_name\n )\n )\n print(\n \":ggd:check-recipe: !!ERROR!! A header is required for this type of file\\n\"\n )\n print(\"First 5 lines of file body:\")\n print(\"---------------------------\")\n print(\"\\n\".join(body))\n print(\"---------------------------\\n\")\n return False\n else:\n print(\n \":ggd:check-recipe: !!WARNING!! No header found for file {name}\\n\".format(\n name=file_name\n )\n )\n print(\"First 5 lines of file body:\")\n print(\"---------------------------\")\n print(\"\\n\".join(body))\n print(\"---------------------------\\n\")\n print(\n \":ggd:check-recipe: !!WARNING!! GGD requires that any file that can have a header should. Please either add a header or if the file cannot have a header move forward.\\n\"\n )\n print(\n \":ggd:check-recipe: !!WARNING!! IF you move forward without adding a header when one should be added, this recipe will be rejected until a header is added.\\n\"\n )\n\n except IOError as e:\n print(\":ggd:check-recipe: !!ERROR!!\")\n print(str(e))\n return False\n\n return True",
"def validate_file_keys(landing_page_sections_json: dict) -> None:\n logging.info('Validating file keys are valid sections')\n allowed_keys = {'description', 'sections'}\n allowed_keys.update(landing_page_sections_json['sections'])\n not_allowed_key = [key for key in landing_page_sections_json.keys() if key not in allowed_keys]\n assert not not_allowed_key, f'Unsupported keys found: {not_allowed_key}, please add ' \\\n f'these keys under the \"sections\" key or remove them.'",
"def test_empty(self):\n\n # open file\n f = fits.open(\"test.fits\")\n # create ResultsFITS object\n rf = ResultsFITS(f[0], \"HIERARCH ANALYSIS TEST\")\n # check keys\n self.assertEqual(len(rf.keys()), 0)\n # close\n f.close()",
"def add_default_keywords(new_hdr):\n wcsaxes = new_hdr['WCSAXES']\n if wcsaxes == 3:\n default_pc = {\n 'PC1_1': 1,\n 'PC1_2': 0,\n 'PC1_3': 0,\n 'PC2_1': 0,\n 'PC2_2': 1,\n 'PC2_3': 0,\n 'PC3_1': 0,\n 'PC3_2': 0,\n 'PC3_3': 1,\n }\n default_cunit = {'CUNIT1': 'deg', 'CUNIT2': 'deg', 'CUNIT3': 'um'}\n default_ctype = {'CTYPE1': 'RA---TAN', 'CTYPE2': 'DEC--TAN', 'CTYPE3': 'WAVE'}\n elif wcsaxes == 2:\n default_pc = {\n 'PC1_1': 1,\n 'PC1_2': 0,\n 'PC2_1': 0,\n 'PC2_2': 1,\n }\n default_cunit = {'CUNIT1': 'deg', 'CUNIT2': 'deg'}\n default_ctype = {'CTYPE1': 'RA---TAN', 'CTYPE2': 'DEC--TAN'}\n\n if 'PC1_1' not in new_hdr:\n new_hdr.update(default_pc)\n if 'CUNIT1' not in new_hdr:\n new_hdr.update(default_cunit)\n if 'CTYPE1' not in new_hdr:\n new_hdr.update(default_ctype)\n\n return new_hdr",
"def read_excel_header_templates(blocks, template_path):\n\n # Initialize a dictionary, where the Key is the ECF Block Code\n template_headers = {}\n\n # Iterate over all the block keys, and export the list with all lines for\n # a specific file\n for block_key in blocks:\n # Generate the target file name\n file_name = template_path + block_key + TEMPLATE_EXCEL_EXTENSION\n\n try:\n # Read the excel file template\n excel_df = pd.read_excel(file_name, sheet_name=TEMPLATE_EXCEL_SHEET_NAME)\n\n # Get the column names from the template, and put them in a list\n header_data = excel_df.columns.tolist()\n\n # Put the list in the specific ECF key block\n template_headers[block_key] = header_data\n except FileNotFoundError:\n print(\"File {0} not found. Skipping it! Please check!\".format(file_name))\n\n return template_headers",
"def get_template_hdr(template,extname,extver=1):\n if template in [None,'','N/A','n/a']:\n return None\n\n if extname in [None,'PRIMARY']:\n extn = 0\n else:\n # count number of extensions with 'extname' in template\n # if we are trying to create an extension with 'extver' larger than\n # what the template file contains, simply use 'extver' == 1 from template\n timg = pyfits.open(template)\n tmax = 1\n for e in timg:\n if 'extver' in e.header and e.header['extver'] > tmax:\n tmax = e.header['extver']\n timg.close()\n if extver > tmax:\n extver = 1\n\n extn = (extname,extver)\n\n return pyfits.getheader(template,extn)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Accepts a dict from a schema version 1.0, 1.1 or 1.2 package containing a "platforms" key and converts it to a list of releases compatible with schema version 2.0.
|
def platforms_to_releases(info, debug):
output = []
temp_releases = {}
platforms = info.get('platforms')
for platform in platforms:
for release in platforms[platform]:
key = '%s-%s' % (release['version'], release['url'])
if key not in temp_releases:
temp_releases[key] = {
'sublime_text': '<3000',
'version': release['version'],
'date': info.get('last_modified', '2011-08-01 00:00:00'),
'url': update_url(release['url'], debug),
'platforms': []
}
if platform == '*':
temp_releases[key]['platforms'] = ['*']
elif temp_releases[key]['platforms'] != ['*']:
temp_releases[key]['platforms'].append(platform)
for key in temp_releases:
release = temp_releases[key]
if release['platforms'] == ['windows', 'linux', 'osx']:
release['platforms'] = ['*']
output.append(release)
return output
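
# A small worked example with made-up schema-1.x data. The dict below is an
# assumption chosen so the windows/linux/osx entries collapse to '*';
# update_url() from the surrounding module is assumed to pass the URL through
# unchanged when debug is False.
example_info = {
    'last_modified': '2013-03-01 12:00:00',
    'platforms': {
        'windows': [{'version': '1.2.0', 'url': 'http://example.com/pkg.zip'}],
        'linux': [{'version': '1.2.0', 'url': 'http://example.com/pkg.zip'}],
        'osx': [{'version': '1.2.0', 'url': 'http://example.com/pkg.zip'}],
    },
}
releases = platforms_to_releases(example_info, debug=False)
# One release is produced because all three platforms share the same
# version/url pair, and its platform list collapses to ['*'].
print(releases)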
|
[
"def get_packages(platform):\n with open('pkg-resolver/packages.json', encoding='utf-8', mode='r') as pkg_file:\n pkgs = json.loads(pkg_file.read())\n packages = []\n for platforms in filter(lambda x: x.get(platform) is not None, pkgs.values()):\n if isinstance(platforms.get(platform), list):\n packages.extend(platforms.get(platform))\n else:\n packages.append(platforms.get(platform))\n return packages",
"def test_package_list_with_platform(self):\n rid1 = self._create_release(platforms=['platformOne'])\n self._create_package(rid1, name='packageOne')\n\n rid2 = self._create_release(platforms=['platformTwo'])\n self._create_package(rid2, name='packageTwo')\n\n result = orlo.queries.package_list(platform='platformOne').all()\n self.assertEqual(len(result), 1)\n packages = [r[0] for r in result]\n self.assertIn('packageOne', packages)\n self.assertNotIn('packageTwo', packages)",
"def _parse_supported_versions(self, versions):\n parsed_versions = []\n for version in versions:\n try:\n # handle inputs of the form:\n # \"py3.10\" -> (3, 10)\n # \"3.10\" -> (3, 10)\n # \"py310\" -> (3, 10)\n # The last one (which is wagon's default) will however\n # break when we reach python 10 :) By then, we need to switch\n # to the dotted format.\n version = version.replace('py', '')\n if '.' in version:\n parsed = tuple(int(x) for x in version.split('.'))\n else:\n parsed = (int(version[0]), int(version[1:]))\n parsed_versions.append(parsed)\n except ValueError:\n pass\n return parsed_versions",
"def nu_get_supported_api_versions(base_url: str) -> list:\n\n http_session = requests.session()\n http_resp = http_session.get(url=base_url, verify=False)\n ver_supp = []\n if http_resp.ok:\n json_obj = http_resp.json()\n else:\n return ver_supp\n\n # Go throughout list of dicts and extract CURRENT versions\n for item in json_obj['versions']:\n if item['status'] == 'CURRENT':\n ver_supp.append(item['version'].upper())\n # Let's return most recent version as [0]\n ver_supp.sort(reverse=True)\n return ver_supp",
"def test_package_versions_with_platform(self):\n self._create_finished_release() # this release should not appear in result\n rid1 = self._create_release(platforms=['specific_platform'])\n pid1 = self._create_package(rid1, name='packageOne', version='1.0.1')\n self._start_package(pid1)\n self._stop_package(pid1)\n\n result = orlo.queries.package_versions(platform='specific_platform').all()\n\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0][0], 'packageOne')",
"def analyze_versions(crate_data):\n def get_major(semver):\n if semver is None:\n return None\n digits = semver.lstrip(\"^\").split(\".\")\n if digits[0] != \"0\":\n return digits[0]\n else:\n return \"0.{}\".format(digits[1])\n dependencies = defaultdict(dict)\n versions = defaultdict(set)\n # Fill datastructure first.\n for data in crate_data:\n for dependency in data['dependencies'] + data['dev-dependencies']:\n dependencies[dependency['name']][data['name']] = get_major(dependency['version'])\n versions[dependency['name']].add(get_major(dependency['version']))\n\n for (dependency, version_set) in versions.items():\n if len(version_set) == 1:\n dependencies.pop(dependency)\n\n return dependencies",
"def test_platform_list(self):\n self._create_release(platforms=['platformOne', 'platformTwo'])\n result = orlo.queries.platform_list().all()\n\n self.assertEqual(len(result), 2)\n platforms = [r[0] for r in result]\n self.assertIn('platformOne', platforms)\n self.assertIn('platformTwo', platforms)",
"def get_versions(url, requested_product, requested_version):\n valid_releasessorted = {}\n response = requests.get(url)\n if response.status_code == 200:\n json_result = response.json()\n versions = json_result[requested_product][\"versions\"]\n valid_releases = {}\n # do not want pre-releases; filter them out\n for item in versions.items():\n for build in item[1][\"builds\"]:\n if (build[\"os\"].casefold() == platform.system().casefold()):\n if (build[\"arch\"] == SUPPORTED_ARCH):\n if not (re.search('[a-zA-Z]', item[1][\"version\"])):\n valid_releases[item[1][\"version\"]] = build[\"url\"]\n\n for key in sorted(valid_releases,key=LooseVersion):\n valid_releasessorted[key] = valid_releases[key]\n else:\n raise requests.ConnectionError(\"Server did not return status 200 - returned {0}\".format(response.status_code))\n\n return valid_releasessorted",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.alternatives(group=cls), reverse=True)\n # the supported versions\n versions = Default,\n # go through the versions\n for version in versions:\n # scan through the alternatives\n for name in alternatives:\n # if it is match\n if name.startswith(version.flavor):\n # build an instance and return it\n yield version(name=name)\n\n # out of ideas\n return",
"def update_yaml_versions(yaml_versions, json_versions):\n\n if json_versions.get('services', False):\n for service in json_versions['services']:\n version, url = [(v, u) for (v, u) in service['versions'].items()\n if v == service['default']][0]\n yaml_versions['services'].update({\n service['name']: {\n \"version\": version,\n \"url\": url\n }\n })\n\n if json_versions.get('platforms', False):\n for platform in json_versions['platforms']:\n version, resources = [(v, r) for (v, r)\n in platform['versions'].items()\n if v == platform['default']][0]\n platform_resources = {}\n\n for item in resources:\n url = [r for r in json_versions['resources']\n if r['name'] == item['resource']][0]['versions'][\n item['version']]\n platform_resources.update({\n item['resource']: {\n 'version': item['version'],\n 'url': url\n }\n })\n\n yaml_versions['platform'].update({\n platform['name']: {\n 'version': version,\n 'resources': platform_resources\n }\n })\n\n return yaml_versions",
"def handle_semver_tags(self, entries):\n\n semver_test = re.compile(r'\\d+\\.\\d+\\.?\\d*')\n\n semver_entries, regular_entries = [], []\n for entry in entries:\n if semver_test.search(entry.tag):\n semver_entries.append(entry)\n else:\n regular_entries.append(entry)\n if len(semver_entries):\n try:\n semver_entries = sorted(semver_entries, key=lambda entry: LooseVersion(entry.tag), reverse=True)\n except Exception as e:\n # the error might me caused of having tags like 1.2.3.1, 1.2.3.beta\n # exception is cant convert str to int, it is comparing 'beta' to 1\n # if that fails then only take the numbers and sort them\n semver_entries = sorted(\n semver_entries,\n key=lambda entry: LooseVersion(semver_test.search(entry.tag).group()),\n reverse=True)\n\n return semver_entries + regular_entries",
"def convert(cargo_ver: str) -> T.List[str]:\n # Cleanup, just for safety\n cargo_ver = cargo_ver.strip()\n cargo_vers = [c.strip() for c in cargo_ver.split(',')]\n\n out: T.List[str] = []\n\n for ver in cargo_vers:\n # This covers >= and =< as well\n # https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#comparison-requirements\n if ver.startswith(('>', '<', '=')):\n out.append(ver)\n\n elif ver.startswith('~'):\n # Rust has these tilde requirements, which means that it is >= to\n # the version, but less than the next version\n # https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#tilde-requirements\n # we convert those into a pair of constraints\n v = ver[1:].split('.')\n out.append(f'>= {\".\".join(v)}')\n if len(v) == 3:\n out.append(f'< {v[0]}.{int(v[1]) + 1}.0')\n elif len(v) == 2:\n out.append(f'< {v[0]}.{int(v[1]) + 1}')\n else:\n out.append(f'< {int(v[0]) + 1}')\n\n elif '*' in ver:\n # Rust has astrisk requirements,, which are like 1.* == ~1\n # https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#wildcard-requirements\n v = ver.split('.')[:-1]\n if v:\n out.append(f'>= {\".\".join(v)}')\n if len(v) == 2:\n out.append(f'< {v[0]}.{int(v[1]) + 1}')\n elif len(v) == 1:\n out.append(f'< {int(v[0]) + 1}')\n\n else:\n # a Caret version is equivalent to the default strategy\n # https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#caret-requirements\n if ver.startswith('^'):\n ver = ver[1:]\n\n # If there is no qualifier, then it means this or the next non-zero version\n # That means that if this is `1.1.0``, then we need `>= 1.1.0` && `< 2.0.0`\n # Or if we have `0.1.0`, then we need `>= 0.1.0` && `< 0.2.0`\n # Or if we have `0.1`, then we need `>= 0.1.0` && `< 0.2.0`\n # Or if we have `0.0.0`, then we need `< 1.0.0`\n # Or if we have `0.0`, then we need `< 1.0.0`\n # Or if we have `0`, then we need `< 1.0.0`\n # Or if we have `0.0.3`, then we need `>= 0.0.3` && `< 0.0.4`\n # https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#specifying-dependencies-from-cratesio\n #\n # this works much like the ~ versions, but in reverse. Tilde starts\n # at the patch version and works up, to the major version, while\n # bare numbers start at the major version and work down to the patch\n # version\n vers = ver.split('.')\n min_: T.List[str] = []\n max_: T.List[str] = []\n bumped = False\n for v_ in vers:\n if v_ != '0' and not bumped:\n min_.append(v_)\n max_.append(str(int(v_) + 1))\n bumped = True\n else:\n if not (bumped and v_ == '0'):\n min_.append(v_)\n if not bumped:\n max_.append('0')\n\n # If there is no minimum, don't emit one\n if set(min_) != {'0'}:\n out.append('>= {}'.format('.'.join(min_)))\n if set(max_) != {'0'}:\n out.append('< {}'.format('.'.join(max_)))\n else:\n out.append('< 1')\n\n return out",
"def pep425tags_get_supported(versions=None, supplied_platform=None):\n supported = []\n\n # Versions must be given with respect to the preference\n if versions is None:\n versions = []\n version_info = get_impl_version_info()\n major = version_info[:-1]\n # Support all previous minor Python versions.\n for minor in range(version_info[-1], -1, -1):\n versions.append(''.join(map(str, major + (minor,))))\n\n impl = get_abbr_impl()\n\n abis = []\n\n abi = get_abi_tag()\n if abi:\n abis[0:0] = [abi]\n\n abi3s = set()\n import imp\n for suffix in imp.get_suffixes():\n if suffix[0].startswith('.abi'):\n abi3s.add(suffix[0].split('.', 2)[1])\n\n abis.extend(sorted(list(abi3s)))\n\n abis.append('none')\n\n platforms = get_platforms(supplied=supplied_platform)\n\n # Current version, current API (built specifically for our Python):\n for abi in abis:\n for arch in platforms:\n supported.append(('%s%s' % (impl, versions[0]), abi, arch))\n\n # No abi / arch, but requires our implementation:\n for i, version in enumerate(versions):\n supported.append(('%s%s' % (impl, version), 'none', 'any'))\n if i == 0:\n # Tagged specifically as being cross-version compatible\n # (with just the major version specified)\n supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))\n\n # Major Python version + platform; e.g. binaries not using the Python API\n for arch in platforms:\n supported.append(('py%s' % (versions[0][0]), 'none', arch))\n\n # No abi / arch, generic Python\n for i, version in enumerate(versions):\n supported.append(('py%s' % (version,), 'none', 'any'))\n if i == 0:\n supported.append(('py%s' % (version[0]), 'none', 'any'))\n\n return supported",
"def test_package_versions(self):\n rid1 = self._create_release(platforms=['platformOne'])\n pid1 = self._create_package(rid1, name='packageOne', version='1.0.1')\n pid2 = self._create_package(rid1, name='packageTwo', version='2.0.1')\n self._start_package(pid1)\n self._stop_package(pid1)\n self._start_package(pid2)\n self._stop_package(pid2)\n sleep(0.1) # To ensure some time separation\n rid2 = self._create_release(platforms=['platformOne'])\n pid1 = self._create_package(rid2, name='packageOne', version='1.0.2')\n pid2 = self._create_package(rid2, name='packageTwo', version='2.0.2')\n self._start_package(pid1)\n self._stop_package(pid1)\n self._start_package(pid2)\n self._stop_package(pid2, success=False)\n\n result = orlo.queries.package_versions().all()\n self.assertEqual(len(result), 2) # Two entries, packageOne/Two\n versions = [(p, v) for p, v in result] # strip out the time\n # Correct versions:\n self.assertIn(('packageOne', '1.0.2'), versions)\n self.assertIn(('packageTwo', '2.0.1'), versions)",
"def _findVersions(self):\n self.descriptorVersions = []\n for nm in self.simpleList:\n vers = 'N/A'\n if hasattr(DescriptorsMod, nm):\n fn = getattr(DescriptorsMod, nm)\n if hasattr(fn, 'version'):\n vers = fn.version\n self.descriptorVersions.append(vers)",
"def _get_available_engine_upgrades(client, major=False):\n results = {}\n paginator = client.get_paginator('describe_db_engine_versions')\n for page in paginator.paginate():\n engine_versions = page['DBEngineVersions']\n for v in engine_versions:\n if v['Engine'] not in results:\n results[v['Engine']] = {}\n if 'ValidUpgradeTarget' not in v or len(v['ValidUpgradeTarget']) == 0:\n continue\n for t in v['ValidUpgradeTarget']:\n if not major and t['IsMajorVersionUpgrade']:\n continue\n if LooseVersion(t['EngineVersion']) > LooseVersion(\n results[v['Engine']].get(v['EngineVersion'], '0.0.0')):\n results[v['Engine']][v['EngineVersion']] = t['EngineVersion']\n return results",
"def get_versions(language: str, framework: str) -> list:\n root = f'archetypes/{language}/{framework}'\n return [subdirectory for subdirectory in os.listdir(root) if os.path.isdir(os.path.join(root, subdirectory))\n and subdirectory not in excluded_archetypal_directories]",
"def get_installable_solc_versions() -> List[Version]:\n data = requests.get(BINARY_DOWNLOAD_BASE.format(_get_os_name(), \"list.json\"))\n if data.status_code != 200:\n raise ConnectionError(\n f\"Status {data.status_code} when getting solc versions from solc-bin.ethereum.org\"\n )\n version_list = sorted((Version(i) for i in data.json()[\"releases\"]), reverse=True)\n version_list = [i for i in version_list if i >= MINIMAL_SOLC_VERSION]\n return version_list",
"def _parse(version):\r\n version = version.replace(\"-\", \".\")\r\n version = version.split(\".\")\r\n version_core = version[:3]\r\n pre_release = version[3:]\r\n for index, char in enumerate(version_core[-1]):\r\n if not char.isdigit():\r\n pre_release = [version_core[-1][index:]] + pre_release\r\n version_core[-1] = version_core[-1][:index]\r\n version_core = list(map(int, version_core))\r\n while len(version_core) < 3:\r\n version_core.append(0)\r\n return version_core, pre_release"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Geocode a single location via the Google Maps API. Returns a tuple of (latitude, longitude).
|
def geocode(location):
gmaps = googlemaps.Client(key=settings.GAPI_KEY)
loc = gmaps.geocode(location, region="UK")
if not loc:
raise RuntimeError(f"Could not find {location} on Google maps")
else:
return (loc[0]["geometry"]["location"]["lat"],
loc[0]["geometry"]["location"]["lng"])
|
[
"def address_to_latlng(address):\n location_geo = geocode(address)\n location = {}\n location['lat'] = location_geo['lon']\n location['lon'] = location_geo['lat']\n print location\n return tuple(location.values())",
"def location(locations):\r\n ctx = ssl.create_default_context(cafile=certifi.where())\r\n geopy.geocoders.options.default_ssl_context = ctx\r\n\r\n geo = Nominatim(user_agent=\"map_main.py\", timeout=10)\r\n location1 = geo.geocode(locations)\r\n return location1.latitude, location1.longitude",
"def getGeocodeLocation(inputString):\n\n city = inputString.replace(\" \", \"+\")\n\n # Want results back in a JSON. Adding API key and input string to query.\"\n url = f\"https://maps.googleapis.com/maps/api/geocode/json?address={city}&key={google_api_key}\"\n\n # Request url and make the response a json that Python can read.\n r = requests.get(url).json()\n \n latitude = r[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n longitude = r[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\n return (latitude, longitude)",
"def user_geocode(geocode_dict):\n\tuser_data = geocode_dict['results'][0]['geometry']['location']\n\tuser_loc = (user_data['lat'], user_data['lng'])\n\n\treturn user_loc",
"def get_location():\n result = _make_request('https://freegeoip.net/json/')\n\n data = result.json()\n\n return (data['latitude'], data['longitude'])",
"def geocode(self):\n\n # If found in cache, return coords\n if self._address in Location._geocode_cache:\n lat, lon = Location._geocode_cache[self._address]\n self.set_lat_lon(lat, lon)\n return\n\n # Real geocoding begins here\n try:\n conn = httplib.HTTPSConnection(\"maps.googleapis.com\")\n params = {'sensor' : 'false', 'address' : self._address}\n url = \"/maps/api/geocode/xml?\" + urllib.urlencode(params)\n conn.request(\"GET\", url)\n r = conn.getresponse()\n if r.status == 200:\n geo_xml = r.read()\n if geo_xml:\n # Find lat, lon in returned XML\n t = xml.etree.ElementTree.fromstring(geo_xml)\n lat = t.findall('result/geometry/location/lat')\n lon = t.findall('result/geometry/location/lng')\n if lat and lon:\n # Successful\n self.set_lat_lon(float(lat[0].text),\n float(lon[0].text))\n return\n else:\n err = \"couldn't resolve address to lat,lon. Try another.\"\n else:\n err = \"not responding. Try later\"\n else:\n err = \"or network failure. Try later\"\n except Exception:\n err = \"exception\"\n if err:\n raise Usage(\"Google geocoder \" + err)",
"def address_to_coords(self, address):\n params = urlencode({\"sensor\": \"false\",\n \"address\": address})\n url = \"http://maps.googleapis.com/maps/api/geocode/json?\" + params\n results = json.loads(self.send(url))\n if results['status'] != 'OK':\n return None\n if len(results['results']) > 1:\n print \"Warning: search for %s returned more then one results, using the first one\" % address\n result = results['results'][0]\n location = result['geometry']['location']\n return \"%.7f\" % location['lat'], \"%.7f\" % location['lng']",
"def geocode(self, recode=False):\n if not self.lat or not self.long or recode:\n # get the geocoordinates for the adress\n # TODO log geocodings into the db\n g = geocoders.Google(settings.GOOGLE_API_KEY)\n adr = '%s, %s %s, %s' % (self.street, self.zipcode, self.city, self.country)\n (self.lat, self.long) = g.geocode(adr)[1]\n self.save()\n return (self.lat, self.long)",
"def geocode_location(address):\n try:\n result = Geocoder.geocode(address)\n lat, lng = result[0].coordinates\n if result.city != \"San Francisco\": # Database only returns foodtrucks in San Francisco\n return None\n return lat, lng\n except:\n return None",
"def get_latlng(address):\n print(\"querying mapquest for\", address)\n\n try:\n result = geocoder.mapquest(\n address, key=os.environ.get(\"MAPQUEST_API_KEY\")\n ).json\n except Exception as e:\n print(e)\n # Exit if a match can't be found\n sys.exit(1)\n \n result = (result[\"lat\"], result[\"lng\"])\n print(\"found\", result)\n return result",
"def get_city_coordinates(location):\r\n\r\n url = \"https://geocode.xyz/\"+location+\"?json=1\"\r\n params = {}\r\n headers = {'Content-Type': 'application/json'}\r\n response = requests.get(url, headers=headers, params=params)\r\n\r\n if response.status_code == 200:\r\n return json.loads(response.content.decode(\"utf-8\"))\r\n else:\r\n print(\"*** ERROR! Response \", response.status_code, \" ***\")\r\n return None",
"def long_lat():\n MAPQUEST_API_KEY = 'bvd5kR5ANCpY295vIH5qgDEcpKZzeuKR'\n\n url = f'http://www.mapquestapi.com/geocoding/v1/address?key={MAPQUEST_API_KEY}&location=Babson%20College'\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n pprint(response_data['results'][0]['locations'][0]['latLng'])\n lat = response_data['results'][0]['locations'][0]['latLng']['lat']\n longitude = response_data['results'][0]['locations'][0]['latLng']['lng']\n return lat, longitude",
"def get_gps_from_address(adress):\n\n google_api_url = \"http://maps.google.com/maps/api/geocode/json?address=%s&sensor=false\" \\\n % adress.encode('utf8')\n\n data_google = json.loads(requests.get(google_api_url).content)\n if data_google.get('results'):\n lat = float(data_google['results'][0]['geometry']['location']['lat'])\n lng = float(data_google['results'][0]['geometry']['location']['lng'])\n else:\n lat = 48\n lng = 2\n return lat, lng",
"def get_lat_lng(address):\n\n g = geocoder.google(address)\n return g.latlng",
"def fetchGeocode(location, town, street):\n\n sanelocation = urllib.quote(location)\n\n response = urllib2.urlopen(\"https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s&sensor=false\" % (sanelocation, apikey))\n\n jsondict = json.load(response)\n #print (response[1])\n if jsondict['results'] == []:\n jsondict['results'] == [\"\"]\n #raise Exception(\"Empty results string: \" + jsondict['status'])\n\n if jsondict['results'] != []:\n data = jsondict['results'][0]\n \n viewport = ( data['geometry']['viewport']['southwest']['lat'],\n data['geometry']['viewport']['southwest']['lng'],\n data['geometry']['viewport']['northeast']['lat'],\n data['geometry']['viewport']['northeast']['lng'] )\n outdict = { 'formattedaddress': data['formatted_address'],\n 'latitude': data['geometry']['location']['lat'],\n 'longitude': data['geometry']['location']['lng'],\n 'locationtype': data['geometry']['location_type'],\n 'viewport': viewport }\n\n newdict = {'latitude': data['geometry']['location']['lat'],\n 'longitude': data['geometry']['location']['lng']}\n\n time.sleep(1)\n\n c.execute('''INSERT INTO locations (location, street, lat, long) VALUES (?, ?, ?, ?)''', (town, street, newdict['latitude'], newdict['longitude']))\n conn.commit()\n return newdict",
"def get_lat_lng_from_area_name(address):\n \n # Complete the address by adding \"Chicago, United States\"\n complete_address = address + \" \" + cst.CHICAGO_ADDRESS\n \n # get url to request\n url = get_url_from_address(complete_address)\n \n # get response from url\n r = requests.get(url)\n \n # parse response text to find string containing latitude and longitude\n lat_and_lng = r.text.partition(cst.LAT_LNG_HTML_POSITION_START)[2].partition(cst.LAT_LNG_HTML_POSITION_STOP)[0]\n \n # retrieve lat and lng\n lng, lat = lat_and_lng.split(',')[1:]\n \n return lat, lng",
"def get_google_location_data(self, lat, lon):\n url = \"http://maps.googleapis.com/maps/api/geocode/json?latlng={},\\\n{}&sensor=false\".format(lat, lon)\n v = requests.get(url)\n j = v.json()\n try:\n # Get the first set of 'address_components' from the JSON results\n components = j['results'][0]['address_components']\n country = town = None\n for c in components:\n if \"country\" in c['types']:\n country = c['long_name']\n if \"locality\" in c['types']:\n town = c['long_name']\n if \"administrative_area_level_1\" in c['types']:\n state = c['long_name']\n return \"{} {} {}\".format(town, state, country)\n except:\n # return \"None\"\n return None",
"def get_position(address):\n return GoogleGeocoder().geocode(address)[1]",
"def getlatlon():\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return [layout_a, layout_b, layout_c] in the form of cutlass_lib definitions
|
def cutlass_lib_layouts():
import cutlass_lib
return [
cutlass_lib.library.LayoutType.RowMajor,
cutlass_lib.library.LayoutType.ColumnMajor,
cutlass_lib.library.LayoutType.RowMajor,
]
|
[
"def cutlass_lib_layouts():\n import cutlass_lib\n\n return [\n cutlass_lib.library.LayoutType.RowMajor,\n cutlass_lib.library.LayoutType.RowMajor,\n cutlass_lib.library.LayoutType.RowMajor,\n ]",
"def _get_layouts(self):\r\n pass",
"def __get_library_layout(self):\n self.add_debug('Fetch library layout ...')\n\n converter = LibraryLayoutConverter(\n self.stock_sample_creation_iso.rack_layout,\n parent=self)\n self.__library_layout = converter.get_result()\n\n if self.__library_layout is None:\n msg = 'Error when trying to convert library layout.'\n self.add_error(msg)\n else:\n self.__library_sectors = QuadrantIterator.sort_into_sectors(\n working_layout=self.__library_layout,\n number_sectors=NUMBER_SECTORS)\n del_sectors = []\n for sector_index, positions in self.__library_sectors.iteritems():\n if len(positions) < 1: del_sectors.append(sector_index)\n for sector_index in del_sectors:\n del self.__library_sectors[sector_index]",
"def get_layout(soup):\n try:\n info = soup.find(class_='course-essential-info-top').find_all('li')\n except AttributeError:\n return {}\n layout = dict([child.text.split(': ') for child in info][:-1])\n return layout",
"def LAYOUT() -> Struct: # pylint: disable=invalid-name\n return MARKET_LAYOUT",
"def define_layouts(self, **sect_buttons):\n sects = {'l':bLEFT,'r':bRIGHT,'c':bCENTER,'j':bJUSTIFIED}\n button_types = '+-chamony'\n \n new_sections = dict( (str(k), []) for k in [bLEFT,bRIGHT,bCENTER,bJUSTIFIED] )\n \n for kee, val in sect_buttons.items():\n if not kee:\n continue\n if kee.isdigit():\n if int(kee) not in sects.values():\n raise ValueError, \"invalid key {!r}\".format(kee)\n elif kee[0].lower() in sects:\n kee = str(sects[kee[0].lower()])\n else:\n raise ValueError, \"invalid key {!r}\".format(kee)\n \n if not isinstance(val,(list,tuple)):\n raise TypeError, \"keyword arguments of define_layouts() should be lists or tuples.\"\n \n #leest = []\n #for b in val:\n # if not isinstance(b,basestring):\n # raise TypeError, \"button-types must be strings, not {!r}\".format(b.__class__.__name__)\n # if not b:\n # continue\n # if val[0].lower() not in button_types:\n # raise ValueError, \"unknown button-type {!r}\".format(val[0].lower())\n # if val[0].lower() == 'o':\n # val = 'm'\n # leest.append(val[0].lower())\n new_sections[kee] = self._parse_button_type(*val,check_against=new_sections)\n \n self.__sections = new_sections",
"def get_component_packages_list(self) -> List[ComponentDefinition]:",
"def _convert_shapes1(self, design):\n for _pp in design.component_instances:\n _libid = -1\n _devn = -1\n _libname = 'default'\n _pname = _pp.library_id\n if -1 != _pp.library_id.find(':'):\n _libname, _pname = _pp.library_id.split(':')\n \n for _li, _ll in enumerate(self.libraries):\n if _libname == _ll.name:\n _libid = _li\n for _di, _dd in enumerate(_ll.devsets[0].shapesets):\n if _pname == _dd.name:\n _devn = _di\n break\n break\n\n self.shapeheader.parts.append(Eagle.Part(\n name=_pp.instance_id, libid=_libid, devsetndx=_devn,\n symvar=1, techno=1)) # after OpenJSON all parts are split\n return",
"def buildLayout(preset, panel, sourceBoard, sourceArea):\n layout = preset[\"layout\"]\n framing = preset[\"framing\"]\n try:\n type = layout[\"type\"]\n if type == \"grid\":\n placementClass = getPlacementClass(layout[\"alternation\"])\n placer = placementClass(\n verSpace=layout[\"vspace\"],\n horSpace=layout[\"hspace\"],\n hbonewidth=layout[\"hbackbone\"],\n vbonewidth=layout[\"vbackbone\"],\n hboneskip=layout[\"hboneskip\"],\n vboneskip=layout[\"vboneskip\"],\n hbonefirst=layout[\"hbonefirst\"],\n vbonefirst=layout[\"vbonefirst\"])\n substrates = panel.makeGrid(\n boardfile=sourceBoard, sourceArea=sourceArea,\n rows=layout[\"rows\"], cols=layout[\"cols\"], destination=VECTOR2I(0, 0),\n rotation=layout[\"rotation\"], placer=placer,\n netRenamePattern=layout[\"renamenet\"], refRenamePattern=layout[\"renameref\"],\n bakeText=layout[\"baketext\"])\n framingSubstrates = dummyFramingSubstrate(substrates, preset)\n panel.buildPartitionLineFromBB(framingSubstrates)\n backboneCuts = buildBackBone(layout, panel, substrates, framing)\n return substrates, framingSubstrates, backboneCuts\n if type == \"plugin\":\n lPlugin = layout[\"code\"](preset, layout[\"arg\"], layout[\"renamenet\"],\n layout[\"renameref\"], layout[\"vspace\"],\n layout[\"hspace\"], layout[\"rotation\"])\n substrates = lPlugin.buildLayout(panel, sourceBoard, sourceArea)\n framingSubstrates = dummyFramingSubstrate(substrates, preset)\n lPlugin.buildPartitionLine(panel, framingSubstrates)\n backboneCuts = lPlugin.buildExtraCuts(panel)\n return substrates, framingSubstrates, backboneCuts\n\n raise PresetError(f\"Unknown type '{type}' of layout specification.\")\n except KeyError as e:\n raise PresetError(f\"Missing parameter '{e}' in section 'layout'\")",
"def gather_pelayout(case):\n ###############################################################################\n ntasks = {}\n nthreads = {}\n rootpes = {}\n pstride = {}\n comp_classes = case.get_values(\"COMP_CLASSES\")\n\n for comp in comp_classes:\n ntasks[comp] = int(case.get_value(\"NTASKS_\" + comp))\n nthreads[comp] = int(case.get_value(\"NTHRDS_\" + comp))\n rootpes[comp] = int(case.get_value(\"ROOTPE_\" + comp))\n pstride[comp] = int(case.get_value(\"PSTRID_\" + comp))\n # End for\n return ntasks, nthreads, rootpes, pstride",
"def get_base_layout(figures):\n if not isinstance(figures, list):\n raise TypeError(\"Invalid figures '{0}'. \"\n \"It should be list.\"\n .format(figures))\n\n layout = {}\n for figure in figures:\n if not figure['layout']:\n raise Exception(\"Figure does not have 'layout'.\")\n\n for key, value in figure['layout'].items():\n layout[key] = value\n\n return layout",
"def _split_definitions(defs: List[Any]):\n constants = []\n functions = []\n aliases = []\n slots = None\n classes = []\n for d in defs:\n if isinstance(d, pytd.Class):\n classes.append(d)\n elif isinstance(d, pytd.Constant):\n if d.name == \"__slots__\":\n pass # ignore definitions of __slots__ as a type\n else:\n constants.append(d)\n elif isinstance(d, function.NameAndSig):\n functions.append(d)\n elif isinstance(d, pytd.Alias):\n aliases.append(d)\n elif isinstance(d, types.SlotDecl):\n if slots is not None:\n raise _ParseError(\"Duplicate __slots__ declaration\")\n slots = d.slots\n elif isinstance(d, types.Ellipsis):\n pass\n elif isinstance(d, astlib.Expr):\n raise _ParseError(\"Unexpected expression\").at(d)\n else:\n msg = \"Unexpected definition\"\n lineno = None\n if isinstance(d, astlib.AST):\n lineno = getattr(d, \"lineno\", None)\n raise _ParseError(msg, line=lineno)\n return constants, functions, aliases, slots, classes",
"def write_layout(self):\n # Welcome message\n if self.verbose > 0:\n print(\"[info] Generating layout in {0}...\".format(self.layoutdir))\n\n # Top selection panel\n indices = [\n \"\"\"<li><a href=\"{{{{ pathto('generated/{0}') }}}}\">\"\"\"\n \"\"\"{1}</a></li>\"\"\".format(x, self.title_for(x))\n for x in self.module_names]\n\n # Carousel items\n carousel_items = [item for item in os.listdir(self.carouselpath)]\n if len(carousel_items) == 0:\n raise IOError(\"No data found in folder '{0}'.\".format(\n self.carouselpath))\n images = []\n indicators = []\n for cnt, item in enumerate(carousel_items):\n if cnt == 0:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='0' \"\n \"class='active'></li>\")\n images.append(\n \"\"\"<div class=\"active item\">\"\"\"\n \"\"\"<a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</div></a>\"\"\" % item)\n else:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='{0}' \"\n \"</li>\".format(cnt))\n images.append(\n \"\"\"<div class=\"item\"><a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</a></div>\"\"\" % item)\n\n # Create layout maping\n pysphinxdoc_info = {}\n info_file = os.path.join(os.path.dirname(__file__), \"info.py\")\n with open(info_file) as open_file:\n exec(open_file.read(), pysphinxdoc_info)\n layout_info = {\n \"NAME_LOWER\": self.root_module_name,\n \"NAME_UPPER\": self.root_module_name.upper(),\n \"INDEX\": \"\\n\".join(indices),\n \"CAROUSEL_INDICATORS\": \"\\n\".join(indicators),\n \"CAROUSEL_IMAGES\": \"\\n\".join(images),\n \"DESCRIPTION\": self.rst2html(self.release_info[\"DESCRIPTION\"]),\n \"SUMMARY\": self.rst2html(self.release_info[\"SUMMARY\"]),\n \"LOGO\": self.root_module_name,\n \"URL\": self.release_info[\"URL\"],\n \"EXTRAURL\": (self.release_info.get(\"EXTRAURL\") or\n pysphinxdoc_info[\"URL\"]),\n \"EXTRANAME\": self.release_info.get(\"EXTRANAME\") or \"PYSPHINXDOC\"\n }\n\n # Start writting the layout\n template_layout_file = os.path.join(\n os.path.dirname(__file__), \"resources\", \"layout.html\")\n layout_file = os.path.join(self.layoutdir, \"layout.html\")\n self.write_from_template(layout_file, template_layout_file,\n layout_info)",
"def parse_layout(layout):\n for lt_obj in layout:\n print(lt_obj.__class__.__name__)\n print(lt_obj.bbox)\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n print(lt_obj.get_text())\n elif isinstance(lt_obj, LTFigure):\n parse_layout(lt_obj) # Recursive",
"def pyre_loadLayout(self):\n # access the factory\n from .Layout import Layout\n\n # build one and return it\n return Layout(name=f\"{self.pyre_name}.layout\")",
"def _get_macro_def(self, macro):\n prims_def = ''\n for primitive in macro.primitives:\n shape = primitive.shape\n exposure = primitive.is_additive\n rotation = shape.rotation #or primitive.rotation\n rotation = int((2 - rotation) * 180 or 0)\n\n if isinstance(shape, Circle):\n mods = [SHAPE_TAGS['circle']['int'],\n exposure,\n self._convert_units_str(shape.radius * 2),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y)]\n elif isinstance(shape, Rectangle) and shape.is_centered:\n mods = [SHAPE_TAGS['center_rectangle']['int'],\n exposure,\n self._convert_units_str(shape.width),\n self._convert_units_str(shape.height),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n rotation if not shape.flip_horizontal else -rotation]\n elif isinstance(shape, Rectangle) and not shape.is_centered:\n mods = [SHAPE_TAGS['rectangle']['int'],\n exposure,\n self._convert_units_str(shape.width),\n self._convert_units_str(shape.height),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n rotation]\n elif isinstance(shape, Polygon):\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.points]\n v_args = [vertices[i / 2][i % 2]\n for i in range(len(vertices) * 2)]\n mods = [SHAPE_TAGS['polygon']['int'],\n exposure] + v_args + [rotation]\n elif isinstance(shape, RegularPolygon):\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.vertices]\n mods = [SHAPE_TAGS['reg_polygon']['int'],\n exposure,\n vertices,\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n rotation]\n elif isinstance(shape, Moire):\n mods = [SHAPE_TAGS['moire']['int'],\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n self._convert_units_str(shape.ring_thickness),\n self._convert_units_str(shape.gap_thickness),\n self._convert_units_str(shape.max_rings),\n self._convert_units_str(shape.hair_thickness),\n self._convert_units_str(shape.hair_length),\n rotation]\n elif isinstance(shape, Thermal):\n mods = [SHAPE_TAGS['thermal']['int'],\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n self._convert_units_str(shape.inner_diameter),\n self._convert_units_str(shape.gap_thickness),\n rotation]\n mods = ','.join(str(m) for m in mods)\n prim_def = PRIMITIVE.format(mods=mods)\n prims_def += LINE.format(prim_def)\n macro_def = MACRO.format(name=macro.name,\n primitives=prims_def.strip())\n return LINE.format(macro_def)",
"def _get_repr(self, layoutdir):\n representations = {}\n layout_files = glob.glob(os.path.join(layoutdir, \"*.pkl\"))\n layout_files += glob.glob(os.path.join(layoutdir, \"*.cw\"))\n for path in layout_files:\n basename = os.path.basename(path).replace(\".pkl\", \"\")\n basename = basename.replace(\".cw\", \"\")\n project, name, timestamp = basename.split(\"_\")\n if project not in representations:\n representations[project] = {}\n representations[project].setdefault(name, []).append(\n {\"date\": timestamp, \"path\": path})\n for project, project_data in representations.items():\n for name, name_data in project_data.items():\n name_data.sort(key=lambda x: datetime.datetime.strptime(\n x[\"date\"], \"%Y-%m-%d\"))\n return representations",
"async def test_merge_split_component_definition(hass: HomeAssistant) -> None:\n packages = {\n \"pack_1\": {\"light one\": {\"l1\": None}},\n \"pack_2\": {\"light two\": {\"l2\": None}, \"light three\": {\"l3\": None}},\n }\n config = {config_util.CONF_CORE: {config_util.CONF_PACKAGES: packages}}\n await config_util.merge_packages_config(hass, config, packages)\n\n assert len(config) == 4\n assert len(config[\"light one\"]) == 1\n assert len(config[\"light two\"]) == 1\n assert len(config[\"light three\"]) == 1",
"def ClipsConstruct(self):\n # Construct definition\n construct = \"(deftemplate \" + self._templateName + \"\\n\"\n\n # Slots\n for slot in self._slots:\n construct += \"(slot \" + slot.Name + \")\\n\"\n\n # Multislots\n for multislot in self._multislots:\n construct += \"(multislot \" + multislot.Name + \")\\n\"\n\n # Finish construct\n construct += \")\"\n return construct"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return [layout_a, layout_b, layout_c] in the form of cutlass_lib definitions
|
def cutlass_lib_layouts():
import cutlass_lib
return [
cutlass_lib.library.LayoutType.RowMajor,
cutlass_lib.library.LayoutType.RowMajor,
cutlass_lib.library.LayoutType.RowMajor,
]
|
[
"def cutlass_lib_layouts():\n import cutlass_lib\n\n return [\n cutlass_lib.library.LayoutType.RowMajor,\n cutlass_lib.library.LayoutType.ColumnMajor,\n cutlass_lib.library.LayoutType.RowMajor,\n ]",
"def _get_layouts(self):\r\n pass",
"def __get_library_layout(self):\n self.add_debug('Fetch library layout ...')\n\n converter = LibraryLayoutConverter(\n self.stock_sample_creation_iso.rack_layout,\n parent=self)\n self.__library_layout = converter.get_result()\n\n if self.__library_layout is None:\n msg = 'Error when trying to convert library layout.'\n self.add_error(msg)\n else:\n self.__library_sectors = QuadrantIterator.sort_into_sectors(\n working_layout=self.__library_layout,\n number_sectors=NUMBER_SECTORS)\n del_sectors = []\n for sector_index, positions in self.__library_sectors.iteritems():\n if len(positions) < 1: del_sectors.append(sector_index)\n for sector_index in del_sectors:\n del self.__library_sectors[sector_index]",
"def get_layout(soup):\n try:\n info = soup.find(class_='course-essential-info-top').find_all('li')\n except AttributeError:\n return {}\n layout = dict([child.text.split(': ') for child in info][:-1])\n return layout",
"def LAYOUT() -> Struct: # pylint: disable=invalid-name\n return MARKET_LAYOUT",
"def define_layouts(self, **sect_buttons):\n sects = {'l':bLEFT,'r':bRIGHT,'c':bCENTER,'j':bJUSTIFIED}\n button_types = '+-chamony'\n \n new_sections = dict( (str(k), []) for k in [bLEFT,bRIGHT,bCENTER,bJUSTIFIED] )\n \n for kee, val in sect_buttons.items():\n if not kee:\n continue\n if kee.isdigit():\n if int(kee) not in sects.values():\n raise ValueError, \"invalid key {!r}\".format(kee)\n elif kee[0].lower() in sects:\n kee = str(sects[kee[0].lower()])\n else:\n raise ValueError, \"invalid key {!r}\".format(kee)\n \n if not isinstance(val,(list,tuple)):\n raise TypeError, \"keyword arguments of define_layouts() should be lists or tuples.\"\n \n #leest = []\n #for b in val:\n # if not isinstance(b,basestring):\n # raise TypeError, \"button-types must be strings, not {!r}\".format(b.__class__.__name__)\n # if not b:\n # continue\n # if val[0].lower() not in button_types:\n # raise ValueError, \"unknown button-type {!r}\".format(val[0].lower())\n # if val[0].lower() == 'o':\n # val = 'm'\n # leest.append(val[0].lower())\n new_sections[kee] = self._parse_button_type(*val,check_against=new_sections)\n \n self.__sections = new_sections",
"def get_component_packages_list(self) -> List[ComponentDefinition]:",
"def _convert_shapes1(self, design):\n for _pp in design.component_instances:\n _libid = -1\n _devn = -1\n _libname = 'default'\n _pname = _pp.library_id\n if -1 != _pp.library_id.find(':'):\n _libname, _pname = _pp.library_id.split(':')\n \n for _li, _ll in enumerate(self.libraries):\n if _libname == _ll.name:\n _libid = _li\n for _di, _dd in enumerate(_ll.devsets[0].shapesets):\n if _pname == _dd.name:\n _devn = _di\n break\n break\n\n self.shapeheader.parts.append(Eagle.Part(\n name=_pp.instance_id, libid=_libid, devsetndx=_devn,\n symvar=1, techno=1)) # after OpenJSON all parts are split\n return",
"def buildLayout(preset, panel, sourceBoard, sourceArea):\n layout = preset[\"layout\"]\n framing = preset[\"framing\"]\n try:\n type = layout[\"type\"]\n if type == \"grid\":\n placementClass = getPlacementClass(layout[\"alternation\"])\n placer = placementClass(\n verSpace=layout[\"vspace\"],\n horSpace=layout[\"hspace\"],\n hbonewidth=layout[\"hbackbone\"],\n vbonewidth=layout[\"vbackbone\"],\n hboneskip=layout[\"hboneskip\"],\n vboneskip=layout[\"vboneskip\"],\n hbonefirst=layout[\"hbonefirst\"],\n vbonefirst=layout[\"vbonefirst\"])\n substrates = panel.makeGrid(\n boardfile=sourceBoard, sourceArea=sourceArea,\n rows=layout[\"rows\"], cols=layout[\"cols\"], destination=VECTOR2I(0, 0),\n rotation=layout[\"rotation\"], placer=placer,\n netRenamePattern=layout[\"renamenet\"], refRenamePattern=layout[\"renameref\"],\n bakeText=layout[\"baketext\"])\n framingSubstrates = dummyFramingSubstrate(substrates, preset)\n panel.buildPartitionLineFromBB(framingSubstrates)\n backboneCuts = buildBackBone(layout, panel, substrates, framing)\n return substrates, framingSubstrates, backboneCuts\n if type == \"plugin\":\n lPlugin = layout[\"code\"](preset, layout[\"arg\"], layout[\"renamenet\"],\n layout[\"renameref\"], layout[\"vspace\"],\n layout[\"hspace\"], layout[\"rotation\"])\n substrates = lPlugin.buildLayout(panel, sourceBoard, sourceArea)\n framingSubstrates = dummyFramingSubstrate(substrates, preset)\n lPlugin.buildPartitionLine(panel, framingSubstrates)\n backboneCuts = lPlugin.buildExtraCuts(panel)\n return substrates, framingSubstrates, backboneCuts\n\n raise PresetError(f\"Unknown type '{type}' of layout specification.\")\n except KeyError as e:\n raise PresetError(f\"Missing parameter '{e}' in section 'layout'\")",
"def gather_pelayout(case):\n ###############################################################################\n ntasks = {}\n nthreads = {}\n rootpes = {}\n pstride = {}\n comp_classes = case.get_values(\"COMP_CLASSES\")\n\n for comp in comp_classes:\n ntasks[comp] = int(case.get_value(\"NTASKS_\" + comp))\n nthreads[comp] = int(case.get_value(\"NTHRDS_\" + comp))\n rootpes[comp] = int(case.get_value(\"ROOTPE_\" + comp))\n pstride[comp] = int(case.get_value(\"PSTRID_\" + comp))\n # End for\n return ntasks, nthreads, rootpes, pstride",
"def get_base_layout(figures):\n if not isinstance(figures, list):\n raise TypeError(\"Invalid figures '{0}'. \"\n \"It should be list.\"\n .format(figures))\n\n layout = {}\n for figure in figures:\n if not figure['layout']:\n raise Exception(\"Figure does not have 'layout'.\")\n\n for key, value in figure['layout'].items():\n layout[key] = value\n\n return layout",
"def _split_definitions(defs: List[Any]):\n constants = []\n functions = []\n aliases = []\n slots = None\n classes = []\n for d in defs:\n if isinstance(d, pytd.Class):\n classes.append(d)\n elif isinstance(d, pytd.Constant):\n if d.name == \"__slots__\":\n pass # ignore definitions of __slots__ as a type\n else:\n constants.append(d)\n elif isinstance(d, function.NameAndSig):\n functions.append(d)\n elif isinstance(d, pytd.Alias):\n aliases.append(d)\n elif isinstance(d, types.SlotDecl):\n if slots is not None:\n raise _ParseError(\"Duplicate __slots__ declaration\")\n slots = d.slots\n elif isinstance(d, types.Ellipsis):\n pass\n elif isinstance(d, astlib.Expr):\n raise _ParseError(\"Unexpected expression\").at(d)\n else:\n msg = \"Unexpected definition\"\n lineno = None\n if isinstance(d, astlib.AST):\n lineno = getattr(d, \"lineno\", None)\n raise _ParseError(msg, line=lineno)\n return constants, functions, aliases, slots, classes",
"def write_layout(self):\n # Welcome message\n if self.verbose > 0:\n print(\"[info] Generating layout in {0}...\".format(self.layoutdir))\n\n # Top selection panel\n indices = [\n \"\"\"<li><a href=\"{{{{ pathto('generated/{0}') }}}}\">\"\"\"\n \"\"\"{1}</a></li>\"\"\".format(x, self.title_for(x))\n for x in self.module_names]\n\n # Carousel items\n carousel_items = [item for item in os.listdir(self.carouselpath)]\n if len(carousel_items) == 0:\n raise IOError(\"No data found in folder '{0}'.\".format(\n self.carouselpath))\n images = []\n indicators = []\n for cnt, item in enumerate(carousel_items):\n if cnt == 0:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='0' \"\n \"class='active'></li>\")\n images.append(\n \"\"\"<div class=\"active item\">\"\"\"\n \"\"\"<a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</div></a>\"\"\" % item)\n else:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='{0}' \"\n \"</li>\".format(cnt))\n images.append(\n \"\"\"<div class=\"item\"><a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</a></div>\"\"\" % item)\n\n # Create layout maping\n pysphinxdoc_info = {}\n info_file = os.path.join(os.path.dirname(__file__), \"info.py\")\n with open(info_file) as open_file:\n exec(open_file.read(), pysphinxdoc_info)\n layout_info = {\n \"NAME_LOWER\": self.root_module_name,\n \"NAME_UPPER\": self.root_module_name.upper(),\n \"INDEX\": \"\\n\".join(indices),\n \"CAROUSEL_INDICATORS\": \"\\n\".join(indicators),\n \"CAROUSEL_IMAGES\": \"\\n\".join(images),\n \"DESCRIPTION\": self.rst2html(self.release_info[\"DESCRIPTION\"]),\n \"SUMMARY\": self.rst2html(self.release_info[\"SUMMARY\"]),\n \"LOGO\": self.root_module_name,\n \"URL\": self.release_info[\"URL\"],\n \"EXTRAURL\": (self.release_info.get(\"EXTRAURL\") or\n pysphinxdoc_info[\"URL\"]),\n \"EXTRANAME\": self.release_info.get(\"EXTRANAME\") or \"PYSPHINXDOC\"\n }\n\n # Start writting the layout\n template_layout_file = os.path.join(\n os.path.dirname(__file__), \"resources\", \"layout.html\")\n layout_file = os.path.join(self.layoutdir, \"layout.html\")\n self.write_from_template(layout_file, template_layout_file,\n layout_info)",
"def parse_layout(layout):\n for lt_obj in layout:\n print(lt_obj.__class__.__name__)\n print(lt_obj.bbox)\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n print(lt_obj.get_text())\n elif isinstance(lt_obj, LTFigure):\n parse_layout(lt_obj) # Recursive",
"def pyre_loadLayout(self):\n # access the factory\n from .Layout import Layout\n\n # build one and return it\n return Layout(name=f\"{self.pyre_name}.layout\")",
"def _get_macro_def(self, macro):\n prims_def = ''\n for primitive in macro.primitives:\n shape = primitive.shape\n exposure = primitive.is_additive\n rotation = shape.rotation #or primitive.rotation\n rotation = int((2 - rotation) * 180 or 0)\n\n if isinstance(shape, Circle):\n mods = [SHAPE_TAGS['circle']['int'],\n exposure,\n self._convert_units_str(shape.radius * 2),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y)]\n elif isinstance(shape, Rectangle) and shape.is_centered:\n mods = [SHAPE_TAGS['center_rectangle']['int'],\n exposure,\n self._convert_units_str(shape.width),\n self._convert_units_str(shape.height),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n rotation if not shape.flip_horizontal else -rotation]\n elif isinstance(shape, Rectangle) and not shape.is_centered:\n mods = [SHAPE_TAGS['rectangle']['int'],\n exposure,\n self._convert_units_str(shape.width),\n self._convert_units_str(shape.height),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n rotation]\n elif isinstance(shape, Polygon):\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.points]\n v_args = [vertices[i / 2][i % 2]\n for i in range(len(vertices) * 2)]\n mods = [SHAPE_TAGS['polygon']['int'],\n exposure] + v_args + [rotation]\n elif isinstance(shape, RegularPolygon):\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.vertices]\n mods = [SHAPE_TAGS['reg_polygon']['int'],\n exposure,\n vertices,\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n rotation]\n elif isinstance(shape, Moire):\n mods = [SHAPE_TAGS['moire']['int'],\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n self._convert_units_str(shape.ring_thickness),\n self._convert_units_str(shape.gap_thickness),\n self._convert_units_str(shape.max_rings),\n self._convert_units_str(shape.hair_thickness),\n self._convert_units_str(shape.hair_length),\n rotation]\n elif isinstance(shape, Thermal):\n mods = [SHAPE_TAGS['thermal']['int'],\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n self._convert_units_str(shape.inner_diameter),\n self._convert_units_str(shape.gap_thickness),\n rotation]\n mods = ','.join(str(m) for m in mods)\n prim_def = PRIMITIVE.format(mods=mods)\n prims_def += LINE.format(prim_def)\n macro_def = MACRO.format(name=macro.name,\n primitives=prims_def.strip())\n return LINE.format(macro_def)",
"def _get_repr(self, layoutdir):\n representations = {}\n layout_files = glob.glob(os.path.join(layoutdir, \"*.pkl\"))\n layout_files += glob.glob(os.path.join(layoutdir, \"*.cw\"))\n for path in layout_files:\n basename = os.path.basename(path).replace(\".pkl\", \"\")\n basename = basename.replace(\".cw\", \"\")\n project, name, timestamp = basename.split(\"_\")\n if project not in representations:\n representations[project] = {}\n representations[project].setdefault(name, []).append(\n {\"date\": timestamp, \"path\": path})\n for project, project_data in representations.items():\n for name, name_data in project_data.items():\n name_data.sort(key=lambda x: datetime.datetime.strptime(\n x[\"date\"], \"%Y-%m-%d\"))\n return representations",
"async def test_merge_split_component_definition(hass: HomeAssistant) -> None:\n packages = {\n \"pack_1\": {\"light one\": {\"l1\": None}},\n \"pack_2\": {\"light two\": {\"l2\": None}, \"light three\": {\"l3\": None}},\n }\n config = {config_util.CONF_CORE: {config_util.CONF_PACKAGES: packages}}\n await config_util.merge_packages_config(hass, config, packages)\n\n assert len(config) == 4\n assert len(config[\"light one\"]) == 1\n assert len(config[\"light two\"]) == 1\n assert len(config[\"light three\"]) == 1",
"def ClipsConstruct(self):\n # Construct definition\n construct = \"(deftemplate \" + self._templateName + \"\\n\"\n\n # Slots\n for slot in self._slots:\n construct += \"(slot \" + slot.Name + \")\\n\"\n\n # Multislots\n for multislot in self._multislots:\n construct += \"(multislot \" + multislot.Name + \")\\n\"\n\n # Finish construct\n construct += \")\"\n return construct"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gate inputs, then update gate settings according to c.
|
def call(
self, c: nd.NumDict[feature], *inputs: nd.NumDict
) -> Tuple[nd.NumDict, ...]:
gs = [self.store.isolate(key=k) for k in self.flags]
self.update(c)
return (self.store, *(x.mul(g) for g, x in zip(gs, inputs)))
|
[
"def update_and_send(self,inputs):\n assert(self.running == True)\n\n # TODO: figure out a better way to get state indexes\n # float64 vt 0\n # float64 alpha 1\n # float64 beta 2\n # float64 phi 3\n # float64 theta 4\n # float64 psi 5\n # float64 p 6\n # float64 q 7\n # float64 r 8\n # float64 pn 9\n # float64 pe 10\n # float64 h 11\n # float64 pow 12\n # float64 delta_e 13\n # float64 delta_a 14\n # float64 delta_r 15\n # float64 throttle 16\n self.vcas = inputs[0]\n self.alpha = inputs[1]\n self.beta = inputs[2]\n self.phi = inputs[3]\n self.theta = inputs[4]\n self.psi = inputs[5]\n self.phidot = inputs[6]\n self.thetadot = inputs[7]\n self.psidot = inputs[8]\n pn = inputs[9]\n pe = inputs[10]\n \n self.agl = inputs[11]/self.FG_FT_IN_M\n self.altitude = self.h0 + self.agl\n pu = self.agl\n lat, lon, _alt = pm.enu2geodetic(pe, pn, pu, self.lat0, self.lon0, self.h0, ell=None, deg=False)\n self.latitude = lat\n self.longitude = lon\n\n self.eng_state = [1,0,0,0] # Dummy values\n self.rpm = [6000,1,0,0,] # Dummy values\n\n self.elevator = -1.0*inputs[13]*self.elevator_max\n self.rudder = inputs[15]*self.runnder_max\n self.left_aileron = inputs[14]*self.aileron_max\n self.right_aileron = self.left_aileron\n\n # Send FDM\n self.sock.sendto(self.pack_to_struct(), (self.FG_IP, self.FG_PORT))\n\n # Send Control surfaces\n # flaperons is in radians:\n # surface-positions/leftrad2\n # surface-positions/leftrad\n # surface-positions/rightrad2\n # surface-positions/rightrad\n val = struct.pack('!fffff', -1*self.elevator,\n self.left_aileron, self.left_aileron, self.right_aileron, self.right_aileron)\n self.sock.sendto(val, (self.FG_IP, self.FG_GENERIC_PORT))",
"def gate(self, *args, **kwargs):\n for i in self.fcmdict:\n self.fcmdict[i].gate(*args, **kwargs)\n return self",
"def test_controlled_by_gates_fusion(backend):\n c = Circuit(4)\n c.add((gates.H(i) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n c.add((gates.RY(i, theta=0.5678) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())",
"def apply_gate(self, gate):\n\t\timport numpy as np\n\t\tif not self._is_unitary(gate):\n\t\t\traise ValueError(\"Gate must be unitary\")\n\n\t\tstate = np.array([self.alpha, self.beta])\n\n\t\tnew_state = tuple(np.matmul(np.array(gate), state))\n\n\t\tself.alpha, self.beta = new_state\n\n\t\treturn new_state",
"def apply_gate(state, gate, qubits, nqubits, target, omp_num_threads=get_threads()):\n return custom_module.apply_gate(state, gate, qubits, nqubits, target, omp_num_threads)",
"def apply_state(self, state):",
"def updateCircuit(circuit,\n verbose = False):\n if verbose:\n Warning(\"Currently only replaces to h,s,x,y,z gates\")\n possible_gates = list('hsxyz')\n \n # Convert circuit to qasm string so we can use string processing to switch\n qasm = circuit.qasm().split(';')\n \n \n # Make sure the gate you choose is not a cx gate\n gate_to_switch = np.random.randint(3,len(qasm)-1)\n while qasm[gate_to_switch][1:3] == 'cx' or qasm[gate_to_switch][1:3] == 'ba':\n gate_to_switch = np.random.randint(3,len(qasm)-1)\n \n # Get a new gate and make sure it's different form the current gate\n this_gate = qasm[gate_to_switch][1]\n new_gate = np.random.choice(possible_gates)\n while new_gate == this_gate:\n new_gate = np.random.choice(possible_gates)\n \n qasm[gate_to_switch] = '\\n' + new_gate + ' ' + qasm[gate_to_switch].split(' ')[1]\n \n qasm = ';'.join(qasm) \n circuit = qk.QuantumCircuit.from_qasm_str(qasm)\n \n if verbose:\n print(circuit)\n \n return circuit",
"def set_input(self, inputs):\n for i, node in enumerate(self.input_nodes):\n node.activation_level = inputs[i]",
"def apply_state(self, state):\n if len(state) > len(self.inputs): raise TypeError(\"Too many input states specified\")\n inputs = self.inputs.copy()\n self.inputs = []\n for i,s in enumerate(state):\n v = inputs[i]\n if s == '/': \n self.inputs.append(v)\n continue\n if s in ('0', '1'):\n self.scalar.add_power(-1)\n self.set_type(v, 2)\n if s == '1':\n self.set_phase(v, Fraction(1))\n elif s in ('+', '-'):\n self.scalar.add_power(-1)\n self.set_type(v, 1)\n if s == '-':\n self.set_phase(v, Fraction(1))\n else:\n raise TypeError(\"Unknown input state \" + s)",
"def update(self, inputs):\n\t\t\n\t\tif len(inputs) != self.ni - 1:\n\t\t\traise ValueError, \"wrong number of inputs\"\n\t\t\n\t\t# set input node activations\n\t\tfor c, v in enumerate(inputs):\n\t\t\tself.ai[c] = node(v)\n\t\t\t\n\t\t# set hidden node activations\n\t\tfor c, n in enumerate(self.ah):\n\t\t\tn.update(np.dot(self.wi[c], self.ai)\n\t\t\t\t\t\t+ np.dot(self.wh[c], self.ah))\n\t\t\n\t\t# set output node activations\n\t\tfor c, n in enumerate(self.ao):\n\t\t\tn.update(np.dot(self.wo[c], self.ah))\n\t\t\n\t\treturn self.ao",
"def _compute_control_inputs(self, traj ):\n\n r = traj.u.copy() # reference is input of combined sys\n npts = traj.t.shape[0]\n u = np.zeros([npts ,self.cds.plant.m])\n\n # Compute internal input signal_proc\n for i in range(npts):\n\n ri = r[i,:]\n yi = traj.y[i,:]\n xi = traj.x[i,:]\n ti = traj.t[i]\n\n # extract internal controller states\n xi,zi = self.cds._split_states( xi ) \n\n ui = self.cds.controller.c( zi, yi , ri , ti )\n\n u[i,:] = ui\n\n return u",
"def mutate(self, c):\n choice = random.randint(0,3)\n\n if (choice == 0 or choice == 1) and len(c) > 1:\n for i in range(0, self.max_removals_per_mutation):\n try:\n c.remove(random.randint(0, len(c)-1)) # Randomly remove a gate\n except ValueError:\n print \"ERROR: trying to remove more gates than there are in the Cascade\"\n print \"You should try decreasing the GA's self.max_removals_per_mutation parameter\"\n exit()\n elif choice == 2:\n c.append(self.random_toffoli())\n else:\n # or replace a gate with a random gate\n i = len(c)-1\n index = random.randint(0, i)\n c.remove(index)\n if index == i:\n c.append(self.random_toffoli())\n else:\n c.insert(self.random_toffoli(), index)",
"def apply_input(self, input_):\n self._state = random.choice(list(self.delta[self._state][input_]))",
"def updateActivity(self):\n def nt_in_2_potential(totals_in):\n amount = sum(totals_in.values()) \n potential = amount\n return potential\n \n def potential_2_nt_out(nt_type, potential):\n amount = potential\n return amount\n \n totals_in = {}\n #for each neurotransmitter accepted\n for nt in self.nt_accepted: \n totals_in[nt] = 0.0\n #for each input\n print 'Right before Counter forloop.'\n for compartment in self.inputs.keys():\n if compartment: \n w = self.inputs[compartment] \n print 'compartment:', compartment\n print 'freq/weight:', w\n #retrieve input amount\n output = compartment.getOutput(nt)\n print 'output:',nt,'is', output \n if output:\n print 'Updating totals_in.'\n totals_in[nt] += w*compartment.getOutput(nt)\n print 'totals_in:', totals_in[nt]\n #scale total input to input/point\n totals_in[nt] = totals_in[nt]/len(self.points)\n #Now calculate new potential per point\n potential = nt_in_2_potential(totals_in)\n #Then calculate each new output amout\n #for each nt output\n for nt in NEUROTRANSMITTERS:\n if self.outputs.isOutput(nt): \n new_amount = potential_2_nt_out(nt, potential)\n self.outputs.setAmount(nt, new_amount)\n #update outputs",
"def _compute_control_inputs(self, traj):\n\n r = traj.u.copy() # reference is input of combined sys\n npts = traj.t.shape[0]\n u = np.zeros([npts, self.cds.plant.m])\n\n # Compute internal input\n for i in range(npts):\n\n ri = r[i,:]\n yi = traj.y[i,:]\n ti = traj.t[i]\n\n ui = self.cds.controller.c( yi , ri , ti )\n\n u[i,:] = ui\n\n return u",
"def update(self, inputs):\n if len(inputs) != len(self.values.keys()) - 1:\n raise ValueError(\"Inputs do not match input layer nodes.\")\n\n # set the node values from the inputs\n for input_num in range(len(inputs)):\n self.values[input_num + 1] = inputs[input_num]\n\n # propogate the update to the next layer\n self._next_layer.update(self)",
"def test_set_parameters_fusion(backend):\n c = Circuit(2)\n c.add(gates.RX(0, theta=0.1234))\n c.add(gates.RX(1, theta=0.1234))\n c.add(gates.CNOT(0, 1))\n c.add(gates.RY(0, theta=0.1234))\n c.add(gates.RY(1, theta=0.1234))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())\n\n c.set_parameters(4 * [0.4321])\n fused_c.set_parameters(4 * [0.4321])\n np.testing.assert_allclose(fused_c(), c())",
"def set_calibration_input_params(self):\n self.cparams.parameters['blur'] = self.calibration_params['blur']\n self.cparams.parameters['morph'] = self.calibration_params['morph']\n self.cparams.parameters['H'] = self.calibration_params['H']\n self.cparams.parameters['S'] = self.calibration_params['S']\n self.cparams.parameters['V'] = self.calibration_params['V']",
"def input_data(self, inputs):\n for i, x in enumerate(inputs):\n self.activations[0][i] = x"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process a book element into the database.
|
def process_book_element(book_element):
book, created = Book.objects.get_or_create(pk=book_element.get('id'))
book.title = book_element.findtext('title')
book.description = book_element.findtext('description')
aliases = {}
same_aliases = False
book_aliases = {}
for alias in book.aliases.values():
book_aliases[alias['value']] = True
for alias in book_element.xpath('aliases/alias'):
scheme = alias.get('scheme')
value = alias.get('value')
aliases[scheme] = value
if value in book_aliases:
same_aliases = True
if same_aliases == False and len(book_aliases) > 0:
book, created = Book.objects.get_or_create(pk=aliases.values()[0])
book.title = book_element.findtext('title')
book.description = book_element.findtext('description')
for scheme, value in aliases.items():
try:
book.aliases.get_or_create(scheme=scheme, value=value)
except IntegrityError as e:
pass
book.save()
|
[
"def add_book_to_db(book: dict) -> None:\n if \"title\" in book:\n title = request.form['title']\n else:\n title = \"\"\n\n if \"authors\" in book:\n authors = \";\\n\".join(request.form['authors'].split(';'))\n else:\n authors = \"\"\n\n if \"publishedDate\" in book:\n published_date = request.form['publishedDate']\n else:\n published_date = \"\"\n\n if \"\" in book:\n industry_identifiers = request.form['industryIdentifiers']\n single_identifiers = industry_identifiers.split(';')\n industry_identifiers = \";\\n\".join([f\"{i.split(',')[0]}({i.split(',')[1]})\\n\" for i in single_identifiers])\n else:\n industry_identifiers = \"\"\n\n page_count = request.form['pageCount']\n links = \";\\n\".join(request.form['links'].split(','))\n languages = \";\\n\".join(request.form['languages'].split(','))\n\n book = Book(title=title,\n authors=authors,\n publishedDate=published_date,\n industryIdentifiers=industry_identifiers,\n pageCount=page_count,\n imageLinks=links,\n language=languages\n )\n\n DATABASE.session.add(book)\n DATABASE.session.commit()",
"def save_books_to_db(books: dict) -> None:\n for book in books:\n volume = book[\"volumeInfo\"]\n\n if \"title\" in volume:\n title = volume[\"title\"]\n else:\n title = \"\"\n\n separator = \";\"\n\n if \"authors\" in volume:\n authors = separator.join([f\"{author}\\n\" for author in volume[\"authors\"]])\n else:\n authors = \"\"\n\n if \"publishedDate\" in volume:\n published_date = volume[\"publishedDate\"]\n else:\n published_date = \"\"\n\n if \"industryIdentifiers\" in volume:\n industry_identifiers = volume[\"industryIdentifiers\"]\n industry_identifiers = separator.join([f\"{i['identifier']}({i['type']})\\n\"\n for i in industry_identifiers])\n else:\n industry_identifiers = \"\"\n\n if \"pageCount\" in volume:\n page_count = volume[\"pageCount\"]\n else:\n page_count = 0\n\n if \"imageLinks\" in volume:\n image_links = \"\\n\".join([f\"{v}\" for (k, v) in volume[\"imageLinks\"].items()])\n else:\n image_links = \"\"\n\n if \"language\" in volume:\n language = volume[\"language\"]\n else:\n language = \"\"\n\n DATABASE.session.add(Book(title=title,\n authors=authors,\n publishedDate=published_date,\n industryIdentifiers=industry_identifiers,\n pageCount=page_count,\n imageLinks=image_links,\n language=language\n )\n )\n DATABASE.session.commit()",
"def add_Book(self, Book_info):\n Book_info = self.change_str_to_mysql(Book_info)\n Book_info = tuple(Book_info)\n command = u\"\"\"self.cur.execute(\"INSERT INTO Book VALUES('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')\")\"\"\" % Book_info\n #print command\n exec(command)",
"def load_books():\n\n print \"Book!\"\n\n # open the csv file and unpack it\n # with open(\"/home/vagrant/src/best_books/data/bestbooks.csv\") as general:\n\n # creating relative path, base upon the _file_ Python global.\n # it makes the code to be more portable and easier to work with\n filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"data\", \"bestbooks.csv\")\n print \"Loading filename: %s\" % filename\n with open(filename) as general:\n reader = csv.reader(general)\n\n #unpacking each row in the file and looping over it.\n #appending each title to the titles list\n\n for award, year, genre, title, author, author2, author3 in reader:\n\n # if title == \"English Passengers\" and \"Hilary Mantel\" in [author, author2, author3]:\n # pdb.set_trace()\n # The date is in the file as year string;\n # we need to convert it to an actual datetime object.\n year = int(year)\n author = author.strip()\n award = award.strip()\n\n #create book object\n #first, we'll check if this current book title we already have in the book table\n #if we don't, then we have to create a book object\n #add it to session and commit it to the database\n #using func.lower helps to compare data without case sensitivity\n book = Book.query.filter(func.lower(Book.title) == func.lower(title)).first()\n if not book:\n book = Book(title=title)\n db.session.add(book)\n db.session.commit()\n\n #create award object\n book_award = Award.query.filter(func.lower(Award.name) == func.lower(award)).first()\n if not book_award:\n book_award = Award(name=award)\n db.session.add(book_award)\n db.session.commit()\n\n #create book award object\n get_book_award = BookAward.query.filter(BookAward.year == year,\n BookAward.book_id == book.book_id,\n BookAward.award_id == book_award.award_id).first()\n if not get_book_award:\n books_awards = BookAward(book_id=book.book_id,\n award_id=book_award.award_id,\n year=year)\n db.session.add(books_awards)\n db.session.commit()\n\n #create genre object\n if genre:\n new_genre = Genre.query.filter(func.lower(Genre.genre) == func.lower(genre)).first()\n if not new_genre:\n new_genre = Genre(genre=genre)\n db.session.add(new_genre)\n db.session.commit()\n\n #create book genre object\n get_book_genre = BookGenre.query.filter(BookGenre.book_id == book.book_id,\n BookGenre.genre_id == new_genre.genre_id).first()\n if not get_book_genre:\n books_genres = BookGenre(book_id=book.book_id,\n genre_id=new_genre.genre_id)\n db.session.add(books_genres)\n db.session.commit()\n\n #create first author object\n this_author = Author.query.filter(func.lower(Author.name) == func.lower(author)).first()\n if not this_author:\n this_author = Author(name=author)\n db.session.add(this_author)\n db.session.commit()\n\n #create book author object for the first author\n get_book_author = BookAuthor.query.filter(BookAuthor.book_id == book.book_id,\n BookAuthor.author_id == this_author.author_id).first()\n if not get_book_author:\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=this_author.author_id)\n db.session.add(books_authors)\n db.session.commit()\n\n\n # need to check if the book has a second author\n # if it does then we will check if this author is in the database\n # if it doesn't then we'll create a new author object,\n # add it to session and commit to the database.\n if author2:\n new_author2 = Author.query.filter(func.lower(Author.name) == func.lower(author2)).first()\n if not new_author2:\n new_author2 = Author(name=author2)\n db.session.add(new_author2)\n db.session.commit()\n\n # 
once we added this author to our database author table\n # we can create a books author connection object to the books authors table\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author2.author_id)\n\n # if we have this author in our database authors table, then\n # we have to check if we have this author book assossiation in our\n # books authors table.\n # if we don't, then we'll create this assossiation object in the\n # books authors table\n else:\n get_book_author2 = BookAuthor.query.filter(BookAuthor.book_id == book.book_id,\n BookAuthor.author_id == new_author2.author_id).first()\n if not get_book_author2:\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author2.author_id)\n db.session.add(books_authors)\n db.session.commit()\n\n # need to check if the book has a third author\n # if it does then we will check if this author is in the database\n # if it doesn't then we'll create a new author object,\n # add it to session and commit to the database\n if author3:\n new_author3 = Author.query.filter(func.lower(Author.name) == func.lower(author3)).first()\n if not new_author3:\n new_author3 = Author(name=author3)\n db.session.add(new_author3)\n db.session.commit()\n\n # once we added this author to our database author table\n # we can create a books author connection object to the books authors table\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author3.author_id)\n\n # if we have this author in our database authors table, then\n # we have to check if we have this author book assossiation in our\n # books authors table.\n # if we don't, then we'll create this assossiation object in the\n # books authors table\n else:\n get_book_author3 = BookAuthor.query.filter(BookAuthor.book_id == book.book_id,\n BookAuthor.author_id == new_author3.author_id).first()\n if not get_book_author3:\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author3.author_id)\n db.session.add(books_authors)\n db.session.commit()",
"def _process_book(link):\n # download and parse book info\n data = DOWNER.download(link)\n dom = dhtmlparser.parseString(\n utils.handle_encodnig(data)\n )\n dhtmlparser.makeDoubleLinked(dom)\n\n # some books are without price in expected elements, this will try to get\n # it from elsewhere\n price = None\n try:\n price = _strip_content(zapi.get_price(dom))\n except UserWarning:\n price = dom.find(\"p\", {\"class\": \"vaseCena\"})\n\n if price:\n price = price[0].getContent().replace(\" \", \" \")\n price = filter(lambda x: x.isdigit(), price.strip())\n\n if price:\n price = price[0] + \"kč\"\n else:\n price = \"-1\"\n else:\n price = \"-1\"\n\n # required informations\n pub = Publication(\n title=_strip_content(zapi.get_title(dom)),\n authors=_parse_authors(zapi.get_author(dom)),\n price=price,\n publisher=_strip_content(zapi.get_publisher(dom))\n )\n\n # optional informations\n pub.optionals.URL = link\n pub.optionals.pages = _strip_content(zapi.get_pages(dom))\n pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))\n pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))\n pub.optionals.binding = _strip_content(zapi.get_binding(dom))\n\n # post checks\n if pub.title.startswith(\"E-kniha:\"):\n pub.title = pub.title.replace(\"E-kniha:\", \"\", 1).strip()\n pub.optionals.is_ebook = True\n\n if pub.optionals.ISBN:\n if \" \" in pub.optionals.ISBN:\n pub.optionals.ISBN = pub.optionals.ISBN.split(\" \")[0]\n\n if \"(\" in pub.optionals.ISBN:\n pub.optionals.ISBN = pub.optionals.ISBN.split(\"(\")[0]\n\n return pub",
"def add_books(db) -> None:\n books = [\n (\"Book A\", \"Genre A\"),\n (\"Book B\", \"Genre B\"),\n (\"Book C\", \"Genre C\"),\n (\"Book D\", \"Genre A\"),\n (\"Book E\", \"Genre A\"),\n (\"Book F\", \"Genre B\"),\n (\"Book G\", \"Genre A\"),\n ]\n for book in books:\n row = models.Book()\n row.title = book[0]\n row.genre = book[1]\n row.save()",
"def store_book(connect, cursor, book_info):\n cursor.execute(\"\"\"SELECT * from book WHERE id = (?)\"\"\", (book_info.get('book_id'),))\n results = cursor.fetchall()\n results = [dict(ix) for ix in results]\n connect.commit()\n if len(results) > 0:\n return\n\n similar_books = book_info.get('similar_books')\n similar_books_list = \"\"\n for i, each_book_info in enumerate(similar_books, 1):\n similar_books_list += str(i) + \". \" + each_book_info.get('bookname') \\\n + \": \" + each_book_info.get('url')\n similar_books_list += \"\\n\"\n\n cursor.execute(\"\"\"insert into book values (?,?,?,?,?,?,?,?,?,?,?)\"\"\", (\n book_info.get('book_name'),\n book_info.get('book_url'),\n book_info.get('book_id'),\n book_info.get('book_ISBN'),\n book_info.get('book_author_url'),\n book_info.get('author_name'),\n book_info.get('book_rating'),\n book_info.get('rating_count'),\n book_info.get('review_count'),\n book_info.get('image_url'),\n similar_books_list\n ))\n connect.commit()\n print(\"There are \" + str(get_book_table_size(connect, cursor)) + \" books in boot_tb table\")",
"def add_book_to_db(cursor, book_object):\n sql_query = \"INSERT INTO books(name, price) VALUES (%s, %s) RETURNING ID\"\n cursor.execute(sql_query, (book_object['name'], book_object['price']))\n result = cursor.fetchone()[0]\n return result",
"def update_insert_books_tb(self, book_dic):\r\n book_dic = remove_empty_string(book_dic)\r\n book_id = book_dic['book_id']\r\n if self.is_book_exist(book_dic):\r\n # If book_id already exist in table, then update the book\r\n self.update_books_tb(book_dic, book_id)\r\n print(f'Book with id {book_id} is updated')\r\n else:\r\n # Book does not exist, then insert\r\n self.insert_books_tb(book_dic)\r\n print(f'Book with id {book_id} is created')",
"def put_book():\n dbh = db_handler.DbHandler()\n docs_book = dbh.fetch_books()\n book = {}\n is_id = request.args.get('id')\n if not is_id:\n abort(400, \"Bad Request: Invalid id input\")\n if not request.json:\n abort(400, \"Bad Request: Invalid json input\")\n if is_id:\n for obj in docs_book:\n if obj['_id'] == is_id:\n book = obj\n if book == {}:\n abort(404, \"Page Not Found: No such a book\")\n input_json = request.get_json(force=True)\n for key in input_json:\n if key == 'book_rating':\n book['book_rating'] = int(input_json[key])\n elif key == 'isbn':\n book['isbn'] = input_json[key]\n elif key == 'book_title':\n book['book_title'] = input_json[key].replace(\" \", \"\")\n elif key == 'book_rating_count':\n book['book_rating_count'] = int(input_json[key])\n elif key == 'book_review_count':\n book['book_review_count'] = int(input_json[key])\n elif key == 'book_url':\n book['book_url'] = input_json[key]\n else:\n abort(400, \"Bad Request: Invalid key\")\n dbh.insert_book(book)\n return jsonify(book), 201",
"def book_feed(self, pair):",
"def add_book(self, book: Book):\n self.books.append(book)",
"def read_book(self, book):\r\n book = self._library[self._library.find(book)]",
"def order_add_book(request, book):\n try:\n # now add this book to the current order and save it\n book.order = request.session['order']\n book.save()\n except KeyError:\n # there is no current order\n print(\"Tried to add a book to current order, but there isn't a current order\")\n raise KeyError",
"def create(self, book):\n return super(BookRepository, self).create(book)",
"def lookup_Book_by_ID(self, Book_id):\n command = u\"\"\"self.cur.execute(\"SELECT * FROM Book WHERE Book_id = %s\")\"\"\" % Book_id\n #print command\n exec(command)\n data = self.cur.fetchone()\n data = list(data)\n data = self.change_str_from_mysql(data)\n return data",
"def buy_book(self, book):\r\n self._balance += books[book]\r\n self._library += Book(book)",
"def update_bookdata():\n\tbId = t0.get()\n\tbtitle = t1.get()\n\tbauthor = t2.get()\n\tbavailable = t3.get()\n\tbtenant = t4.get()\n\tbdatedue = t5.get()\n\t\n\tif messagebox.askyesno(\"Warning!\", \"Are you sure you want to update this entry?\"):\n\t\tif '' in {btitle, bauthor, bavailable}:\n\t\t\tmessagebox.showerror(\"ERROR!\", \"Please ensure at least Title, Author, and Availability fields are filled!\")\n\t\telse:\n\t\t\tquery = \"UPDATE bookList SET book_title = ?, book_author = ?, availability = ?, tenant = ?, datedue = ? WHERE id = ?\"\n\t\t\tcursor.execute(query, (btitle, bauthor, bavailable, btenant, bdatedue, bId))\n\t\t\tclear()\n\telse:\n\t\treturn True",
"def update_books_tb(self, book_dic, book_id):\r\n self.books_tb.delete_one({'book_id': book_id})\r\n self.insert_books_tb(book_dic)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This provides a way for ajax calls to get the user's own picks
|
def get_picks(request):
response_data = {}
user = request.user
game_ids = []
#Only try this if we are asking at least one game. Otherwise, just return a normal response with no games.
if request.POST.get('games'):
try:
#I have no clue why, but the brackets are added to this array in the
#incoming json data. Oh well, no harm done.
game_ids = [int(g) for g in request.POST['games'].split(',')]
except MultiValueDictKeyError:
return_thing = request.POST
return HttpResponse(return_thing, status=400, content_type="application/json")
response_data['picks'] = {}
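    # Fetch the requested games with their related picks prefetched.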
games = Game.objects.prefetch_related('pick_set').filter(id__in=game_ids)
#Get picks by a list of games
for game in games.all():
pick = game.get_current_pick_by_author(user)
if pick:
response_data['picks'][game.id] = pick.winner.id
return HttpResponse(json.dumps(response_data), content_type="application/json")
|
[
"def get_pick_list(self, pick_list_id):\r\n return self.get('picklists/{}'.format(pick_list_id)).json()",
"def user_profile():\n user_id = session[\"user_id\"]\n picks = Pick.query.filter_by(author=user_id).all()\n return render_template(\n \"profile.html\",\n picks=picks\n )",
"def get_poolmembers(mypoolstring):\n global f5rest_url\n return (get_f5json(f5rest_url + mypoolstring + '/members/'))",
"def get_users_on_waitlist(self):\n waitlist = self.waitlistslot_set.all()\n return UserProfile.objects.filter(waitlistslot__in=waitlist)",
"def run(self):\n user_id = None\n valid_inputs = ['1', '2']\n selection = select_from_menu(valid_inputs)\n if selection == '1':\n try:\n user_id = int(input('\\nPlease enter the user id that you would like to use:\\n> '))\n invalid = False\n except ValueError:\n invalid = True\n while invalid:\n try:\n user_id = int(input('Invalid input - user id must be numeric. Please try again:\\n> '))\n invalid = False\n except ValueError:\n invalid = True\n num_owned_qs, avg_q_score = self.db_manager.get_num_owned_posts_and_avg_score(user_id, 1)\n num_owned_as, avg_a_score = self.db_manager.get_num_owned_posts_and_avg_score(user_id, 2)\n num_votes = self.db_manager.get_num_votes(user_id)\n return user_id, [num_owned_qs, avg_q_score, num_owned_as, avg_a_score, num_votes]\n elif selection == '2':\n return user_id, []",
"def fetch_qarnot_profiles():\n global script_dialog\n\n # disable submit button\n script_dialog.SetEnabled(\"SubmitButton\", False)\n # display loading message\n script_dialog.SetItems(\"QarnotProfileCombo\", [\"Loading profiles...\"])\n profile_list = script_dialog.findChild(\n ThinkboxUI.Controls.Scripting.ComboControl.ComboControl,\n \"QarnotProfileCombo\",\n )\n # set italic font\n profile_list_font = profile_list.font()\n profile_list_font.setItalic(True)\n profile_list.setFont(profile_list_font)\n\n # fetch profiles\n q_render_deadline.refresh_connection()\n qarnot_profiles = q_render_deadline.get_available_profiles()\n\n return qarnot_profiles",
"def pick(self, action: 'SoPickAction') -> \"void\":\n return _coin.SoProfile_pick(self, action)",
"def combo_callback(self):\n global o_img, p_img\n request_name = open_req_cb.get()\n selected_label.config(text='{}'.format(request_name))\n request_id = selected_label.cget('text').split(':')[0]\n user_name = id_entry.get()\n r = requests.get(\n address + \"/api/retrieve_request/\" + user_name + '/' + request_id)\n result = r.json()\n p_method.set(result['procedure'])\n selected_label.config(text=result['filename'])\n show_time(result)\n show_hist(result)\n o_img = result['original_img']\n p_img = result['processed_img']\n\n return None",
"async def pick(ctx: commands.Context, choice):\n await pick_or_ban(ctx, \"picks\", choice)",
"def _pick_up(\n self,\n command: models.PickUpDropTipCommand) -> ReturnType:\n return [\n PickUpTipRequest(\n pipetteId=command.params.pipette,\n labwareId=command.params.labware,\n wellName=command.params.well\n )\n ]",
"def get_single_user():",
"def pick_list(self):\n \n self._send(\"pick_list\")\n return [e2string(x) for x in self._read_json(220)]",
"def get(self):\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin():\n owner_json = self._GetAllOwnerDataJson()\n else:\n owner_json = self._GetOwnerDataForUserJson(user)\n else:\n self.RenderHtml('result.html', {\n 'errors': ['Log in to edit test owners.']})\n return\n\n self.RenderHtml('edit_test_owners.html',\n {'owner_info': owner_json})",
"def get_uploaders(request):\n admin_id = request.user.id # to get the id of the admin\n uploader_list = []\n for uploader in MyUser.objects.filter(owner=admin_id): # gets the list of uploaders whose owner is the logged in user.\n uploader_permissions = []\n uploader_module_permissions = Permission.objects.filter(content_type_id__model='moduledata', user=uploader.id) # gets the permissions of allotted to the uploader\n for module_permission in uploader_module_permissions:\n uploader_permissions.append(ModuleData.objects.get(code=module_permission.name))\n uploader_list.append({'uploader': uploader, 'permissions': uploader_permissions})\n return render(request, 'select_uploader.html', {'uploader_list': uploader_list})",
"async def handle_user_owned_request_listing(request):\n # Future authorization check here\n\n # Check for incorrect client query here\n\n user = request.match_info[\"user\"]\n\n try:\n ret = await request.app[\"db_conn\"].get_request_owned(user)\n except InterfaceError:\n handle_dropped_connection(request)\n\n return aiohttp.web.json_response(ret)",
"def get_multireddits(self, redditor, *args, **kwargs):\n redditor = six.text_type(redditor)\n url = self.config['multireddit_user'].format(user=redditor)\n return self.request_json(url, *args, **kwargs)",
"def pick_playlist(sp, username):\n # Grab user playlists created, followed, public, private, etc.\n playlists_result = sp.user_playlists(username)\n playlists = []\n user_ids = []\n track_sizes = []\n playlist_count = 0\n\n # Displays prompt.\n print(\"Pick a playlist: \")\n for playlist in playlists_result['items']:\n playlist_count += 1\n\n # Add to playlists name and ids\n playlist_id = playlist['id']\n name = playlist['name']\n playlists.append([name, playlist_id])\n\n # Record owner of the playlist and size\n user_ids.append(playlist['owner']['id'])\n track_sizes.append(playlist['tracks']['total'])\n print(str(playlist_count) + \" \" + name)\n\n # Check if choice is valid\n chosen_playlist = int(input())\n if chosen_playlist > playlist_count or chosen_playlist < 1:\n print(\"That is not a valid choice. Please try again.\")\n sys.exit(1)\n\n return playlists[chosen_playlist - 1], user_ids[chosen_playlist - 1], track_sizes[chosen_playlist - 1]",
"def process_user_data(self):\n self.show_menu()\n option = self.option_selected()\n if 1 <= option <= 6: # validates a valid option\n # (i,e algo: if option == 1, then data = self.menu.option1, then. send request to server with the data)\n if option == 1:\n data = self.option1()\n self.client.send(data)\n keys = self.client.receive()\n for i in keys:\n print(\"User id: \" + keys[i] + \"client name: \" + keys[i])\n elif option == 2:\n data = self.option2()\n self.client.send(data)",
"def selectMiningPool(update: Update, context: CallbackContext) -> int:\n tgUser = update.message.from_user\n poolname = update.message.text\n context.user_data['poolname'] = poolname\n logger.info(\"Mining Pool selected for %s: %s\", tgUser.username, poolname)\n update.message.reply_text(\n 'Now please tell me the username for that Mining Poool.',\n reply_markup=ReplyKeyboardRemove(),\n )\n return SETPOOLUSERNAME"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads or computes calibration. When finalImageHeight is given, scales the camera matrix to the final height, which may differ from the imageHeight at which the calibration is computed.
|
def LoadOrCompute(self,
squareWidth=None,
rows=None,
cols=None,
forceRecompute=False,
finalImageHeight=None):
if forceRecompute:
print('Forcing recomputation of calibration data.')
elif self.LoadFromFile(finalImageHeight):
return True
    if squareWidth is None or rows is None or cols is None:
raise ValueError(
'Need to pass chessboard params to compute calibration')
nChessFrames = 0
objectPoints = []
imagePoints = []
camera = CameraSource(
self.calibVideo, self.calibHeight, startFrame=self.startFrame)
self.calibVideoSize = (camera.ORIGINAL_WIDTH, camera.ORIGINAL_HEIGHT)
chess = Chessboard(squareWidth, rows, cols)
print('Extracting frames for calibration.')
while True:
frame = camera.GetFrame()
if frame is None:
break
if self.maxSamples > 0 and len(imagePoints) >= self.maxSamples:
break
calibSample = False
objectPointsFrame, imagePointsFrame = chess.GetObjectAndImagePoints(
frame, draw=True)
if objectPointsFrame is not None:
nChessFrames += 1
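        # Use every 100th frame with a detected chessboard as a calibration sample.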
if nChessFrames % 100 == 0:
calibSample = True
objectPoints.append(objectPointsFrame)
imagePoints.append(imagePointsFrame)
cv2.putText(frame, 'CALIBRATION SAMPLE', (50, 50),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
print('Extracted calibration sample ', len(imagePoints))
# Display frame.
if not camera.OutputFrameAndTestContinue('chess', frame, height=None)[0]:
print('User stopped calibration process...')
return False
if calibSample:
time.sleep(0.5)
print('Calibrating...')
_, self.cameraMatrix, self.distCoeffs, _, _ = cv2.calibrateCamera(
objectPoints, imagePoints, camera.ImageSize(), None, None)
self.hasCalib = True
self.PrintInfo()
print('Saving calib video size to: ', self.calibFileSize)
print('Saving camera matrix to: ', self.calibFileCam)
print('Saving distortion coeffs to: ', self.calibFileDist)
np.savetxt(self.calibFileSize, self.calibVideoSize)
np.savetxt(self.calibFileCam, self.cameraMatrix)
np.savetxt(self.calibFileDist, self.distCoeffs)
print('Done.')
self._RecomputeForNewSize(finalImageHeight)
return True
|
[
"def _load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(\n self.base_path, 'calib/{}.txt'.format(self.sequence))\n filedata = utils.read_calib_file(calib_filepath)\n\n # Create 3x4 projection matrices\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\n P_rect_30 = np.reshape(filedata['P3'], (3, 4))\n\n data['P_rect_00'] = P_rect_00\n data['P_rect_10'] = P_rect_10\n data['P_rect_20'] = P_rect_20\n data['P_rect_30'] = P_rect_30\n\n # Create 4x4 matrices from the rectifying rotation matrices\n R_rect_00 = np.eye(4)\n R_rect_00[0:3, 0:3] = np.reshape(filedata['R_rect'], (3, 3))\n data['R_rect_00'] = R_rect_00\n\n # Compute the rectified extrinsics from cam0 to camN\n T1 = np.eye(4)\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\n T2 = np.eye(4)\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\n T3 = np.eye(4)\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\n\n # # Compute the velodyne to rectified camera coordinate transforms\n T_cam_velo = filedata['Tr_velo_cam'].reshape((3, 4))\n T_cam_velo = np.vstack([T_cam_velo, [0, 0, 0, 1]])\n data['T_cam0_velo'] = R_rect_00.dot(T_cam_velo)\n data['T_cam1_velo'] = T1.dot(R_rect_00.dot(T_cam_velo))\n data['T_cam2_velo'] = T2.dot(R_rect_00.dot(T_cam_velo))\n data['T_cam3_velo'] = T3.dot(R_rect_00.dot(T_cam_velo))\n\n # # Compute the camera intrinsics\n data['K_cam0'] = P_rect_00[0:3, 0:3]\n data['K_cam1'] = P_rect_10[0:3, 0:3]\n data['K_cam2'] = P_rect_20[0:3, 0:3]\n data['K_cam3'] = P_rect_30[0:3, 0:3]\n\n # Compute the stereo baselines in meters by projecting the origin of\n # each camera frame into the velodyne frame and computing the distances\n # between them\n p_cam = np.array([0, 0, 0, 1])\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\n\n self.calib = namedtuple('CalibData', data.keys())(*data.values())",
"def calibrate(self):\n if not self._calibrate:\n raise ValueError(\"calibrate parameter must be set\")\n\n if self._calibration_samples >= self._max_samples:\n return self._camera\n\n frame = self.capture()\n\n if self._last_timestamp is None:\n self._last_timestamp = frame.timestamp\n\n if (frame.timestamp - self._last_timestamp).total_seconds() > self._frame_delay:\n ret, corners = frame.images[0].features\n if ret is True:\n self._objpoints.append(self._objp)\n self._imgpoints.append(corners)\n\n self._calibration_samples += 1\n self._last_timestamp = frame.timestamp\n\n if self._calibration_samples >= self._max_samples:\n img = frame.images[0].image\n shape = img.shape[::-1]\n self._camera = self._finish_calibration(self._objpoints, self._imgpoints, shape)\n return self._camera",
"def calibrate_and_draw(self, calib_images_folder, nx, ny):\n \n # +-----------------+\n # | FINDING CORNERS |\n # +-----------------+\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((nx*ny,3), np.float32)\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n print (\"Finding corners ... \") \n for image_link in os.listdir(calib_images_folder):\n image = mpimg.imread(calib_images_folder+image_link)\n ret, corners = self.find_chessboard_corners(image, nx, ny)\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n print (\"Done ...\")\n \n # +------------------+\n # | CALIBRATE CAMERA |\n # +------------------+\n print (\"Calibrating Camera ... \")\n image_link = random.choice(os.listdir(calib_images_folder))\n image = mpimg.imread(calib_images_folder+image_link)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n self.mtx = mtx\n self.dist = dist\n self.is_calibrated = True\n # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\n dist_pickle = {}\n dist_pickle[\"mtx\"] = mtx\n dist_pickle[\"dist\"] = dist\n pickle.dump( dist_pickle, open( \"camera_config/\"+self.name+\".conf\", \"wb\" ) )\n print (\"Camera configuration has been written to \" + \"camera_config/\" + self.name+\".conf\")\n print (\"Done ...\")\n \n # +---------------+\n # | VISUALIZATION |\n # +---------------+\n # Finding corners\n print (\"Showing random sample\")\n image_link = random.choice(os.listdir(calib_images_folder))\n image1 = mpimg.imread(calib_images_folder+image_link)\n num_of_try = 10\n ret, corners, image_draw1 = self.find_chessboard_corners_and_draw(image1, nx, ny)\n while ret==False and num_of_try>0:\n image_link = random.choice(os.listdir(calib_images_folder))\n image1 = mpimg.imread(calib_images_folder+image_link)\n ret, corners, image_draw1 = self.find_chessboard_corners_and_draw(image, nx, ny)\n num_of_try-=1\n # Undistortion\n image_link = random.choice(os.listdir(calib_images_folder))\n image2 = mpimg.imread(calib_images_folder+image_link)\n image_draw2 = cv2.undistort(image2, mtx, dist, None, mtx)\n #Plot\n f, ((ax11, ax12), (ax21, ax22)) = plt.subplots(2, 2, figsize=(20,10))\n ax11.imshow(image1)\n ax11.set_title('Original Image', fontsize=30)\n ax12.imshow(image_draw1)\n ax12.set_title('Corners Image', fontsize=30) \n ax21.imshow(image2)\n ax21.set_title('Distorted Image', fontsize=30)\n ax22.imshow(image_draw2)\n ax22.set_title('Undistorted Image', fontsize=30)\n plt.show()",
"def calibrate(self, calib_images_folder, nx, ny):\n\n # +-----------------+\n # | FINDING CORNERS |\n # +-----------------+\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((nx*ny,3), np.float32)\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n print (\"Finding corners ... \") \n for image_link in os.listdir(calib_images_folder):\n image = mpimg.imread(calib_images_folder+image_link)\n ret, corners = self.find_chessboard_corners(image, nx, ny)\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n print (\"Done ...\")\n\n # +------------------+\n # | CALIBRATE CAMERA |\n # +------------------+\n print (\"Calibrating Camera ... \")\n image_link = random.choice(os.listdir(calib_images_folder))\n image = mpimg.imread(calib_images_folder+image_link)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n self.mtx = mtx\n self.dist = dist\n self.is_calibrated = True\n # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\n dist_pickle = {}\n dist_pickle[\"mtx\"] = mtx\n dist_pickle[\"dist\"] = dist\n pickle.dump( dist_pickle, open( \"camera_config/\"+self.name+\".conf\", \"wb\" ) )\n print (\"Camera configuration has been written to \" + \"camera_config/\" + self.name+\".conf\")\n print (\"Done ...\")",
"def store_calibration(self) -> None:\n _, mtx, dist, _, _ = cv2.calibrateCamera(self.object_points, self.image_points,\n self.frame_size, None, None)\n width = self.frame_size[0]\n height = self.frame_size[1]\n camera_matrix, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (width, height), 0,\n (width, height))\n\n data = {\n 'camera_matrix': camera_matrix,\n 'mtx': mtx,\n 'dist': dist,\n }\n\n file_name = ASSETS_PATH / 'custom_calibration.pkl'\n\n if file_name.exists():\n file_name.unlink()\n\n with open(file_name, 'wb') as output:\n pickle.dump(data, output, pickle.HIGHEST_PROTOCOL)",
"def load_calibration(self) -> None:\n custom_file = ASSETS_PATH / 'custom_calibration.pkl'\n default_file = ASSETS_PATH / 'default_calibration.pkl'\n\n if not custom_file.exists() and not default_file.exists():\n print('[Camera Calibration] No calibration file found')\n return\n\n file_name = custom_file if custom_file.exists() else default_file\n\n with open(file_name, 'rb') as input_data:\n self.calibration = pickle.load(input_data)\n\n print('[Camera Calibration] ' + ('Custom' if custom_file.exists() else 'Default') +\n ' configuration loaded')",
"def refresh_calibrationImage(self):\n cal = self.constraints[self.calibrating_color]\n hn = cal['h_min']\n hx = cal['h_max']\n sn = cal['s_min']\n sx = cal['s_max']\n vn = cal['v_min']\n vx = cal['v_max']\n\n def fill(h, img):\n for y in range(0, 256):\n v = (vx - vn) * y / 256 + vn\n for x in range(0, 256):\n s = (sx - sn) * x / 256 + sn\n\n img[y, x, 0] = h\n img[y, x, 1] = s\n img[y, x, 2] = v\n\n fill(hn, self.swatchn)\n fill(hx, self.swatchx)\n self.swatchn = cv.cvtColor(self.swatchn, cv.COLOR_HSV2BGR)\n self.swatchx = cv.cvtColor(self.swatchx, cv.COLOR_HSV2BGR)\n hor = np.hstack((self.swatchn, self.swatchx))\n cv.imshow(\"cal\", hor)",
"def calibrateCamera(image_path ='camera_cal/calibration*.jpg',\n nx=9, ny=6, image_shape=(720, 1280)):\n\n # Read in an image\n images = glob.glob(image_path)\n objpoints = []\n imgpoints = []\n\n objp = np.zeros((ny * nx, 3), np.float32)\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2) #z-coordinate is all zero\n\n for fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n if ret == True:\n imgpoints.append(corners)\n objpoints.append(objp) \n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, image_shape, None, None)\n return ret, mtx, dist, rvecs, tvecs",
"def calibration():\n # To be used later to count number of images calibrated\n images_calibrated = []\n \n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((6*9,3), np.float32)\n objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)\n \n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n \n # Make a list of calibration images\n images = image_dir\n \n # Step through the list and search for chessboard corners\n for idx, fname in enumerate(images):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6), None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n images_calibrated.append(img)\n\n # Draw and save the corners\n cv2.drawChessboardCorners(img, (9,6), corners, ret)\n write_name = 'Corners_found'+str(idx)+'.jpg'\n cv2.imwrite('./camera_cal/output_images/'+ write_name, img)\n \n \n cv2.destroyAllWindows()\n \n # Do camera calibration given object points and image points\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n \n # save to pickle\n camera_cal_dict = {'mtx':mtx, 'dist':dist}\n pickle.dump(camera_cal_dict, open(camera_cal_fname, \"wb\"))\n \n print('Camera calibrated using {0} images'.format(np.array(images_calibrated).shape[0]))",
"def calibrate_video(path_video, time_begin=0, time_end=-1, destination_video=None, destination_txt=None):\n if destination_video is None:\n destination_video = Path(\"../data/4_model_output/videos/tries\")\n if destination_txt is None:\n destination_txt = Path(\"../data/2_intermediate_top_down_lanes/calibration/tries\")\n\n # Verify that the video exists\n if not path_video.exists():\n raise FindPathExtractError(path_video)\n\n # Verify that the folders exist and that the video or the txt file does not exist\n name_video = path_video.parts[-1]\n # Check that the folder exists\n if not destination_video.exists():\n raise FindPathExtractError(destination_video)\n\n corrected_video = \"corrected_\" + name_video\n path_corrected_video = destination_video / corrected_video\n # Check that the video does not exist\n if path_corrected_video.exists():\n raise AlreadyExistError(path_corrected_video)\n\n # Check that the folder exists\n if not destination_txt.exists():\n raise FindPathExtractError(destination_txt)\n\n name_txt = name_video[: -3] + \"txt\"\n path_txt = destination_txt / name_txt\n # Check that the video does not exist\n if path_txt.exists():\n raise AlreadyExistError(path_txt)\n\n # Get the image\n print(\"Get the image ...\")\n list_images = extract_image_video(path_video, time_begin, time_end)\n nb_images = len(list_images)\n\n # Selection of the 8 points in a random image\n print(\"Point selection for calibration ...\")\n (points_src, points_meter) = calibration_selection(list_images[rd.randint(int(nb_images / 10), int(nb_images / 5))])\n\n # Get the coordinate of the points in the final image in pixels and the extreme points\n (points_dst, exteme_points) = meter_to_pixel(points_src, points_meter, list_images[0])\n\n # Get the homography matrix\n homography = get_homography(points_src, points_dst)\n\n # Transform the image\n print(\"Correction of image ...\")\n for index_image in range(nb_images):\n list_images[index_image] = get_top_down_image(list_images[index_image], homography)\n\n # Get the fps\n video = cv2.VideoCapture(str(path_video))\n fps_video = int(video.get(cv2.CAP_PROP_FPS))\n\n # Make the video\n print(\"Make the corrected video ...\")\n make_video(corrected_video, list_images, fps_video, destination_video)\n\n # Construct the txt file\n to_store = [name_video, points_src, points_dst, homography, exteme_points]\n store_calibration_txt(name_txt, to_store, destination_txt)",
"def get_camera_calibration_values():\n\tcalibration_images = glob.glob('./camera_cal/calibration*.jpg')\n\treturn __calibrate_camera(calibration_images)",
"def image_calibration(data_array, headers, calib_folder=\"\", bias_files=None,\n dark_files=None, flat_files=None):\n\n filt_list, exp_list = [], []\n for hdr in headers:\n filt_list.append(hdr['filter'])\n exp_list.append(hdr['exptime'])\n\n master_bias = get_master_bias(infiles=bias_files, calib_folder=calib_folder)\n master_dark = dict([(time, get_master_dark(time, infiles=dark_files, calib_folder=calib_folder)) for time in np.unique(exp_list)])\n master_flat = dict([(filt, get_master_flat(filt, infiles=flat_files, calib_folder=calib_folder)) for filt in np.unique(filt_list)])\n\n data_calibrated = np.zeros(data_array.shape)\n headers_calibrated = deepcopy(headers)\n for i,data in enumerate(data_array):\n filt = filt_list[i]\n time = exp_list[i]\n\n data_calibrated[i] = (data - master_bias - master_dark[time])/master_flat[filt]\n #data_calibrated[i] += np.abs(np.median(data_calibrated[i]))\n data_calibrated[i][data_calibrated[i] <= 0.] = np.min(data_calibrated[i][data_calibrated[i] > 0.])\n #data_calibrated[i] = data_calibrated[i]/data_calibrated[i].max()*100.\n headers_calibrated[i].add_history(\"Calibration using bias, dark and flatfield done.\")\n\n return data_calibrated, headers_calibrated",
"def calibrate(self):\n\n self.need_calibration = True\n self.calibrator.clear_cache()\n self.initialize()\n\n # Generate a dummy engine to generate a new calibration cache.\n for input_idx in range(self.network.num_inputs):\n input_shape = self.network.get_input(input_idx).shape\n input_shape[0] = 2 # need even-numbered batch size for interleaving\n self.network.get_input(input_idx).shape = input_shape\n self.builder.build_engine(self.network, self.builder_config)",
"def _load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the rigid transformation from IMU to velodyne\n data[\"T_velo_imu\"] = self._load_calib_rigid(\"calib_imu_to_velo.txt\")\n\n # Load the camera intrinsics and extrinsics\n data.update(self._load_calib_cam_to_cam(\"calib_velo_to_cam.txt\", \"calib_cam_to_cam.txt\"))\n\n # Pre-compute the IMU to rectified camera coordinate transforms\n data[\"T_cam0_imu\"] = data[\"T_cam0_velo\"].dot(data[\"T_velo_imu\"])\n data[\"T_cam1_imu\"] = data[\"T_cam1_velo\"].dot(data[\"T_velo_imu\"])\n data[\"T_cam2_imu\"] = data[\"T_cam2_velo\"].dot(data[\"T_velo_imu\"])\n data[\"T_cam3_imu\"] = data[\"T_cam3_velo\"].dot(data[\"T_velo_imu\"])\n\n return data",
"def compute_calibration(\n self,\n sequences_list: List[str],\n batch_size: int = 1,\n pass_mode: str = \"forward\",\n tokens_list: List[str] = None,\n n_bins: int = 10,\n ) -> Dict[str, Any]:\n if tokens_list is None:\n tokens_list = NATURAL_AAS_LIST\n\n _check_sequence(sequences_list, self.model_dir, 1024)\n\n inputs, labels, tokens = self._process_sequences_and_tokens(\n sequences_list, tokens_list\n )\n logits = self._compute_logits(inputs, batch_size, pass_mode)\n logits, labels = self._filter_logits(logits, labels, tokens)\n calibration_dict = self._compute_calibration(logits, labels, n_bins)\n\n return calibration_dict",
"def load_calibration_parameters(self):\n def _load(directory): # Load calibration parameters from file\n with open(directory + self.parameters_file, 'r') as inputfile:\n self.calibration_params = json.load(inputfile)\n try:\n _load(self.dir)\n except IOError:\n self.plant_db.tmp_dir = \"/tmp/\"\n _load(self.plant_db.tmp_dir)",
"def read_from_yaml(self, yaml_file):\n log.debug(\"Initialized CalibrationParams from a yaml file: \" +\n yaml_file)\n\n with open(yaml_file, 'r') as file_pointer:\n calibration_params = yaml.load(file_pointer)\n\n raw_rgb_intrinsics = calibration_params['rgb_intrinsics']\n self.rgb_intrinsics = np.array(\n np.reshape(\n raw_rgb_intrinsics['data'],\n (raw_rgb_intrinsics['rows'], raw_rgb_intrinsics['cols'])))\n raw_rgb_distortion_coeffs = calibration_params['rgb_distortion_coeffs']\n self.rgb_distortion_coeffs = np.array(raw_rgb_distortion_coeffs['data'])\n raw_hand_eye_transform = calibration_params['hand_eye_transform']\n self.hand_eye_transform = np.array(\n np.reshape(raw_hand_eye_transform['data'],\n (raw_hand_eye_transform['rows'],\n raw_hand_eye_transform['cols'])))\n raw_depth_intrinsics = calibration_params['depth_intrinsics']\n self.depth_intrinsics = np.array(\n np.reshape(\n raw_depth_intrinsics['data'],\n (raw_depth_intrinsics['rows'], raw_depth_intrinsics['cols'])))\n raw_depth_distortion_coeffs = calibration_params[\n 'depth_distortion_coeffs']\n self.depth_distortion_coeffs = np.array(\n raw_depth_distortion_coeffs['data'])\n raw_depth_extrinsics = calibration_params['depth_extrinsics']\n self.depth_extrinsics = np.array(\n np.reshape(\n raw_depth_extrinsics['data'],\n (raw_depth_extrinsics['rows'], raw_depth_extrinsics['cols'])))\n raw_ir1_intrinsics = calibration_params['ir1_intrinsics']\n self.ir1_intrinsics = np.array(\n np.reshape(\n raw_ir1_intrinsics['data'],\n (raw_ir1_intrinsics['rows'], raw_ir1_intrinsics['cols'])))\n raw_ir1_distortion_coeffs = calibration_params['ir1_distortion_coeffs']\n self.ir1_distortion_coeffs = np.array(raw_ir1_distortion_coeffs['data'])\n raw_ir2_intrinsics = calibration_params['ir2_intrinsics']\n self.ir2_intrinsics = np.array(\n np.reshape(\n raw_ir2_intrinsics['data'],\n (raw_ir2_intrinsics['rows'], raw_ir2_intrinsics['cols'])))\n raw_ir2_distortion_coeffs = calibration_params['ir2_distortion_coeffs']\n self.ir2_distortion_coeffs = np.array(raw_ir2_distortion_coeffs['data'])\n raw_ir_extrinsics = calibration_params['ir_extrinsics']\n self.ir_extrinsics = np.array(\n np.reshape(raw_ir_extrinsics['data'],\n (raw_ir_extrinsics['rows'], raw_ir_extrinsics['cols'])))\n\n inv_extrinsics = np.linalg.inv(self.ir_extrinsics)\n\n self.extrinsics_r = inv_extrinsics[:3, :3]\n self.extrinsics_t = inv_extrinsics[:3, 3]\n\n self.rgb_width = calibration_params['rgb_width']\n self.rgb_height = calibration_params['rgb_height']\n self.depth_width = calibration_params['depth_width']\n self.depth_height = calibration_params['depth_height']\n self.z_scaling = calibration_params['z_scaling']\n self.depth_scale = calibration_params['depth_scale']",
"def loadCameraCalibration(self, file):\n # fileLines = file.readlines()\n\n # self.cameraIntrinsicMatrix[0, :] = np.fromstring(fileLines[1])\n # self.cameraIntrinsicMatrix[1, :] = np.fromstring(fileLines[2])\n # self.cameraIntrinsicMatrix[2, :] = np.fromstring(fileLines[3])\n self.cameraIntrinsicMatrix = np.array([[ 524.20336054,0.,300.47947718], [0.,523.18999732,277.66374865], [0.,0.,1.]])\n self.cameraDistortionCoeff = np.array([2.6e-1, -9.14594098e-1, -2.82354497e-3, 1.13680542e-3, 1.20066203e+00])\n # print(self.cameraIntrinsicMatrix)\n # self.cameraDistortionCoeff = np.loadtxt(fileLines[3])",
"def fb_cameraCalibrate_ch01(FILE_PATH_Section1,FILE_PATH_Section2,FILE_PATH_Section3,FILE_PATH_Section4, img_width_Flag1, img_height_Flag1):\n # get parameters for different sections of Flag1 camera.\n calibrateParameter_section1 = fb_cameraCalibrate(FILE_PATH_Section1, img_width_Flag1, img_height_Flag1)\n calibrateParameter_section2 = fb_cameraCalibrate(FILE_PATH_Section2, img_width_Flag1, img_height_Flag1)\n calibrateParameter_section3 = fb_cameraCalibrate(FILE_PATH_Section3, img_width_Flag1, img_height_Flag1)\n calibrateParameter_section4 = fb_cameraCalibrate(FILE_PATH_Section4, img_width_Flag1, img_height_Flag1)\n\n\n calibrateParameter_Flag1 = CalibrateParameter_Flag1(calibrateParameter_section1,calibrateParameter_section2,calibrateParameter_section3,calibrateParameter_section4)\n\n return calibrateParameter_Flag1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The client email registered for the integration service.
|
def client_email(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_email")
|
[
"def get_email(self):\r\n return self.email",
"def notification_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_email\")",
"def thread_email(self):\n return self._thread_email",
"def provider_email(self) -> str:\n return pulumi.get(self, \"provider_email\")",
"def email(self):\n return '{}.{}@email.com'.format(self.first, self.last)",
"def notification_email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"notification_email\")",
"def master_account_email(self) -> str:\n return pulumi.get(self, \"master_account_email\")",
"def email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email_address\")",
"def email_sender(self) -> str:\n return self._email_sender",
"def google_group_email(self) -> str:\n return pulumi.get(self, \"google_group_email\")",
"def sender_email(self) -> str:\n return str(self._email.SenderEmailAddress)",
"def getMail(self):\n return self._mail",
"def customer_email(obj):\n return str(obj.customer.subscriber.email)",
"def email_receiver(self) -> str:\n return self._email_receiver",
"def project_email(report, project):\n return [project.researcher.email]",
"def mail_address(self):\n return self.project_name + self.base_mail_address",
"def get_client_id(self):\n return self.__client_id",
"def x_mailer(self) -> str:\n return self._x_mailer",
"def get_default_email_address(self):\n return self.teams[\"Default\"].email"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This resource allows you to create and manage third-party log integrations for a CloudAMQP instance. Once configured, the logs produced will be forwarded to the corresponding integration. Only available for dedicated subscription plans. Argument Reference (cloudwatchlog)
|
def __init__(__self__,
resource_name: str,
args: IntegrationLogArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
|
[
"def __init__(__self__,\n resource_name: str,\n args: IntegrationLogCollectionArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def setup_logger():\n log = logging.getLogger('contrail_vrouter_provisioning')\n log.setLevel(logging.DEBUG)\n # create rotating file handler which logs even debug messages\n fh = logging.handlers.RotatingFileHandler(LOG_FILENAME,\n maxBytes=64*1024,\n backupCount=2)\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n # create formatter and add it to the handlers\n formatter = logging.Formatter(\n '[%(asctime)s %(name)s(%(lineno)s) %(levelname)s]: %(message)s',\n datefmt='%a %b %d %H:%M:%S %Y')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n log.addHandler(fh)\n log.addHandler(ch)\n\n return log",
"def insertEventLogger(cfgInterface):\n loggerSvc = cfgInterface.cmsConfig.service(\"MessageLogger\")\n\n\n if not loggerSvc.has_key(\"destinations\"):\n loggerSvc['destinations'] = ('vstring', 'untracked', [])\n destinations = loggerSvc['destinations']\n if \"\\\"EventLogger\\\"\" not in destinations:\n loggerSvc['destinations'][2].append(\"\\\"EventLogger\\\"\")\n\n loggerSvc['EventLogger'] = ('PSet', \"untracked\", {\n 'default' : ( 'PSet', 'untracked', {\n 'limit' : ( \"int32\", \"untracked\", '0')\n }),\n 'FwkReport' : ( 'PSet', 'untracked', {\n 'limit' : ( \"int32\", \"untracked\", '1000000'),\n 'reportEvery' :( \"int32\", \"untracked\", '1'),\n \n }),\n \n })\n \n return",
"def _write_to_cloud_logging(log_id, log_file_path):\n # TODO(ethanbao): Turn conductor into a python object so that the logging\n # client can be instance variable not global variable.\n global CLOUD_LOGGING_CLIENT\n if not CLOUD_LOGGING_CLIENT:\n CLOUD_LOGGING_CLIENT = cloud_logging.Client()\n cloud_logger = CLOUD_LOGGING_CLIENT.logger(log_id)\n if log_file_path:\n with open(log_file_path, 'r') as log_file:\n cloud_logger.log_text(log_file.read())",
"def register_for_new_logs(self):\n pass",
"def create_log(self):\n pass",
"def create_log(self, **kwargs):\n uri = '/log/logs/'\n post_data = {'log': kwargs}\n return self.create_resource(uri, post_data)",
"def configure_logger(self):\n \n log_handler = LogglyHTTPSHandler(100, self.level,\n None,\n self.endpoint)\n format_str = '''%(asctime)s level=%(levelname)s, msg=\"%(message)s\",\n module=%(module)s, file=\"%(filename)s\", lineno=%(lineno)d'''\n logging.Formatter(format_str)\n logger = logging.getLogger()\n logger.addHandler(log_handler)\n logger.setLevel(self.level)\n return log_handler",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_key_id: Optional[pulumi.Input[str]] = None,\n api_key: Optional[pulumi.Input[str]] = None,\n client_email: Optional[pulumi.Input[str]] = None,\n credentials: Optional[pulumi.Input[str]] = None,\n host: Optional[pulumi.Input[str]] = None,\n host_port: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None,\n private_key_id: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n secret_access_key: Optional[pulumi.Input[str]] = None,\n sourcetype: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[str]] = None,\n token: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None) -> 'IntegrationLog':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _IntegrationLogState.__new__(_IntegrationLogState)\n\n __props__.__dict__[\"access_key_id\"] = access_key_id\n __props__.__dict__[\"api_key\"] = api_key\n __props__.__dict__[\"client_email\"] = client_email\n __props__.__dict__[\"credentials\"] = credentials\n __props__.__dict__[\"host\"] = host\n __props__.__dict__[\"host_port\"] = host_port\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_key\"] = private_key\n __props__.__dict__[\"private_key_id\"] = private_key_id\n __props__.__dict__[\"project_id\"] = project_id\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"secret_access_key\"] = secret_access_key\n __props__.__dict__[\"sourcetype\"] = sourcetype\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"token\"] = token\n __props__.__dict__[\"url\"] = url\n return IntegrationLog(resource_name, opts=opts, __props__=__props__)",
"def __init__(self, account_name, account_key, queue_name=\"logqueue\"):\n self.log = Log()\n self.queue_type = config.ACS_LOGGING_QUEUE_TYPE\n self.queue_name = queue_name\n # self.log.debug(\"Queue type: \" + self.queue_type + \" / \" + self.queue_name)\n\n if self.queue_type == \"AzureStorageQueue\":\n self.createAzureQueues(account_name, account_key)\n elif self.queue_type == \"LocalFile\":\n self.file_queue = open(config.UNPROCESSED_LOG_FILE, 'w+')\n else:\n self.log.error(\"Unknown queue type: \" + queue_type)",
"def caplog(caplog):\n from prefect.logging.configuration import PROCESS_LOGGING_CONFIG\n\n for name, logger_config in PROCESS_LOGGING_CONFIG[\"loggers\"].items():\n if not logger_config.get(\"propagate\", True):\n logger = logging.getLogger(name)\n if caplog.handler not in logger.handlers:\n logger.handlers.append(caplog.handler)\n\n yield caplog",
"def _log_to_project_history(project, action_time, action_type, message):\r\n Project = get_model('projects', 'Project')\r\n key = redis_key_for_project(project)\r\n data = {\r\n 'action_time': action_time,\r\n 'message': message,\r\n 'action_type': action_type,\r\n }\r\n r = TxRedisMapper()\r\n r.lpush(key, data=data)\r\n r.ltrim(key, 0, 4)\r\n\r\n # Store logs in hubs, too\r\n if project.outsource:\r\n _log_to_project_history(\r\n project.outsource, action_time, action_type, message\r\n )",
"def create_log_subscription(self, DirectoryId: str, LogGroupName: str) -> Dict:\n pass",
"def update_logging(self):\n # custom loggers passed into tcex would not have log_info method\n if not hasattr(self.tcex.logger, 'log_info'):\n return\n\n if self._default_args.tc_log_level is None:\n # some Apps use logging while other us tc_log_level. ensure tc_log_level is always\n # available.\n self._default_args.tc_log_level = self._default_args.logging\n\n self.tcex.logger.log_info(self._default_args)\n\n # add api handler\n if self._default_args.tc_token is not None and self._default_args.tc_log_to_api:\n self.tcex.logger.add_api_handler(level=self.tcex.default_args.tc_log_level)\n\n # add rotating log handler\n self.tcex.logger.add_rotating_file_handler(\n name='rfh',\n filename=self._default_args.tc_log_file,\n path=self._default_args.tc_log_path,\n backup_count=self._default_args.tc_log_backup_count,\n max_bytes=self._default_args.tc_log_max_bytes,\n level=self.tcex.default_args.tc_log_level,\n )\n\n # replay cached log events\n self.tcex.logger.replay_cached_events(handler_name='cache')",
"def __init__(__self__, *,\n enable_media_metric_logs: Optional[pulumi.Input[bool]] = None,\n enable_sip_logs: Optional[pulumi.Input[bool]] = None,\n voice_connector_id: Optional[pulumi.Input[str]] = None):\n if enable_media_metric_logs is not None:\n pulumi.set(__self__, \"enable_media_metric_logs\", enable_media_metric_logs)\n if enable_sip_logs is not None:\n pulumi.set(__self__, \"enable_sip_logs\", enable_sip_logs)\n if voice_connector_id is not None:\n pulumi.set(__self__, \"voice_connector_id\", voice_connector_id)",
"def create_log(self, log_form):\n return # osid.logging.Log",
"def report_log(target, record_object):\n return requests.post(\n os.path.join(target, \"lofka/service/push\"),\n data=json.dumps(record_object).encode()\n )",
"def xcLogger( appname ):\n if (sys.platform[:3] == 'win'):\n return logging.handlers.NTEventLogHandler( appname )\n \n return logging.handlers.TimedRotatingFileHandler('/var/log/%s.log' % appname)\n\n #More difficult to configure as it defaults to localhost:514 \n #return logging.handlers.SysLogHandler() ",
"def log_message(self) -> global___LogMessage:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get an existing IntegrationLog resource's state with the given name, id, and optional extra properties used to qualify the lookup.
|
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_key_id: Optional[pulumi.Input[str]] = None,
api_key: Optional[pulumi.Input[str]] = None,
client_email: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
private_key_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
secret_access_key: Optional[pulumi.Input[str]] = None,
sourcetype: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None) -> 'IntegrationLog':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IntegrationLogState.__new__(_IntegrationLogState)
__props__.__dict__["access_key_id"] = access_key_id
__props__.__dict__["api_key"] = api_key
__props__.__dict__["client_email"] = client_email
__props__.__dict__["credentials"] = credentials
__props__.__dict__["host"] = host
__props__.__dict__["host_port"] = host_port
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["name"] = name
__props__.__dict__["private_key"] = private_key
__props__.__dict__["private_key_id"] = private_key_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["region"] = region
__props__.__dict__["secret_access_key"] = secret_access_key
__props__.__dict__["sourcetype"] = sourcetype
__props__.__dict__["tags"] = tags
__props__.__dict__["token"] = token
__props__.__dict__["url"] = url
return IntegrationLog(resource_name, opts=opts, __props__=__props__)
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ResourceSpecificLogging':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResourceSpecificLoggingArgs.__new__(ResourceSpecificLoggingArgs)\n\n __props__.__dict__[\"log_level\"] = None\n __props__.__dict__[\"target_id\"] = None\n __props__.__dict__[\"target_name\"] = None\n __props__.__dict__[\"target_type\"] = None\n return ResourceSpecificLogging(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n destination_configuration: Optional[pulumi.Input[pulumi.InputType['LoggingConfigurationDestinationConfigurationArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'LoggingConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LoggingConfigurationState.__new__(_LoggingConfigurationState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"destination_configuration\"] = destination_configuration\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return LoggingConfiguration(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n desired_state: Optional[pulumi.Input[str]] = None,\n properties: Optional[pulumi.Input[str]] = None,\n role_arn: Optional[pulumi.Input[str]] = None,\n schema: Optional[pulumi.Input[str]] = None,\n type_name: Optional[pulumi.Input[str]] = None,\n type_version_id: Optional[pulumi.Input[str]] = None) -> 'Resource':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ResourceState.__new__(_ResourceState)\n\n __props__.__dict__[\"desired_state\"] = desired_state\n __props__.__dict__[\"properties\"] = properties\n __props__.__dict__[\"role_arn\"] = role_arn\n __props__.__dict__[\"schema\"] = schema\n __props__.__dict__[\"type_name\"] = type_name\n __props__.__dict__[\"type_version_id\"] = type_version_id\n return Resource(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'IntegrationLogCollection':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _IntegrationLogCollectionState.__new__(_IntegrationLogCollectionState)\n\n __props__.__dict__[\"account_id\"] = account_id\n __props__.__dict__[\"services\"] = services\n return IntegrationLogCollection(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n groups_includeds: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None) -> 'Signon':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SignonState.__new__(_SignonState)\n\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"groups_includeds\"] = groups_includeds\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"status\"] = status\n return Signon(resource_name, opts=opts, __props__=__props__)",
"def getState(id):",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[int]] = None,\n enable_screenshot_on_failure_and_script: Optional[pulumi.Input[bool]] = None,\n guid: Optional[pulumi.Input[str]] = None,\n location_privates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StepMonitorLocationPrivateArgs']]]]] = None,\n locations_publics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n period_in_minutes: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StepMonitorStepArgs']]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StepMonitorTagArgs']]]]] = None) -> 'StepMonitor':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StepMonitorState.__new__(_StepMonitorState)\n\n __props__.__dict__[\"account_id\"] = account_id\n __props__.__dict__[\"enable_screenshot_on_failure_and_script\"] = enable_screenshot_on_failure_and_script\n __props__.__dict__[\"guid\"] = guid\n __props__.__dict__[\"location_privates\"] = location_privates\n __props__.__dict__[\"locations_publics\"] = locations_publics\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"period_in_minutes\"] = period_in_minutes\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"steps\"] = steps\n __props__.__dict__[\"tags\"] = tags\n return StepMonitor(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Stack':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = StackArgs.__new__(StackArgs)\n\n __props__.__dict__[\"notification_arns\"] = None\n __props__.__dict__[\"parameters\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"template_url\"] = None\n __props__.__dict__[\"timeout_in_minutes\"] = None\n return Stack(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n enable_media_metric_logs: Optional[pulumi.Input[bool]] = None,\n enable_sip_logs: Optional[pulumi.Input[bool]] = None,\n voice_connector_id: Optional[pulumi.Input[str]] = None) -> 'VoiceConnectorLogging':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VoiceConnectorLoggingState.__new__(_VoiceConnectorLoggingState)\n\n __props__.__dict__[\"enable_media_metric_logs\"] = enable_media_metric_logs\n __props__.__dict__[\"enable_sip_logs\"] = enable_sip_logs\n __props__.__dict__[\"voice_connector_id\"] = voice_connector_id\n return VoiceConnectorLogging(resource_name, opts=opts, __props__=__props__)",
"def pull_event_log(id):\n # Reschedule the deletion time of the event-log\n # __store_delete_time(id)\n\n event_log = event_store[id]\n\n if event_log is None:\n raise EventLogNotFoundError\n\n return event_log",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n record_id: Optional[pulumi.Input[str]] = None,\n resource_name_: Optional[pulumi.Input[str]] = None,\n tag: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None) -> 'ResourceRecord':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ResourceRecordState.__new__(_ResourceRecordState)\n\n __props__.__dict__[\"record_id\"] = record_id\n __props__.__dict__[\"resource_name\"] = resource_name_\n __props__.__dict__[\"tag\"] = tag\n __props__.__dict__[\"value\"] = value\n return ResourceRecord(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n force_delete: Optional[pulumi.Input[bool]] = None,\n groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n monitor_ids: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n query: Optional[pulumi.Input[pulumi.InputType['ServiceLevelObjectiveQueryArgs']]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n target_threshold: Optional[pulumi.Input[float]] = None,\n thresholds: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLevelObjectiveThresholdArgs']]]]] = None,\n timeframe: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n validate: Optional[pulumi.Input[bool]] = None,\n warning_threshold: Optional[pulumi.Input[float]] = None) -> 'ServiceLevelObjective':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceLevelObjectiveState.__new__(_ServiceLevelObjectiveState)\n\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"force_delete\"] = force_delete\n __props__.__dict__[\"groups\"] = groups\n __props__.__dict__[\"monitor_ids\"] = monitor_ids\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"query\"] = query\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"target_threshold\"] = target_threshold\n __props__.__dict__[\"thresholds\"] = thresholds\n __props__.__dict__[\"timeframe\"] = timeframe\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"validate\"] = validate\n __props__.__dict__[\"warning_threshold\"] = warning_threshold\n return ServiceLevelObjective(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n basic_sli: Optional[pulumi.Input[pulumi.InputType['SloBasicSliArgs']]] = None,\n calendar_period: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n goal: Optional[pulumi.Input[float]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n request_based_sli: Optional[pulumi.Input[pulumi.InputType['SloRequestBasedSliArgs']]] = None,\n rolling_period_days: Optional[pulumi.Input[int]] = None,\n service: Optional[pulumi.Input[str]] = None,\n slo_id: Optional[pulumi.Input[str]] = None,\n user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n windows_based_sli: Optional[pulumi.Input[pulumi.InputType['SloWindowsBasedSliArgs']]] = None) -> 'Slo':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SloState.__new__(_SloState)\n\n __props__.__dict__[\"basic_sli\"] = basic_sli\n __props__.__dict__[\"calendar_period\"] = calendar_period\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"goal\"] = goal\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"request_based_sli\"] = request_based_sli\n __props__.__dict__[\"rolling_period_days\"] = rolling_period_days\n __props__.__dict__[\"service\"] = service\n __props__.__dict__[\"slo_id\"] = slo_id\n __props__.__dict__[\"user_labels\"] = user_labels\n __props__.__dict__[\"windows_based_sli\"] = windows_based_sli\n return Slo(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_point_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n health_check_source_ip: Optional[pulumi.Input[str]] = None,\n health_check_target_ip: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n opposite_access_point_id: Optional[pulumi.Input[str]] = None,\n opposite_interface_id: Optional[pulumi.Input[str]] = None,\n opposite_interface_owner_id: Optional[pulumi.Input[str]] = None,\n opposite_region: Optional[pulumi.Input[str]] = None,\n opposite_router_id: Optional[pulumi.Input[str]] = None,\n opposite_router_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n role: Optional[pulumi.Input[str]] = None,\n router_id: Optional[pulumi.Input[str]] = None,\n router_type: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None) -> 'RouterInterface':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RouterInterfaceState.__new__(_RouterInterfaceState)\n\n __props__.__dict__[\"access_point_id\"] = access_point_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"health_check_source_ip\"] = health_check_source_ip\n __props__.__dict__[\"health_check_target_ip\"] = health_check_target_ip\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"opposite_access_point_id\"] = opposite_access_point_id\n __props__.__dict__[\"opposite_interface_id\"] = opposite_interface_id\n __props__.__dict__[\"opposite_interface_owner_id\"] = opposite_interface_owner_id\n __props__.__dict__[\"opposite_region\"] = opposite_region\n __props__.__dict__[\"opposite_router_id\"] = opposite_router_id\n __props__.__dict__[\"opposite_router_type\"] = opposite_router_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"role\"] = role\n __props__.__dict__[\"router_id\"] = router_id\n __props__.__dict__[\"router_type\"] = router_type\n __props__.__dict__[\"specification\"] = specification\n return RouterInterface(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n azuread_based_service_principals: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LedgerAzureadBasedServicePrincipalArgs']]]]] = None,\n certificate_based_security_principals: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LedgerCertificateBasedSecurityPrincipalArgs']]]]] = None,\n identity_service_endpoint: Optional[pulumi.Input[str]] = None,\n ledger_endpoint: Optional[pulumi.Input[str]] = None,\n ledger_type: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Ledger':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LedgerState.__new__(_LedgerState)\n\n __props__.__dict__[\"azuread_based_service_principals\"] = azuread_based_service_principals\n __props__.__dict__[\"certificate_based_security_principals\"] = certificate_based_security_principals\n __props__.__dict__[\"identity_service_endpoint\"] = identity_service_endpoint\n __props__.__dict__[\"ledger_endpoint\"] = ledger_endpoint\n __props__.__dict__[\"ledger_type\"] = ledger_type\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"tags\"] = tags\n return Ledger(resource_name, opts=opts, __props__=__props__)",
"def get_integration(\n self,\n integration_id: str,\n ) -> Integration:\n try:\n return self._integration_instances[integration_id]\n except KeyError:\n raise IntegrationNotRegisteredError(integration_id)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n exclusion_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityMonitoringFilterExclusionFilterArgs']]]]] = None,\n filtered_data_type: Optional[pulumi.Input[str]] = None,\n is_enabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n query: Optional[pulumi.Input[str]] = None,\n version: Optional[pulumi.Input[int]] = None) -> 'SecurityMonitoringFilter':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SecurityMonitoringFilterState.__new__(_SecurityMonitoringFilterState)\n\n __props__.__dict__[\"exclusion_filters\"] = exclusion_filters\n __props__.__dict__[\"filtered_data_type\"] = filtered_data_type\n __props__.__dict__[\"is_enabled\"] = is_enabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"query\"] = query\n __props__.__dict__[\"version\"] = version\n return SecurityMonitoringFilter(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n base_capacity: Optional[pulumi.Input[int]] = None,\n config_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkgroupConfigParameterArgs']]]]] = None,\n endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkgroupEndpointArgs']]]]] = None,\n enhanced_vpc_routing: Optional[pulumi.Input[bool]] = None,\n namespace_name: Optional[pulumi.Input[str]] = None,\n publicly_accessible: Optional[pulumi.Input[bool]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n workgroup_id: Optional[pulumi.Input[str]] = None,\n workgroup_name: Optional[pulumi.Input[str]] = None) -> 'Workgroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkgroupState.__new__(_WorkgroupState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"base_capacity\"] = base_capacity\n __props__.__dict__[\"config_parameters\"] = config_parameters\n __props__.__dict__[\"endpoints\"] = endpoints\n __props__.__dict__[\"enhanced_vpc_routing\"] = enhanced_vpc_routing\n __props__.__dict__[\"namespace_name\"] = namespace_name\n __props__.__dict__[\"publicly_accessible\"] = publicly_accessible\n __props__.__dict__[\"security_group_ids\"] = security_group_ids\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"workgroup_id\"] = workgroup_id\n __props__.__dict__[\"workgroup_name\"] = workgroup_name\n return Workgroup(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'DebugSession':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DebugSessionArgs.__new__(DebugSessionArgs)\n\n __props__.__dict__[\"api_id\"] = None\n __props__.__dict__[\"count\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"environment_id\"] = None\n __props__.__dict__[\"filter\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"organization_id\"] = None\n __props__.__dict__[\"revision_id\"] = None\n __props__.__dict__[\"timeout\"] = None\n __props__.__dict__[\"tracesize\"] = None\n __props__.__dict__[\"validity\"] = None\n return DebugSession(resource_name, opts=opts, __props__=__props__)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The client email registered for the integration service.
|
def client_email(self) -> pulumi.Output[str]:
return pulumi.get(self, "client_email")
|
[
"def client_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_email\")",
"def get_email(self):\r\n return self.email",
"def notification_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_email\")",
"def thread_email(self):\n return self._thread_email",
"def provider_email(self) -> str:\n return pulumi.get(self, \"provider_email\")",
"def email(self):\n return '{}.{}@email.com'.format(self.first, self.last)",
"def notification_email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"notification_email\")",
"def master_account_email(self) -> str:\n return pulumi.get(self, \"master_account_email\")",
"def email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email_address\")",
"def email_sender(self) -> str:\n return self._email_sender",
"def google_group_email(self) -> str:\n return pulumi.get(self, \"google_group_email\")",
"def sender_email(self) -> str:\n return str(self._email.SenderEmailAddress)",
"def getMail(self):\n return self._mail",
"def customer_email(obj):\n return str(obj.customer.subscriber.email)",
"def email_receiver(self) -> str:\n return self._email_receiver",
"def project_email(report, project):\n return [project.researcher.email]",
"def mail_address(self):\n return self.project_name + self.base_mail_address",
"def get_client_id(self):\n return self.__client_id",
"def x_mailer(self) -> str:\n return self._x_mailer",
"def get_default_email_address(self):\n return self.teams[\"Default\"].email"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a storm configuration given its name, assuming it exists.
|
def get_config(self, storm_name: str) -> Dict:
q = {"name": storm_name}
cols = {"config": 1}
r = list(self._storms.find(q, cols))
if len(r) == 0:
raise KeyError(f"{storm_name} not found, no configuration to load.")
else:
return r[0]["config"]
|
[
"def get_conf(self, name='global'):\n return self.cluster_configuration_manager.get_object(name)",
"def get(name):\n value = Configuration.settings.get(name, None)\n\n if value is None:\n raise ConfigurationNotFound(name)\n\n return value",
"def get(self, name: str, default=None):\n if name in self.__config:\n return self.__config[name]\n if '.' in name:\n names = name.split('.')\n cur = self.__config\n for name in names:\n if type(cur) is dict and name in cur:\n cur = cur[name]\n else:\n return default\n return cur\n return default",
"def get_config(name: str):\n # 1. Check environment variables\n env_name = name.replace(\"_\", \"__\").replace(\".\", \"_\").upper()\n env_val = os.getenv(\"IOT_\" + env_name)\n if env_val:\n if \";\" in env_val:\n return [v.strip() for v in env_val.split(\";\")]\n return env_val\n\n # 2. Check config file\n keys = name.split(\".\")\n val = _CONFIG_YAML\n for k in keys:\n if isinstance(val, dict):\n val = val.get(k, {})\n\n if val:\n return val\n raise ValueError(f'\"{name} not found')",
"def read_config(config_name: str) -> Optional[str]:\n config_path = os.path.join(CONFIG_DIR, '%s.conf' % config_name)\n if os.path.exists(config_path):\n with open(config_path, 'rt') as config_handle:\n config = config_handle.read()\n return config\n return None",
"def _get_project_config(self, name):\n self._load_project_config()\n\n name = to_wiki_format(self.site, name)\n if name not in self._project_config[\"projects\"]:\n return None\n\n config = self._project_config[\"defaults\"].copy()\n config.update(self._project_config[\"projects\"][name])\n return config",
"def get_conf(self, comp, conf_name):\r\n for cfg in comp.configuration_sets[0].configuration_data:\r\n if cfg.name == conf_name:\r\n return cfg.data\r\n return None",
"def getConfigItem(self, name, ses=None):\n if not self._config.has_key(ses):\n return None\n\n if not self._config[ses].has_key(name):\n return None\n\n return self._config[ses][name]",
"def get(self, name):\n response = self._session.get(\n path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),\n headers={'Accept': self._accept_header()}\n )\n\n etag = response.headers['ETag']\n return TemplateConfig(session=self._session, data=response.json(), etag=etag)",
"def get_city_config(city_name):\n settings = [c for c in DEFAULT_CITIES if c.name == city_name]\n assert len(settings) == 1, 'Error in recognising city'\n return settings[0]",
"def get_config(config_name):\n _load_config_file()\n\n return json.loads(redis_conn.get(config_key + config_name))",
"def get_launch_config(config, name):\n if name == \"(Defaults)\":\n return config.defaults\n\n for launch_config in config.launches:\n if launch_config.confname == name:\n return launch_config\n\n raise UserError(\"No LaunchConfig named %s\" % name)",
"def load_config(tube_name):\n config_path = os.path.join(settings.TUBES_ROOT, tube_name, \"config.yml\")\n if not os.path.isfile(config_path):\n raise ValueError(\"Tube named '%s' doesn't exist.\" % tube_name)\n return parse_config(config_path)",
"def find_station(self, name):\n for st in self._stations:\n if st.is_named(name):\n return st\n return None",
"def station_by_name(self, name):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if name == _[\"properties\"][\"name\"]]\n log.debug(\"searching for station {} found {}\".format(name, station))\n return station[0]\n except:\n log.debug(\"Exception: searching for station {} found None\".format(name))\n return None",
"def get_mysql_config(name=None, index=None):\n if not name and not index:\n return MYSQL_SERVERS[0].client_config.copy()\n\n if name:\n for server in MYSQL_SERVERS:\n if server.name == name:\n return server.client_config.copy()\n elif index:\n return MYSQL_SERVERS[index].client_config.copy()\n\n return None",
"def load(file_name):\n cfg_path = join(Config.get_config_folder(), file_name)\n if not exists(cfg_path):\n copy(Config._get_default(file_name), cfg_path)\n Logger.info(\"Config: Loading config from %s\", cfg_path)\n with open(cfg_path) as f:\n return load(f)",
"def get_runs_by_storm(self, storm_name: str) -> List[Dict]:\n\n q = {\"storm_name\": storm_name}\n cols = {\"config\": 0}\n r = list(self._runs.find(q, cols))\n\n if len(r) == 0:\n return None\n else:\n return r",
"def cfgget(self, name, default = NOPARAM):\n try:\n return self.params[name]\n except KeyError:\n pass\n if default != NOPARAM: return default\n try:\n return default_params[name]\n except KeyError:\n pass\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the run_record from the last storm run under a given name
|
def get_last_run(self, storm_name: str) -> Dict:
q = {"storm_name": storm_name}
cols = {"_id": 0}
r = list(self._runs.find(q, cols))
if len(r) == 0:
return None
elif len(r) > 0:
max_run_idx = np.argmax(
np.array([dt.datetime.strptime(x["run_date"], "%Y-%m-%d") for x in r])
)
return r[max_run_idx]
|
[
"def get_last_run(runfile):\n runfile = open(runfile, 'r')\n return int(runfile.readlines()[-1])",
"def get_last_run(self, rule_id):\n\n s = RuleRun.search()\n s = s.filter('term', rule_id=rule_id).sort('-timestamp')\n s = s[:1]\n response = s.execute()\n if response.hits.total > 0:\n return response[0]\n else:\n return None",
"def get_last_population(log_name):\n return get_last_line(get_log_folder(log_name) + '/population_log.txt')",
"def get_last_snapshot(self):\n name = self.snapshot_names[-1]\n return self.get_snapshot(name)",
"def find_or_create_test_run(self, run_name):\n runs = []\n for r in self.plan.entries:\n runs += r.runs\n\n for r in runs:\n if r.name == run_name:\n run = r\n print 'Run {} is found'.format(run_name)\n break\n else:\n entry = {\n 'name': run_name,\n 'suite_id': self.suite.id,\n 'include_all': False,\n 'case_ids': [_.id for _ in self.cases],\n 'project_id': self.project.id,\n 'milestone_id': self.milestone.id,\n 'plan_id': self.plan.id\n }\n run = self.plan.api.add_plan_entry(entry)\n run = self.plan.api.get_run(run['id'])\n print 'Run {} is created'.format(run_name)\n return run",
"def get_runs_by_storm(self, storm_name: str) -> List[Dict]:\n\n q = {\"storm_name\": storm_name}\n cols = {\"config\": 0}\n r = list(self._runs.find(q, cols))\n\n if len(r) == 0:\n return None\n else:\n return r",
"def last_run_at(self) -> str:\n return pulumi.get(self, \"last_run_at\")",
"def last_run(self) -> int:\n past_runs = os.listdir(self.run_dir)\n if len(past_runs) == 0:\n return 0\n run_ids = [int(r.split('/')[-1:][0]) for r in past_runs]\n last_run = max(run_ids)\n path = os.path.join(self.run_dir, str(last_run))\n if not os.listdir(path):\n last_run = last_run - 1\n return last_run",
"def lookup_ds(self, run):\n for ds in self.ds_run_table:\n runlist = self.ds_run_table[ds]\n if len(runlist) == 1 and run == runlist[0]:\n return ds\n elif len(runlist) > 1 and runlist[0] <= run <= runlist[-1]:\n return ds\n \n # if we get to here, we haven't found the run\n print(\"Error, couldn't find a ds for run {run}.\")\n exit()",
"def read_last_event():\n db = get_db()\n\n row = db.execute(\"\"\"event_type, max(event_timestamp), gps_coord FROM events\"\"\").fetchall()\n\n return row",
"def get_trace(file_path, run_name):\n parsed_file = []\n file_path = file_path + \"/out/nextflow_reports\"\n fn = os.path.join(file_path, run_name + \"_execution_trace.txt\")\n\n if pathlib.Path(fn).exists(): \n\n fh = open(fn, 'r')\n\n for line in fh:\n record = line.strip().split(\"\\t\")\n\n if record[0] == \"task_id\":\n parsed_file.append(record)\n continue\n\n record[1] = record[1].split(\":\")[-1].replace(\"__\",\"-\")\n record[3] = record[3][0] \n\n parsed_file.append(record) \n\n return parsed_file\n\n else:\n return None",
"def find_the_last_use(self, model_name, callback):\n print (\"5. me me\")\n history = self.get_collection('_history')\n history.find_one({'document_model': model_name},\n sort=[('_id', DESCENDING)],\n fields=['document_model', '_id'],\n callback=callback)",
"def get_last_update(name: str) -> float:\n global _feeds\n return _feeds[name]['last_update']",
"def last_run_time(self) -> str:\n return pulumi.get(self, \"last_run_time\")",
"def last_executor(self):\n if len(self.proto_wo_data.routes) > 0:\n return self.proto_wo_data.routes[-1].executor",
"def get_workflow_runs_by_name(self, workflow_name):\n variables = {\n 'name': workflow_name\n }\n\n return self.query(\"\"\"\n query workflowRunsByNameQuery($name: String!) {\n workflowRunsByName(name: $name) {\n id\n name\n createdBy {\n id\n firstName\n lastName\n email\n }\n deleted\n deletedAt\n updatedAt\n createdAt\n }\n }\n \"\"\",\n variables=variables\n )",
"def last(self) -> MispEvent:\n return self.list(limit=1, direction='desc')[0]",
"def __get_last_task_finish__(self):\n last_task = self.employee.execution_set.filter(exec_status__in=[self.Done, self.OnCorrection, self.OnChecking],\n task__exec_status__in=[self.InProgress, self.Done, self.Sent],\n subtask__add_to_schedule=True\n ) \\\n .order_by('actual_finish').last()\n if last_task and last_task.actual_finish:\n return last_task.actual_finish\n return datetime.now().replace(hour=9,minute=0,second=0,microsecond=0)",
"async def get_last_games(self):\n now = datetime.utcnow()\n sql = (\"SELECT event_id, MAX(end_time) as end_time \" \n \"FROM rcs_events \"\n \"WHERE event_type_id = 1 AND end_time < $1 \"\n \"GROUP BY event_id \"\n \"ORDER BY end_time DESC \"\n \"LIMIT 1\")\n row = await self.bot.pool.fetchrow(sql, now)\n return row['event_id'], row['end_time']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all run records for a storm (and all fields)
|
def get_runs_by_storm(self, storm_name: str) -> List[Dict]:
q = {"storm_name": storm_name}
cols = {"config": 0}
r = list(self._runs.find(q, cols))
if len(r) == 0:
return None
else:
return r
|
[
"def get_runs(self):\n try:\n return self.__dict__['runs']\n except KeyError:\n json = self._connection._make_request('routes/%s/runs/' % self.id)\n obj_list = [BusRun(\n j[\"id\"],\n j['display_name'],\n j['direction_name'],\n self,\n self._connection,\n ) for j in json.get(\"items\")]\n self.__dict__['runs'] = obj_list\n return obj_list",
"def list_runs(self):\n res = self.api_client.ListRuns()\n return res.response().result",
"def get_runs(self, conditions: List[str] = []) -> List[Run]:\n condition_str = ''\n if len(conditions) >= 1:\n condition_str = 'AND ' + ' AND '.join(conditions)\n query = f\"\"\"\n SELECT timestamp,\n dataset_name,\n preprocessing_cfg,\n model_type,\n model_hyperparameters,\n metric,\n score\n FROM {self.runs_table}, {self.scores_table}\n WHERE {self.runs_table}.run_id = {self.scores_table}.run_id\n {condition_str};\n \"\"\"\n results = self.engine.execute(query)\n runs = []\n for row in results:\n runs.append(row_to_run(row))\n return runs",
"def runsByState(state):\n print '*** Print runs in state: '+str(state)\n params = {}\n stmt = \"\"\"SELECT \n runid, state, todo, done, checked, archived, \n partitionid, partitionname, activity, \n runtype, params, starttime\n FROM %s\"\"\"%(configuration.mondb_table,)\n if state:\n stmt = stmt + \" WHERE state=:state\"\n params = {'state': state }\n cursor = utilities.mondb().execute(stmt, params)\n runs = cursor.fetchall()\n cursor.close()\n utilities.printRuns(runs)",
"def load_all_runs(self) -> List[RunResult]:\n results = []\n with open(self.store_location, mode='rb') as f:\n while True:\n try:\n r = self.serializer.load(f)\n results.append(r)\n except EOFError:\n break\n\n return results",
"def load_all_runs(self) -> Sequence[RunResult]:",
"def getRunList(minRun):\n runlist = []\n\n #FULLADDRESS=\"http://pccmsdqm04.cern.ch/runregistry_api/\" \n #FULLADDRESS=\"http://pccmsdqm04.cern.ch/runregistry/xmlrpc\"\n FULLADDRESS=\"http://cms-service-runregistry-api.web.cern.ch/cms-service-runregistry-api/xmlrpc\"\n\n print \"RunRegistry from: \",FULLADDRESS\n server = xmlrpclib.ServerProxy(FULLADDRESS)\n # you can use this for single run query\n# sel_runtable=\"{runNumber} = \"+run+\" and {datasetName} LIKE '%Express%'\"\n #sel_runtable=\"{groupName} ='Collisions11' and {runNumber} >= \" + str(minRun) + \" and {datasetName} LIKE '%Express%'\"\n sel_runtable=\"{groupName} ='Collisions11' and {runNumber} >= \" + str(minRun) + \" and {datasetName} LIKE '%Online%'\"\n\n #sel_runtable=\"{groupName} ='Commissioning11' and {runNumber} >= \" + str(minRun)# + \" and {datasetName} LIKE '%Express%'\"\n\n run_data = server.DataExporter.export('RUN', 'GLOBAL', 'csv_runs', sel_runtable)\n for line in run_data.split(\"\\n\"):\n #print line\n run=line.split(',')[0]\n if \"RUN_NUMBER\" in run or run == \"\":\n continue\n #print \"RUN: \" + run\n runlist.append(int(run))\n return runlist",
"def list_records(self):\n return self.connection.list_records(zone=self)",
"def get_models_run(self):\n\n modelsRun = get_models_run_list(self.controller.log.log)\n \n return modelsRun",
"def get_storm_page_data(region_code, year, storm_id):\n url = ind_storm_url.format(region_code, year, storm_id)\n soup = get_soup(url)\n path_tbl = soup.find(id=\"stormList\").find('tbody')\n\n col_idxs = ['Date', 'Time', 'Lat', 'Lon', 'Wind', 'Pressure', 'Storm Type']\n ret = list()\n for row in path_tbl.find_all('tr'):\n cols = row.find_all('td')\n if cols is None or len(cols) == 0:\n continue\n\n t_data = dict()\n for i_col in range(len(col_idxs)):\n col = cols[i_col]\n t_data[col_idxs[i_col]] = col.text.strip()\n\n ret.append(t_data)\n\n return ret",
"def get_all_scheduled_recordings(self):\r\n\r\n self.mediasite.recorder.gather_recorders()\r\n\r\n recorders = self.mediasite.model.get_recorders()\r\n\r\n #initialize our return dictionary\r\n recorder_recordings = []\r\n\r\n #loop for each recorder in recorders listing\r\n for recorder in recorders:\r\n\r\n #gather scheduled recordings by recorder\r\n scheduled_recordings = self.mediasite.recorder.gather_recorder_scheduled_recordings(recorder[\"id\"])\r\n\r\n #initialize schedule id, name, and recorder_recordings list\r\n schedule_id = \"\"\r\n schedule_name = \"\"\r\n \r\n #loop for each recording in scheduled_recordings\r\n for recording in scheduled_recordings[\"value\"]:\r\n \r\n #determine if we already have the schedule_id and name, if not, gathering it.\r\n if schedule_id != recording[\"ScheduleId\"]:\r\n schedule_id = recording[\"ScheduleId\"]\r\n schedule_result = self.mediasite.schedule.get_schedule(schedule_id)\r\n schedule_name = schedule_result[\"Name\"]\r\n\r\n #create dictionary containing the scheduled recording's information\r\n recording_dict = {\"title\":schedule_name,\r\n \"location\":recorder[\"name\"],\r\n \"cancelled\":recording[\"IsExcluded\"],\r\n \"id\":schedule_id,\r\n \"start\":recording[\"StartTime\"] + \"Z\",\r\n \"end\":recording[\"EndTime\"] + \"Z\",\r\n \"duration\":recording[\"DurationInMinutes\"]\r\n }\r\n\r\n #add the scheduled recording information to list of other recordings for this recorder\r\n recorder_recordings.append(recording_dict)\r\n\r\n return recorder_recordings",
"def listRuns(self, minrun=-1, maxrun=-1, logical_file_name=\"\", block_name=\"\", dataset=\"\"):\n\ttry:\n\t\tconn = self.dbi.connection()\n\t\ttran=False\n\t\tret=self.runlist.execute(conn, minrun, maxrun, logical_file_name, block_name,\n\t\tdataset, tran)\n\t\treturn ret\n\n\texcept Exception, ex:\n\t\traise ex\n\t\t\n\tfinally:\n\t\tconn.close()",
"def get_warzone_service_records(self, players):\n return self._get_service_records(mode=\"warzone\", players=players)",
"def get_runs_for_flow(self, flow_id):\n run_data = self.client.get('runs', params={'flow': flow_id})\n runs = run_data['results']\n while run_data['next']:\n run_data = self.client.request('get', run_data['next'])\n runs = runs + run_data['results']\n return runs",
"def get_all_records(self):\n sql = 'SELECT * FROM %s' % (self.table)\n print(sql)\n return self.curs.execute(sql).fetchall()",
"def getAllRecords(self):\r\n session = wx.GetApp().session\r\n result = session.query(getattr(db, self.modelName)).all()\r\n return result",
"def listRunsWithNewData(dbConn):\n sqlQuery = \"\"\"SELECT RUN_ID, MAX(streamer.STREAMER_ID) FROM run INNER JOIN\n streamer USING (RUN_ID) WHERE streamer.STREAMER_ID >\n run.LAST_STREAMER AND run.RUN_STATUS != (SELECT ID FROM\n run_status WHERE STATUS = 'Active') GROUP BY RUN_ID\"\"\"\n\n dbConn.execute(sqlQuery)\n results = dbConn.fetchall()\n\n updatedRuns = []\n for result in results:\n updatedRun = {\"RUN_ID\": result[0], \"LAST_STREAMER\": result[1]}\n updatedRuns.append(updatedRun)\n\n return updatedRuns",
"def get_last_run(self, storm_name: str) -> Dict:\n q = {\"storm_name\": storm_name}\n cols = {\"_id\": 0}\n r = list(self._runs.find(q, cols))\n\n if len(r) == 0:\n return None\n elif len(r) > 0:\n max_run_idx = np.argmax(\n np.array([dt.datetime.strptime(x[\"run_date\"], \"%Y-%m-%d\") for x in r])\n )\n return r[max_run_idx]",
"def show_runs():\n # return render_template(\"runs.html\", runs=data.runs(), type=type)\n return render_template(\"runs.html\", runs=[], type=type)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a new run record (for use after a storm run)
|
def write_run_record(self, run_record: Dict) -> None:
q = {}
self._runs.insert_one(run_record)
|
[
"def add(self, record):\n return self._append_record(record, 'additions')",
"def _add_model_run(\n posts: List[YamlDict],\n run_id: str,\n open_timestamp: dt,\n inputs: List[str],\n outputs: List[YamlDict],\n model_config: YamlDict,\n submission_script: YamlDict,\n code_repo: YamlDict,\n description: str,\n) -> None:\n code_run = _create_target_data_dict(\n DataRegistryTarget.code_run,\n {\n DataRegistryField.run_date: open_timestamp,\n DataRegistryField.run_identifier: run_id,\n DataRegistryField.code_repo: code_repo,\n DataRegistryField.model_config: model_config,\n DataRegistryField.submission_script: submission_script,\n DataRegistryField.inputs: inputs,\n DataRegistryField.outputs: outputs,\n DataRegistryField.description: description,\n },\n )\n logger.debug(f\"Created ModelRun: {code_run}\")\n posts.append(code_run)",
"def add_record(self, **kw):\n return self.connection.add_record(self, **kw)",
"def save_run(self, run: Run):\n # save run metadata\n query = f\"\"\"\n INSERT INTO {self.runs_table} (timestamp, dataset_name,\n preprocessing_cfg, model_type, model_hyperparameters)\n VALUES (\n '{run.timestamp}',\n '{run.dataset_name}',\n '{json.dumps(run.preprocessing_cfg)}',\n '{run.model_type}',\n '{json.dumps(run.model_hyperparameters)}'\n )\n RETURNING run_id;\n \"\"\"\n # returns the run_id for the next insert\n run_id = self.engine.execute(query).scalar()\n # save run results\n metric_rows = []\n for metric, score in run.metric_scores.items():\n metric_rows.append(f\"({run_id}, '{metric}', {score})\")\n value_rows = ', '.join(metric_rows)\n query = f\"\"\"\n INSERT INTO {self.scores_table} (run_id, metric, score)\n VALUES {value_rows};\n \"\"\"\n self.engine.execute(query)",
"def add_run(self, run: BoboRun) -> None:\n\n with self._lock:\n if run.id in self.runs:\n raise RuntimeError(\n \"Run ID {} already exists in handler for NFA {}.\".format(\n run.id, self.nfa.name))\n\n self.runs[run.id] = run\n run.subscribe(self)",
"def add_record(self, record):\r\n if self.ttl > 0:\r\n if record.ttl > self.ttl:\r\n record.ttl = self.ttl\r\n self.records.append((record,time.localtime()))",
"def add_record(self, task):\n connection = self.start_connection()\n cursor = connection.cursor()\n cursor.execute('INSERT INTO todo VALUES(?)', [task])\n self.commit_close_connection(connection)",
"def add_record(self, transaction):\n raise NotImplementedError(\"Please Implement this method\")",
"def add_to_database(self, run_data: list[int]):\n file = open(f\"{CURRENT_DIR}/{self.database_file}\", \"a\")\n file.write('\\n')\n file.write(\" \".join(str(item) for item in run_data))\n file.close()",
"def update_run_record(self, run_record: Dict) -> None:\n q = {\"_id\": run_record[\"_id\"]}\n self._runs.update_one(q, {\"$set\": run_record})",
"def complete_a_run(self, run_data: list[int] = None):\n if run_data is None:\n run_data = [0] + [stage.end_time for stage in self.stages]\n self.add_to_database(run_data)\n self.past_runs.append(PastRun(run_data))",
"def record(running_app, minimal_record):\n s = current_rdm_records.records_service\n draft = s.create(system_identity, minimal_record)\n return s.publish(system_identity, draft.id)",
"def addRuns(self, runs):\n numrunsadded = 0\n errmsg = \"\"\n for run in runs:\n run = int(run)\n if run in self._expRecordDict:\n self._runs.append(run)\n numrunsadded += 1\n else:\n errmsg += \"Run %d does not exist in IPTS %d (record file)\\n\" % (\n run, self._iptsNumber)\n # ENDFOR\n\n return numrunsadded, errmsg",
"def save_run(self, run_result: RunResult) -> None:",
"def add_run(self,runset,runme,con):\n if not isinstance(runme,Task) and not isinstance(runme,Test):\n raise TypeError(\n 'The runme argument to add_run must be a Test or Task '\n '(in produtil.testing.parsetree) or subclass. You '\n 'provided a %s %s'%(\n type(runme).__name__,elipses(repr(runme))))\n if runset is None:\n self.__runobjs[runme.name]=RunConPair(runme,con)\n return\n addme=RunConPair(runme,con)\n for xrunset in [ runset, '**all**' ]:\n self.__runsets[xrunset].add(addme)",
"def add_operation_run(\n self,\n operation_name,\n workflow_run_id,\n status,\n message=None\n ):\n variables = {\n 'operationRun': {\n 'operationName': operation_name,\n 'workflowRunId': workflow_run_id,\n 'status': status\n }\n }\n \n if message is not None:\n variables['operationRun']['message'] = message\n \n return self.query(\"\"\"\n mutation addOperationRunMutation($operationRun: AddOperationRunInput!) {\n addOperationRun(input: $operationRun) {\n operationRun {\n id\n operationName\n workflowRunId\n startDateTime\n endDateTime\n status\n message\n createdBy {\n id\n firstName\n lastName\n email\n }\n createdAt\n updatedAt\n }\n }\n }\n \"\"\",\n variables=variables\n )",
"def test_add_run(self):\n\n self.bds.add_run(self.config)\n self.assertEqual(type(self.bds.store), dict)\n for key, val in self.bds.store.items():\n self.assertEqual(type(key), RIDTConfig)\n self.assertEqual(type(val), DataStore)",
"def add_run_trigger(self, run_trigger):\n self._run_trigger = run_trigger",
"def save_run(self, run_result: RunResult) -> None:\n with open(self.store_location, mode='ab') as f:\n self.serializer.dump(run_result, f)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates an existing run record
|
def update_run_record(self, run_record: Dict) -> None:
q = {"_id": run_record["_id"]}
self._runs.update_one(q, {"$set": run_record})
|
[
"def update(self):\n\n self.__check_update_ok()\n self.db.update_dataset_record(self.dataset_dict)",
"def write_run_record(self, run_record: Dict) -> None:\n\n q = {}\n self._runs.insert_one(run_record)",
"def update_student_records(self, students, test_runs):\n pass",
"def update(self, sql):",
"def update_operation_run(\n self,\n operation_run_id,\n start_date_time=None,\n end_date_time=None,\n status=None,\n message=None,\n deleted=None\n ):\n\n \n variables = {\n 'operationRun': {\n 'id': operation_run_id\n }\n }\n \n if start_date_time is not None:\n variables['operationRun']['startDateTime'] = start_date_time.isoformat()\n if end_date_time is not None:\n variables['operationRun']['endDateTime'] = end_date_time.isoformat()\n if status is not None:\n allowed_statuses = ['init', 'running', 'completed_success', 'completed_failure']\n assert status in allowed_statuses, f'Status must be one of: {\", \".join(allowed_statuses)}.'\n variables['operationRun']['status'] = status\n if message is not None:\n variables['operationRun']['message'] = message\n if deleted is not None:\n variables['operationRun']['deleted'] = deleted\n \n result = self.query(\"\"\"\n mutation updateOperationRunMutation($operationRun: UpdateOperationRunInput!) {\n updateOperationRun(input: $operationRun) {\n operationRun {\n id\n operationName\n workflowRunId\n startDateTime\n endDateTime\n status\n message\n updatedAt\n deletedAt\n createdBy {\n id\n firstName\n lastName\n email\n }\n }\n }\n }\n \"\"\",\n variables=variables\n )\n return result",
"def save_run(self, run: Run):\n # save run metadata\n query = f\"\"\"\n INSERT INTO {self.runs_table} (timestamp, dataset_name,\n preprocessing_cfg, model_type, model_hyperparameters)\n VALUES (\n '{run.timestamp}',\n '{run.dataset_name}',\n '{json.dumps(run.preprocessing_cfg)}',\n '{run.model_type}',\n '{json.dumps(run.model_hyperparameters)}'\n )\n RETURNING run_id;\n \"\"\"\n # returns the run_id for the next insert\n run_id = self.engine.execute(query).scalar()\n # save run results\n metric_rows = []\n for metric, score in run.metric_scores.items():\n metric_rows.append(f\"({run_id}, '{metric}', {score})\")\n value_rows = ', '.join(metric_rows)\n query = f\"\"\"\n INSERT INTO {self.scores_table} (run_id, metric, score)\n VALUES {value_rows};\n \"\"\"\n self.engine.execute(query)",
"def test_patch_with_reschedule(self):\n return_dts = timezone.now()\n Run.objects.update(enqueue_dts=timezone.now())\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': return_dts.isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Run.objects.filter(job_id=1).count())\n self.assertEqual(\n return_dts, Run.objects.filter(job_id=1)[0].return_dts)",
"def update(self):\n self._job = pyslurm.job().find_id(str(self.id))[0]",
"def update_sample_record(data):\n session = controller.connect_to_database()\n record = session.query(Sample).filter_by(id=data[\"id\"]).one()\n\n record = Sample()\n record.sample = data[\"sample\"]\n record.panel = data[\"panel\"]\n record.sample_taken = data[\"sample_taken\"]\n record.genotyping = data[\"genotyping\"]\n record.variant_calling = data[\"variant_calling\"]\n record.qc_status = data[\"qc_status\"]\n record.qc_report = data[\"qc_report\"]\n record.coverage = data[\"coverage\"]\n\n session.commit()\n session.close()",
"def test_patch_with_reschedule(self):\n Run.objects.update(enqueue_dts=timezone.now())\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': timezone.now().isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Job.objects.get(pk=1).run_set.count())\n self.assertEqual(1, Job.objects.get(pk=3).run_set.count())",
"def test_reschedule_with_started_run(self):\n job = Job.objects.get(pk=1)\n self.assertEqual(1, Run.objects.filter(job=job).count())\n\n run = Run.objects.get(pk=1)\n run.schedule_dts = timezone.now()\n run.enqueue_dts = timezone.now()\n run.start_dts = timezone.now()\n run.return_dts = timezone.now()\n run.save()\n\n Run.objects.create(\n job=job,\n schedule_dts=timezone.now(),\n enqueue_dts=timezone.now(),\n start_dts=timezone.now(),\n )\n\n self.assertEqual(2, Run.objects.filter(job=job).count())\n job.reschedule()\n self.assertEqual(2, Run.objects.filter(job=job).count())",
"def test_reschedule_with_run_scheduled(self):\n job = Job.objects.get(pk=1)\n self.assertEqual(1, Run.objects.filter(job=job).count())\n\n run = Run.objects.get(pk=1)\n run.schedule_dts = timezone.now()\n run.enqueue_dts = timezone.now()\n run.return_dts = timezone.now()\n run.save()\n\n Run.objects.create(\n job=job,\n schedule_dts=timezone.now()\n )\n\n self.assertEqual(2, Run.objects.filter(job=job).count())\n job.reschedule()\n self.assertEqual(2, Run.objects.filter(job=job).count())",
"def test_update(self, record):",
"def update(self, instance):\n assert isinstance(instance, SchedulerFreerunEntry)\n collection = self.ds.connection(COLLECTION_SCHEDULER_FREERUN_ENTRY)\n return collection.save(instance.document, safe=True)",
"def update_record(target_name, update_score):\n subject = session.query(Subject).filter(Subject.name == target_name).first()\n subject.score = update_score\n session.commit()",
"def test_successful_update(self):\n\n manager = SchedulerManager()\n manager.sync_with_database()",
"def update_line(test_type, project_id, doc_id, param, value):\n # Get the existing data line data\n doc_id = str(doc_id)\n data_line = DataLine.objects.get(\n test_type=test_type, project_id=project_id, doc_id=doc_id\n )\n data_dict = data_line.data\n\n # Define the new data\n data_new_element = {param: value}\n data_dict.update(data_new_element)\n\n # Update the database\n DataLine.objects.filter(pk=data_line.pk).update(data=data_dict)\n\n return data_line",
"def update(self, payload):\n try:\n try:\n sys_id = self.get_one()['sys_id']\n except KeyError:\n raise InvalidUsage('Attempted to update a non-existing record')\n except MultipleResults:\n raise NotImplementedError(\"Update of multiple records is not supported\")\n\n if not isinstance(payload, dict):\n raise InvalidUsage(\"Update payload must be of type dict\")\n\n response = self.session.put(self._get_url(self.table, sys_id), data=json.dumps(payload))\n return self._get_content(response) # @TODO - update to return first key (API breakage)",
"def update(self) -> None:\n self.run_id += 1\n time.sleep(1)\n self.create_threads()\n print(\"Updated.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a playlist's full current record (excluding the changelog)
|
def get_playlist_current_info(self, playlist_id: str) -> Dict:
q = {"_id": playlist_id}
cols = {"changelog": 0}
r = list(self._playlists.find(q, cols))
if len(r) == 0:
raise Exception(f"{playlist_id} not found.")
else:
return r[0]
|
[
"def get_playlist():\n return _playlist",
"def playlist(self):\n return self.video.playlist",
"def get_playlog(self, flag='c'):\n return self._saver._fetch_playlog(flag=flag)",
"def curr_playlist(self):\n return self.curr_playlist_name",
"def fetch_playlists_metadata():\n sp = get_client()\n\n from span.tasks.library import get_playlists_metadata\n\n playlist_metas = get_playlists_metadata(sp)\n\n # export data\n sys.stdout.write(jsonpickle.encode(playlist_metas))",
"def get_playlist_changelog(self, playlist_id: str) -> Dict:\n q = {\"_id\": playlist_id}\n cols = {\"changelog\": 1}\n r = list(self._playlists.find(q, cols))\n\n if len(r) == 0:\n raise Exception(f\"{playlist_id} not found.\")\n else:\n if \"changelog\" in r[0].keys():\n return r[0][\"changelog\"]\n else:\n raise Exception(\n f\"No changelog found for {playlist_id}, has it been collected more than once?\"\n )",
"def get_user_playlists():\n playlists_info = sp.current_user_playlists(limit=50, offset=0)['items']\n playlists = []\n for playlist in playlists_info:\n playlist_dict = {}\n playlist_dict['uri'] = playlist['uri']\n playlist_dict['name'] = playlist['name']\n playlists.append(playlist_dict)\n return playlists",
"def getPlaylists(self, user=None):\n pass",
"def get_playlist(self):\n # TODO - traverse the actual tree\n ordered_list = list(self._videos)\n ordered_list.sort()\n vid_data = list(map(lambda video: video.vid_data, ordered_list))\n vid_data.reverse()\n return vid_data",
"def playlists_html(self):\n if self._playlists_html:\n return self._playlists_html\n else:\n self._playlists_html = request.get(self.playlists_url)\n return self._playlists_html",
"def get_currently_playing():\n currently_playing = {}\n currently_playing_json = sp.current_user_playing_track()\n if currently_playing_json is None:\n currently_playing['song'] = '>>>'\n currently_playing['artist'] = '<<<'\n return currently_playing\n else:\n currently_playing['song'] = currently_playing_json['item']['name']\n currently_playing['artist'] = currently_playing_json['item']['artists'][0]['name']\n return currently_playing",
"def get_embed(self) -> Embed:\n voice_client: VoiceClient = self.guild.voice_client\n message = self.playlist.print()\n status = \"Stoped\"\n if voice_client:\n if voice_client.is_playing():\n status = 'Playing'\n elif voice_client.is_paused():\n status = 'Paused'\n\n try:\n current = self.playlist.get_current()\n embed_data = {\n \"title\": f\"Playlist - {status}\",\n \"description\": message,\n \"thumbnail\": {\n \"url\": current.thumbnail\n },\n }\n except EmptyPlaylistException:\n embed_data = {\n \"title\": f\"Playlist - {status}\",\n \"description\": message,\n }\n\n return Embed.from_dict(embed_data)",
"def song(self):\n cur = self.con.cursor()\n cur.execute('select songs.titel, auteurs.naam, makers.naam, songs.datering, '\n 'songs.datumtekst, songs.url, songs.commentaar from songs '\n 'left outer join auteurs on auteurs.id == songs.tekst '\n 'left outer join makers on makers.id == songs.muziek')\n return [row for row in cur]",
"def getPlaylists(playlistPage):\r\n sp = getSP()\r\n playlists = playlistPage[\"items\"]\r\n while playlistPage[\"next\"]:\r\n playlistPage = sp.next(playlistPage)\r\n for playlist in playlistPage[\"items\"]:\r\n playlists.append(playlist)\r\n return playlists",
"async def list(self, ctx):\n if self._playlist.qsize() > 0 or self._current_song:\n await self.bot.command_channel.send(\"Songs in the playlist:\")\n if self._current_song is not None:\n await self.bot.command_channel.send(f\"{self._current_song.title}\")\n for song in self._playlist:\n await self.bot.command_channel.send(f\"{song.title}\")\n else:\n await self.bot.command_channel.send(\"No song in the playlist.\")",
"def get_playlist_data(url):\n sourceCode = requests.get(url).content\n sourceCode = sourceCode.decode('utf-8')\n print(\"url: \", url)\n strat_idx = sourceCode.index('{\"responseContext\"')\n # end_idx = sourceCode.index(';</script><link rel=\"canonical')\n ori_texts = sourceCode[strat_idx:]\n playlist = []\n while True:\n if ori_texts.find('playlistPanelVideoRenderer') == -1:\n break\n texts = ori_texts[ori_texts.index('playlistPanelVideoRenderer'):]\n texts.index('webPageType')\n texts = texts[texts.index('{\"webCommandMetadata\":{\"url\":\"/watch'):]\n texts = texts[texts.index('/watch?'):texts.index('\",\"webPageType')]\n playlist.append(texts)\n ori_texts = ori_texts[ori_texts.index('playlistPanelVideoRenderer')+10:]\n return playlist",
"def fetch_playlist(id: str):\n sp = get_client()\n\n from span.tasks.library import get_playlist_from_id\n\n playlist = get_playlist_from_id(sp, id)\n\n # export data\n sys.stdout.write(jsonpickle.encode(playlist))",
"def get_playlist_json():\n return json.dumps(get_playlist())",
"def get_playlists(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('playlists', search, start,\r\n max_items)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a playlist's changelog, a dictionary where each entry is a date.
|
def get_playlist_changelog(self, playlist_id: str) -> Dict:
q = {"_id": playlist_id}
cols = {"changelog": 1}
r = list(self._playlists.find(q, cols))
if len(r) == 0:
raise Exception(f"{playlist_id} not found.")
else:
if "changelog" in r[0].keys():
return r[0]["changelog"]
else:
raise Exception(
f"No changelog found for {playlist_id}, has it been collected more than once?"
)
|
[
"def getWeblogEntriesDates(entries_dict):",
"def deb_changelogs(new_snap, pkg_changes):\n # type: (str, Dict[str, Tuple[str, str]]) -> Dict[str, str]\n changelogs = {} # type: Dict[str, str]\n with tmpdir() as tmp:\n unsquashfs(tmp, new_snap, \"/usr/share/doc/*\")\n for name in pkg_changes:\n old_ver, new_ver = pkg_changes[name]\n # split of multi-arch tag\n fsname = name.split(\":\")[0]\n for chglogname in [\"changelog.Debian.gz\", \"changelog.gz\"]:\n changelog_path = os.path.join(\n tmp,\"usr/share/doc\", fsname, chglogname)\n if not os.path.exists(changelog_path):\n continue\n if not name in changelogs:\n changelogs[name] = \"\"\n changelogs[name] = changelog_until(changelog_path, old_ver)\n break\n return changelogs",
"def _changes(plays):\n changes = {}\n\n for play in plays[\"plays\"]:\n task_changes = {}\n for task in play[\"tasks\"]:\n host_changes = {}\n for host, data in task[\"hosts\"].items():\n if data.get(\"changed\", False) is True:\n host_changes[host] = data.get(\"diff\", data.get(\"changes\", {}))\n elif any(x in data for x in [\"failed\", \"skipped\", \"unreachable\"]):\n host_changes[host] = data.get(\"results\", data.get(\"msg\", {}))\n if host_changes:\n task_changes[task[\"task\"][\"name\"]] = host_changes\n if task_changes:\n changes[play[\"play\"][\"name\"]] = task_changes\n return changes",
"def get_logs(docid):\n cursor = flask.g.db.cursor()\n cursor.execute(\"SELECT diff, username,\"\n \" remote_addr, user_agent, timestamp\"\n \" FROM logs WHERE docid=?\"\n \" ORDER BY timestamp DESC\", (docid,))\n result = []\n for row in cursor:\n item = dict(zip(row.keys(), row))\n item[\"diff\"] = json.loads(item[\"diff\"])\n result.append(item)\n return result",
"def read(\n self, log_date: str, time_interval: Optional[Tuple[str, str]] = None\n ) -> List[Dict[str, Union[datetime, str]]]:\n time_boundaries: Tuple[datetime, datetime]\n\n if time_interval:\n time_boundaries = (\n datetime.fromisoformat(f\"{log_date}T{time_interval[0]}\"),\n datetime.fromisoformat(f\"{log_date}T{time_interval[1]}\"),\n )\n else:\n time_boundaries = (\n datetime.fromisoformat(f\"{log_date}\"),\n datetime.fromisoformat(f\"{log_date}\") + timedelta(days=1),\n )\n\n self.cursor.execute(\n \"\"\"\n SELECT lms.created_at, usr.user_id, usr.first_name, usr.second_name, lms.message\n FROM log_messages lms\n JOIN users usr\n ON lms.user_id = usr.user_id\n WHERE lms.created_at > ? AND lms.created_at < ?\n ORDER BY lms.created_at;\n \"\"\",\n time_boundaries,\n )\n\n return [dict(item) for item in self.cursor.fetchall()]",
"async def get_history():\n # Retrieve the parse history from the database or from a stored variable\n parse_history = [\n {\"sentence\": \"The dog chased the cat\", \"grammar\": \"English\", \"c-structure\": True, \"f-structure\": False, \"date\": \"2022-01-01\"},\n {\"sentence\": \"Le chat a poursuivi le chien\", \"grammar\": \"French\", \"c-structure\": False, \"f-structure\": True, \"date\": \"2022-01-02\"},\n {\"sentence\": \"Der Hund jagte die Katze\", \"grammar\": \"German\", \"c-structure\": True, \"f-structure\": True, \"date\": \"2022-01-03\"},\n ]\n return {\n \"parse_history\": parse_history\n }",
"def get_history(project, human_readable_names=True):\n person_query, project_query, bill_query = get_history_queries(project)\n history = []\n for version_list in [person_query.all(), project_query.all(), bill_query.all()]:\n for version in version_list:\n object_type = parent_class(type(version)).__name__\n\n # The history.html template can only handle objects of these types\n assert object_type in [\"Person\", \"Bill\", \"Project\"]\n\n # Use the old name if applicable\n if version.previous:\n object_str = describe_version(version.previous)\n else:\n object_str = describe_version(version)\n\n common_properties = {\n \"time\": version.transaction.issued_at,\n \"operation_type\": version.operation_type,\n \"object_type\": object_type,\n \"object_desc\": object_str,\n \"ip\": version.transaction.remote_addr,\n }\n\n if version.operation_type == Operation.UPDATE:\n # Only iterate the changeset if the previous version\n # Was logged\n if version.previous:\n changeset = version.changeset\n if isinstance(version, BillVersion):\n if version.owers != version.previous.owers:\n added, removed = describe_owers_change(\n version, human_readable_names\n )\n\n if added:\n changeset[\"owers_added\"] = (None, added)\n if removed:\n changeset[\"owers_removed\"] = (None, removed)\n\n # Remove converted_amount if amount changed in the same way.\n if (\n \"amount\" in changeset\n and \"converted_amount\" in changeset\n and changeset[\"amount\"] == changeset[\"converted_amount\"]\n ):\n del changeset[\"converted_amount\"]\n\n for prop, (val_before, val_after) in changeset.items():\n if human_readable_names:\n if prop == \"payer_id\":\n prop = \"payer\"\n if val_after is not None:\n val_after = describe_version(version.payer)\n if version.previous and val_before is not None:\n val_before = describe_version(\n version.previous.payer\n )\n else:\n val_after = None\n\n next_event = common_properties.copy()\n next_event[\"prop_changed\"] = prop\n next_event[\"val_before\"] = val_before\n next_event[\"val_after\"] = val_after\n history.append(next_event)\n else:\n history.append(common_properties)\n else:\n history.append(common_properties)\n\n return sorted(history, key=history_sort_key, reverse=True)",
"def _populate_history(self, project):\r\n ids = [project.id]\r\n if project.is_hub:\r\n ids += project.outsourcing.all().values_list('id', flat=True)\r\n entries = LogEntry.objects.filter(\r\n content_type=ContentType.objects.get_for_model(Project),\r\n object_id__in=ids\r\n )[:5]\r\n r = TxRedisMapper()\r\n key = redis_key_for_project(project)\r\n for entry in entries:\r\n data = {\r\n 'action_time': entry.action_time,\r\n 'message': entry.message,\r\n 'action_type': entry.action_type\r\n }\r\n r.rpush(key, data=data)\r\n r.ltrim(key, 0, 4)",
"def get_changelog(self, when=None, db=None):\n sid = str(self.id)\n when_ts = to_utimestamp(when)\n if when_ts:\n sql = \"\"\"\n SELECT time, author, field, oldvalue, newvalue, 1 AS permanent\n FROM ticket_change WHERE ticket=%s AND time=%s\n UNION\n SELECT time, author, 'attachment', null, filename,\n 0 AS permanent\n FROM attachment WHERE type='ticket' AND id=%s AND time=%s\n UNION\n SELECT time, author, 'comment', null, description,\n 0 AS permanent\n FROM attachment WHERE type='ticket' AND id=%s AND time=%s\n ORDER BY time,permanent,author\n \"\"\"\n args = (self.id, when_ts, sid, when_ts, sid, when_ts)\n else:\n sql = \"\"\"\n SELECT time, author, field, oldvalue, newvalue, 1 AS permanent\n FROM ticket_change WHERE ticket=%s\n UNION\n SELECT time, author, 'attachment', null, filename,\n 0 AS permanent\n FROM attachment WHERE type='ticket' AND id=%s\n UNION\n SELECT time, author, 'comment', null, description,\n 0 AS permanent\n FROM attachment WHERE type='ticket' AND id=%s\n ORDER BY time,permanent,author\n \"\"\"\n args = (self.id, sid, sid)\n return [(from_utimestamp(t), author, field, oldvalue or '',\n newvalue or '', permanent)\n for t, author, field, oldvalue, newvalue, permanent in\n self.env.db_query(sql, args)]",
"def get_changelog_version_date() -> Tuple[str, str]:\n with open(\"CHANGELOG.md\", \"r\") as file:\n for line in file.readlines():\n result = CHANGELOG_EXP.match(line)\n if result is not None:\n return result.groups()\n\n return None, None",
"def _download_hist_releases() -> Dict[str, Dict[str, str]]:\n req = requests.get(HIST_REALEASES)\n soup = BeautifulSoup(req.text, features='html.parser')\n table = soup.find('table')\n links = table.find_all('a')\n hist_releases = {}\n for i in range(0, len(links), 2): # 2: changelog+tarfile\n link = links[i: i + 2]\n if len(link) != 2:\n continue\n logfile, zipfile = (link[0].attrs['href'],\n link[1].attrs['href'])\n date = link[1].getText()[len('cord-19_'):]\n date = date.replace('.tar.gz', '')\n hist_releases.update({\n date: {'log': logfile, 'zip': zipfile}})\n\n return hist_releases",
"def audit_dates(self):\n return {\n \"created_at\": self.created_at.isoformat() + 'Z',\n \"updated_at\": self.updated_at.isoformat() + 'Z'\n }",
"def logfile_timeline(self, container):\n interesting_lines = [\n line.strip()\n for line in open(container.logfile)\n if self.interesting_re.search(line)]\n return [(container.name,) + split_timestamp(line) for line in interesting_lines]",
"def _read_changelog(changelog_file):\n with open(changelog_file) as f:\n lines = [\"<p>\" + line + \"</p>\" for line in f.readlines()]\n return \"\\n\".join(lines)",
"def _format_changelog(self, changelog):\r\n if not changelog:\r\n return changelog\r\n new_changelog = []\r\n for line in string.split(string.strip(changelog), '\\n'):\r\n line = string.strip(line)\r\n if line[0] == '*':\r\n new_changelog.extend(['', line])\r\n elif line[0] == '-':\r\n new_changelog.append(line)\r\n else:\r\n new_changelog.append(' ' + line)\r\n\r\n # strip trailing newline inserted by first changelog entry\r\n if not new_changelog[0]:\r\n del new_changelog[0]\r\n\r\n return new_changelog",
"async def changelog(message):\n log = await get_git_log()\n return \"```{}```\".format(\"\\n\".join(log[:10]))",
"def get_changelog(debpath, changelogpath, baseversion, updateversion):\n # create tmp dir\n randomstring = gen_string(10)\n TMPDIR = '/tmp/diffchangelog-' + randomstring\n\n extractcmd = \"dpkg-deb -x \" + debpath + \" \" + TMPDIR\n\n extractdeb = os.system(extractcmd)\n # extract deb failed?\n if extractdeb != 0:\n log_print(\"extract deb file failed.\")\n return 9\n\n zcatcmd = \"cd \" + TMPDIR + \" && zcat \" + changelogpath\n\n changelogs = os.popen(zcatcmd).read()\n\n # clean TMPDIR\n cleancmd = \"rm -rf \" + TMPDIR\n os.system(cleancmd)\n\n return changelogs",
"def __list__(self):\n return self.changes",
"def getWeblog():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets a playlist's last collection date.
|
def get_playlist_collection_date(self, playlist_id: str) -> str:
q = {"_id": playlist_id}
cols = {"last_collected": 1}
r = list(self._playlists.find(q, cols))
        # If not found, return an old date
if len(r) == 0:
return "2000-01-01" # Long ago
elif len(r) == 1:
return r[0]["last_collected"]
else:
raise Exception("Playlist Ambiguous, should be unique to table.")
|
[
"def get_last(collection):\n return list(DB.DATABASE[collection].find().sort([('created_at', -1)]).limit(1))[0]",
"def last(self) -> datetime.date:\n return self.__dates__[-1]",
"def last_day(self):\n return self._last_day",
"def last_played(self):\n if self._last_played is None:\n return None\n else:\n return self._last_played.strftime(UsageStats._DATE_FORMAT)",
"def get_last_play(username,game_id):\n baseurl = 'https://api.geekdo.com/xmlapi2/'\n url = baseurl + (f\"plays?username={username}&id={game_id}\")\n data = request_data(url)\n doc = xmltodict.parse(data)\n try:\n last_played = doc['plays']['play'][0]['@date']\n except KeyError:\n last_played = doc['plays']['play']['@date']\n return last_played",
"def LastPostDate(self):\n\n\t\tlast_post = forum_post.objects.filter(thread = self).order_by('-date_created')\n\t\tif len(last_post) == 0:\n\t\t\treturn 'Never'\n\t\t#End\n\n\t\treturn last_post[0].date_created",
"def get_last_play(username,game_id):\n baseurl = 'https://www.boardgamegeek.com/xmlapi2/'\n url = baseurl + (f\"plays?username={username}&id={game_id}\")\n data = request_data(url)\n doc = xmltodict.parse(data)\n try:\n last_played = doc['plays']['play'][0]['@date']\n except KeyError:\n last_played = doc['plays']['play']['@date']\n return last_played",
"def get_collection():\r\n collection = datetime.date.today()\r\n return __database.sensors[collection]",
"def LastPostDate(self):\n\n\t\tlast_thread = forum_thread.objects.filter(forum = self).order_by('-date_created')\n\t\tif len(last_thread) == 0:\n\t\t\treturn 'Never'\n\t\t#End\n\t\tlast_post = forum_post.objects.filter(thread = last_thread[0]).order_by('-date_created')\n\t\tif len(last_post) == 0:\n\t\t\treturn 'Never'\n\t\t#End\n\n\t\treturn last_post[0].date_created",
"def date_latest(self):\n dt = self.sort_date_latest()\n return self._adjust_for_precision(dt, 1.0)",
"def _get_last_poller_date(self, polling_lookback):\n return self._get_timestamp() - datetime.timedelta(minutes=polling_lookback)",
"def last(self) -> MispEvent:\n return self.list(limit=1, direction='desc')[0]",
"def get_end_date(self):\n latest_position = self.get_latest_position()\n if latest_position is not None:\n return latest_position.end_date\n else:\n return None",
"def lastProject(self):\n return self.projects[-1]",
"def get_end_date(self):\n if (self.end_date != date.max):\n return self.end_date\n else:\n return None",
"def getPublishDateOfLastReleaseData(self):\n sql = \"SELECT date FROM public.deter_publish_date\"\n \n return self.__execSQL(sql)",
"def last_post(self):\n posts = self.Catalog.searchResults(meta_type='Post',\n size=1,\n sort_on='date',\n sort_order='descending')\n if posts:\n return posts[0].getObject()\n\n return None",
"def collections_latest( request, repo, org, num_collections=1 ):\n collections = []\n s = _session(request)\n url = '{}/kiroku/{}-{}/'.format(settings.WORKBENCH_URL, repo, org)\n r = s.get(url)\n soup = BeautifulSoup(r.text)\n if _needs_login(soup):\n raise Exception('Could not get collection IDs. Please log out, log in, and try again.')\n cids = []\n for c in soup.find_all('a','collection'):\n cids.append(c.string)\n collections = cids[-num_collections:]\n return collections",
"def last_log():\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n\n if not time_sorted_list:\n return\n return time_sorted_list[-1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all ids from the artists db.
|
def get_known_artist_ids(self) -> List[str]:
q = {}
cols = {"_id": 1}
r = list(self._artists.find(q, cols))
return [x["_id"] for x in r]
|
[
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def fetchAlbumIds(artist_id):\n url = \"https://api.spotify.com/v1/artists/\" + artist_id + \"/albums?album_type=album&market=US\"\n req = requests.get(url)\n if req.ok == False:\n return 'Error: bad Spotify API URL or similar error'\n data = req.json()\n albums_list = []\n #print len(data[u'items'])\n for album in data[u'items']:\n album_id = album[u'id']\n albums_list.append(album_id)\n return albums_list",
"def get_ids():",
"def fetchAlbumIds(artist_id):\n url_base = \"https://api.spotify.com/v1/artists/\" + artist_id\n url_album = \"/albums?album_type=album\"\n url_market = \"&market=US\"\n url = url_base + url_album + url_market\n req = requests.get(url)\n data = req.json()\n album = data['items'][0]['id']\n return album",
"def get_artists_by_genres(self, genres: List[str]) -> List[str]:\n q = {\"genres\": {\"$all\": genres}}\n cols = {\"_id\": 1}\n r = list(self._artists.find(q, cols))\n\n return [x[\"_id\"] for x in r]",
"def artists(self):\n return tuple(filter(None, (ref() for ref in self._artists)))",
"def get_artists(self, with_connection=None):\n if with_connection:\n connection = with_connection\n else:\n connection = self.get_database_connection()\n rows = connection.execute(\"SELECT name FROM artists ORDER BY name\")\n results = [row for row in rows]\n if not with_connection:\n self.close_database_connection(connection)\n for artist in results:\n yield artist",
"def get_ids(self):\n return self.multiengine.get_ids()",
"def get_artist_related(self, artists_id):\n response = self.__get_data(self.url.artists_related_url().format(id=str(artists_id)))\n list_of_related_artists = []\n for related_artist in response['artists']:\n artist = Artist(artist_id=related_artist['id'], name=related_artist['name'],\n popularity=related_artist['popularity'], genres=related_artist['genres'])\n list_of_related_artists.append(artist)\n return list_of_related_artists",
"def get_tracks_from_artists(self, artists: List[str], start_date: str, end_date: str) -> List[str]:\n \n albums = self.get_albums_from_artists_by_date(artists, start_date, end_date)\n tracks = np.unique(self.get_tracks_from_albums(albums)).tolist()\n \n return tracks",
"def get_artist_albums(self, artist_id): # TODO initialize and return a list of Album objects\n return self.__get_data(self.url.artists_albums_url().format(id=str(artist_id)))",
"def get_artists_of_year(year: int) -> list:\n sql_request = sql_request_artists_year(year)\n\n sql_data = get_data_from_db(sql_request)\n artists = create_data_of_year(sql_data)\n return artists",
"def getSongIDs(songs):\r\n ids = []\r\n for i in range(len(songs)):\r\n ids.append(songs[i]['track']['id'])\r\n return ids",
"def get_exist_apartments_ids(self):\n self.cursor.execute('SELECT id FROM apartments')\n return [i[0] for i in self.cursor.fetchall()]",
"def get_all_file_ids():\n id_list = Score.objects.all().values(\"file_id\")\n return id_list",
"def get_performers_of_song(id_song: int) -> list:\n sql_request = sql_request_performers_of_song(id_song)\n\n sql_data = get_data_from_db(sql_request)\n artists = create_artists(sql_data)\n return artists",
"def get_all_playlist_ids(self):\r\n response = self.query(\r\n \"\"\"SELECT playlist_id\r\n FROM subreddit_playlists\r\n \"\"\"\r\n ).fetchall()\r\n\r\n playlist_ids = [playlist_id[0] for playlist_id in response]\r\n\r\n return playlist_ids",
"def get_album_artists(self, album_id):\n response = self.__get_data(self.url.albums_url().format(id=str(album_id)))\n artists = []\n for album_artists in response['artists']:\n artist = self.get_artist(album_artists['id'])\n artists.append(artist)\n return artists",
"def get_track_artists(self, track_id):\n response = self.__get_data(self.url.tracks_url().format(id=str(track_id)))\n artists = []\n for album_artists in response['artists']:\n artist = self.get_artist(album_artists['id'])\n artists.append(artist)\n return artists"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all artists with an album collection date before max_date, or with no collection date recorded.
|
def get_artists_for_album_collection(self, max_date: str) -> List[str]:
q = {}
cols = {"_id": 1, "album_last_collected": 1}
r = list(self._artists.find(q, cols))
# Only append artists who need collection in result
result = []
for artist in r:
if "album_last_collected" in artist.keys():
if artist["album_last_collected"] < max_date:
result.append(artist["_id"])
else:
result.append(artist["_id"])
return result
|
[
"def db_annotater_get_latest_user_albums(album_date):\n\tstart_at\t= album_date['start_at']\n\tend_at\t\t= album_date['end_at']\n\t(hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)\n\twear_time \t= [{\"hours\":str(hours),\"minutes\":str(mins)}]\n\talbum_id \t= album_date['id']\n\tif album_date['annotation'] is True:\n\t\tsubmitted = \"Yes\"\n\telse:\n\t\tsubmitted = \"No\"\n\tcapture_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t# get images\n\timages \t\t= db_annotater_get_album_images(album_id)\n\tone_album \t= {\"wearTime\" : wear_time, \\\n\t\t\t\t\"submitted\" : submitted, \\\n\t\t\t\t\"date\" : capture_date, \\\n\t\t\t\t\"images\" : images}\n\treturn [one_album]",
"def get_tracks_from_artists(self, artists: List[str], start_date: str, end_date: str) -> List[str]:\n \n albums = self.get_albums_from_artists_by_date(artists, start_date, end_date)\n tracks = np.unique(self.get_tracks_from_albums(albums)).tolist()\n \n return tracks",
"def db_annotater_get_user_album_dates(albums_queryset):\n\n\t# analyse the queryset of all albums of a user\n\tlatest_date \t= \"\"#datetime.now().date()\n\tsubmit_dates\t= []\n\tunsubmit_dates\t= []\n\tlatest_album\t= None \n\tfor album_date in albums_queryset:\n\t\tif album_date['annotation'] is True:\n\t\t\tnew_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t\t\tsubmit_dates.append(new_date)\n\t\telse:\n\t\t\tnew_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t\t\tunsubmit_dates.append(new_date)\n\tif len(albums_queryset) > 0:\n\t\tlatest_album= albums_queryset.reverse()[0]\n\t\tlatest_date = latest_album['capture_date']\n\t\tlatest_date = get_date_dash_d_m_y(latest_date)\n\t\tlatest_album_id = latest_album['id']\n\talbum_dates = {'ld':latest_date,'s':submit_dates,'u':unsubmit_dates} \n\treturn (latest_album,album_dates)",
"def get_albums_for_track_collection(self) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"tracks\": 1}\n r = list(self._albums.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for album in r:\n if \"tracks\" not in album.keys():\n result.append(album[\"_id\"])\n return result",
"def update_artist_album_collected_date(self, artist_ids: List[str], date: str=None) -> None:\n date = dt.datetime.now().strftime(\"%Y-%m-%d\") if date is None else date\n\n for artist_id in tqdm(artist_ids):\n q = {\"_id\": artist_id}\n self._artists.update_one(\n q, {\"$set\": {\"album_last_collected\": date}}, upsert=True\n )",
"def get_queryset(self):\n return Album.objects.order_by('-pub_date')[:10]",
"def get_top_albums(self, artist=None, mbid=None, autocorrect=False,\n limit=None):\n perpage = min(30, limit) if limit else 30\n\n resp = self._paginate_request(\n 'GET',\n 'artist.getTopAlbums',\n 'album',\n params=dict(\n artist=artist,\n mbid=mbid,\n autocorrect=int(autocorrect),\n ),\n limit=limit,\n perpage=perpage,\n unwrap='topalbums'\n )['album']\n\n return self.model_iterator(common.Album, resp)",
"def get_artists_of_year(year: int) -> list:\n sql_request = sql_request_artists_year(year)\n\n sql_data = get_data_from_db(sql_request)\n artists = create_data_of_year(sql_data)\n return artists",
"def get_artists(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('artists', search, start,\r\n max_items)",
"def get_artist_albums(self, artist_id): # TODO initialize and return a list of Album objects\n return self.__get_data(self.url.artists_albums_url().format(id=str(artist_id)))",
"def test_get_top_artists(self):\n chart = self.chart.get_top_artists(page=2, limit=1)\n self.utils.assert_response_content(chart)\n assert_equal(chart['artists']['@attr']['page'], \"2\")\n del chart['artists']['@attr']\n assert_equal(len(chart['artists']), 1)",
"def get_n_latest_albums_for_artist_on_market_by_type(self, artistId,\n market=\"DE\",\n type=type,\n limit=10):\n spotify = self.__s\n\n result = []\n\n albumResult = spotify.artist_albums(artistId, limit=limit,\n album_type=type, country=market)\n albums = albumResult[\"items\"]\n\n # If next set of albums is empty then stop the album loop\n if len(albums) == 0:\n process = False\n else:\n # Iterate over next album set and append the found albums\n for album in albums:\n resultAlbum = self.get_full_album(album[\"id\"])\n result.append(resultAlbum)\n\n return result",
"def __album_after_last_update_filter(self, spotifyRelease):\n format = DATE_FORMAT\n\n lastUpdateString = get_last_update()\n lastUpdateDate = datetime.strptime(lastUpdateString, format)\n\n releaseDateString = spotifyRelease.releaseDate\n try:\n # If release date is in YYYY-mm-dd format\n releaseDate = datetime.strptime(releaseDateString, format)\n except ValueError:\n # If release date is in YYYY format\n releaseDate = datetime.strptime(\"%s-01-01\" % (releaseDateString),\n format)\n\n return releaseDate >= lastUpdateDate",
"def get_album_ids(name, artist_id, artist_name):\n albums_list = [album for album in musicbrainzngs.\n search_releases(query=name, arid=artist_id)[\"release-list\"]\n if remove_forbidden_characters(custom_replace_title(\n album[\"title\"])).lower() == name.lower()\n and \"date\" in album and album[\"date\"]]\n if not albums_list:\n raise ValueError(f\"Album {name} not literally found by artist \"\n f\"{artist_name}\")\n albums_list = sorted(albums_list, key=lambda a: a[\"date\"])\n use_for_cover = None\n for album in reversed(albums_list):\n try:\n musicbrainzngs.get_image_list(album[\"id\"])\n use_for_cover = album\n break\n except musicbrainzngs.musicbrainz.ResponseError:\n continue\n if use_for_cover is None:\n raise ValueError(f\"No cover art available for {name} by \"\n f\"{artist_name}, this is unsupported behaviour\")\n else:\n return albums_list[0][\"id\"], use_for_cover[\"id\"]",
"def get_popular_movie_artists():\n\n movie_artists = list()\n for page in range(1,41):\n movie_artists.append(get_popular_movie_artists_page(page))\n movie_artists = [movie_artist for page in movie_artists for movie_artist in page]\n return(movie_artists)",
"def top_artists(account=None, limit=10, date=None, period=\"day\"):\n check_top_kwargs(\n **{\"account\": account, \"limit\": limit, \"date\": date, \"period\": period}\n )\n\n qs_kwargs = {}\n\n if account:\n qs_kwargs[\"account\"] = account\n\n if date and period:\n min_post_time, max_post_time = get_period_times(date, period)\n qs_kwargs[\"min_post_time\"] = min_post_time\n qs_kwargs[\"max_post_time\"] = max_post_time\n\n qs = Artist.objects.with_scrobble_counts(**qs_kwargs)\n\n if limit != \"all\":\n qs = qs[:limit]\n\n return qs",
"def get_albums(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('albums', search, start,\r\n max_items)",
"def get_articles(client, days_back=10):\n coll = client['NewsAggregator'].news_stories\n items = []\n\n for item in coll.find(\n {\"publish_date\": {\"$gte\": datetime.utcnow() - timedelta(days=days_back) } }, { \"similar_articles\": 0 }):\n # Add the item to the dictionary\n if ('description' not in item):\n print(item)\n else:\n items.append(item)\n return items",
"def get_top_genres(artists, limit=3):\n genre_map = dict()\n\n # get count of each genre\n for artist in artists:\n for genre in artist[\"genres\"]:\n genre_map[genre] = genre_map.get(genre, 0) + 1\n\n # sort genres by frequency\n genres = sorted(genre_map.keys(), key=lambda key: genre_map[key], reverse=True)\n\n return genres[:limit]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets a list of artists in the DB that have all of the given genres.
|
def get_artists_by_genres(self, genres: List[str]) -> List[str]:
q = {"genres": {"$all": genres}}
cols = {"_id": 1}
r = list(self._artists.find(q, cols))
return [x["_id"] for x in r]
|
[
"def get_genres():\n \n return Genre.query.order_by('genre_name').all()",
"def populate_artist_genres(artist_list, music_genre_dict):\n\tpopulated_list = []\n\tfor artist in artist_list:\n\t\tif artist in music_genre_dict.keys():\n\t\t\tpopulated_list.append(artist)\n\t\t\tpopulated_list.extend(music_genre_dict[artist])\t\n\t\telse:\n\t\t\tpopulated_list.append(artist)\n\n\treturn populated_listo",
"def get_all_genres(data):\n\n total_genres = []\n for movie in data['genres'].values:\n total_genres.extend(movie)\n\n return list(set(total_genres))",
"def get_genres_of_movies():\n\tdf_of_genres = pd.read_csv('netflix_genres.csv')\n\tall_genres = set()\n\tfor movie_genre in df_of_genres['genres'].to_list():\n\t\tall_genres.update(movie_genre.split('|'))\n\tprint(\"all genres are:\")\n\tprint(all_genres)\n\tprint(\"Number of genres is: \")\n\tprint(len(all_genres))\n\n\tdf_of_movies_and_all_genres = pd.DataFrame(columns=all_genres)\n\tfor idx, row in df_of_genres.iterrows():\n\t\tmovie_id = row[0]\n\t\tmovie_genres = row[1].split('|')\n\t\tfor movie_genre in movie_genres:\n\t\t\tdf_of_movies_and_all_genres.loc[movie_id, movie_genre] = 1\n\tdf_of_movies_and_all_genres.fillna(0, inplace=True)\n\treturn df_of_movies_and_all_genres",
"def get_artist_genres(artist_id):\n # sleep before doing artist\n time.sleep(SLEEP_INTERVAL)\n\n # artist_id = song_data[\"artist_id\"]\n # artist_data = urllib2.urlopen(search_url).read()\n\n artist_params = (\n (\"api_key\", Config.KEY),\n (\"id\", artist_id),\n (\"format\", \"json\"),\n (\"bucket\", \"genre\")\n )\n artist_url = ARTIST_BASE_URL + \"?\" + urllib.urlencode(artist_params)\n artist_result = urllib2.urlopen(artist_url).read()\n artist_json = json.loads(artist_result)\n genres_raw = artist_json[\"response\"][\"artist\"][\"genres\"]\n genres = [x[\"name\"] for x in genres_raw]\n return genres",
"def populateGenre(self):\r\n \r\n data = showInformation.getJson(self.infourl)\r\n if \"genres\" in data:\r\n return data[\"genres\"]\r\n else:\r\n return False",
"def get_genres(track, artist, keys):\n genres_list = ['electronic', 'jazz', 'hip hop', 'pop', 'rock',\n 'alternative rock', 'metal', 'indie']\n\n # Format artist and track name and create url from them\n artist = remove_query_spaces(artist)\n track = remove_query_spaces(track)\n url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=\" \\\n + keys[2] + \"&artist=\" + artist + \"&track=\" + track + \"&format=json\"\n\n # Last.fm GET request and parsed to json.\n response = requests.get(url).json()\n\n # Find all Last.fm tags for the song.\n tags = []\n if 'track' in response:\n for tag in response['track']['toptags']['tag']:\n tags.append(tag['name'])\n\n # Returns the first genre that is matched by a tag and the genre\n # filter.\n for tag in tags:\n for genre in genres_list:\n if tag.lower() in genre or genre in tag.lower():\n return genre\n\n return None",
"def get_genres():\n response = requests.get(\"https://api.themoviedb.org/3/genre/movie/list?api_key=\" + tmdb_api_key + \"&language=en-US\")\n \n if response.status_code == 200:\n data = response.json()\n genres = []\n for genre in data['genres']:\n genres += [[str(genre.get(\"id\")), genre.get('name')]]\n return genres\n else:\n raise Exception('tmdb API gave status code {}'.format(response.status_code))",
"def moviesInThisGenre(genre):\n data = movies.find({\"genres\": {\"$in\": [genre] } })\n for movie in data:\n for key, value in movie.items():\n if key == \"title\":\n print(\"{title: %s}\" % value)",
"def filter_by_genre(self, genre: str) -> 'MusicFileSet':\n subset = MusicFileSet()\n subset.collection = [item for item in self.collection if item.genre == genre]\n return subset",
"def testRetrieveGenre(self):\n self.assert_(self.epg.genres())",
"def genres(self):\n response = self._api.api_call(path=\"films/genres\")\n genres_response = response.json()\n return genres_response",
"def get_genre_frequency(self, artists):\n top_genres = {}\n for artist in artists:\n genres = artist['genres']\n for genre in genres:\n if genre in top_genres:\n top_genres[genre] += 1\n else:\n top_genres[genre] = 1\n return top_genres",
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def load_artist_one_hot_genres_information_into_df(self):\n genres = set()\n for artists in self.playlist_information.artists_info.values:\n for artist in artists:\n for genre in artist[\"genres\"]:\n genres.add(genre)\n\n genres = list(genres)\n genres_df = []\n for artists in self.playlist_information.artists_info.values:\n song_genres = {'genre_' + genre: 0 for genre in genres}\n for artist in artists:\n for genre in artist[\"genres\"]:\n song_genres['genre_' + genre] = 1\n\n genres_df.append(song_genres)\n\n self.genres_df = pd.DataFrame(genres_df)",
"def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n if genres:\r\n for genre in genres:\r\n try:\r\n genre = genre.contents[0].strip()\r\n if len(genre) > 0:\r\n self.genres.append(genre)\r\n except KeyError:\r\n pass\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve genre(%s)(%s)' %\r\n (self.imdb_id, e))",
"def get_genres(soup):\n # type: (BeautifulSoup) -> list\n genre = soup.find(\"h4\", string=\"Genre:\")\n if not genre:\n return []\n genres = genre.find_next(\"p\").find_all(\"a\")\n if len(genres):\n return [genre.text for genre in genres]\n return []",
"def getAllGenes(self):\n return self.data.getAllGenes()",
"def genesymbols_2_entrezids(genelist):\n # should check that genelist input does not have 'na' values\n probes_file = pd.read_csv('./data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv',\n usecols=['gene_symbol', 'entrez_id']).drop_duplicates()\n has_entrez = probes_file[probes_file.gene_symbol.isin(genelist)]\n has_entrez = has_entrez.drop_duplicates().dropna(subset=['entrez_id'])\n\n return has_entrez"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the artist db with a list of new artist info.
|
def update_artists(self, artist_info_list: List[Dict]) -> None:
for artist in tqdm(artist_info_list):
q = {"_id": artist["id"]}
# Writing updates (formatting changes)
artist["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
artist["total_followers"] = artist["followers"]["total"]
del artist["followers"]
del artist["id"]
self._artists.update_one(q, {"$set": artist}, upsert=True)
|
[
"def update_artist(artist, new_name):\n conn = sqlite3.connect(\"mydatabase.db\")\n cursor = conn.cursor()\n \n sql = \"\"\"\n UPDATE albums\n SET artist = ?\n WHERE artist = ?\n \"\"\"\n cursor.execute(sql, (new_name, artist))\n conn.commit()\n cursor.close()\n conn.close()",
"def _update_(self, db):\n self.artist_db = self._find(db, 'album', 'albumartist')\n self.music_db = db\n self.initialized = True",
"def add_artist(self, artist):\n self.artists[artist.name] = artist",
"def update_artist_list(xml_path, **kwargs):\r\n songs_array = kwargs.get('paths').get('songs_array')\r\n songs_path = kwargs.get('paths').get('songs_path')\r\n counter = 0\r\n if kwargs.get('skip_check'):\r\n tree = is_readable_xml(xml_path)\r\n root = tree.getroot()\r\n root = tree.find(\"artists\")\r\n for song in songs_array:\r\n try:\r\n file = TinyTag.get(os.path.join(songs_path, song + \".mp3\"))\r\n artist = file.artist\r\n if artist is not None: # artist value isn't blank\r\n artist = re.sub(\"'|\\\"\", '', artist)\r\n string = \".//artist[@value='{}']\".format(artist)\r\n if root.find(string) is None:\r\n ET.SubElement(root, \"artist\",{\"value\": artist.lower()})\r\n tree.write(xml_path, encoding='utf-8')\r\n \"\"\"else:\r\n print(type(root.find(\"[@value={}]\".format(file.artist))))\"\"\"\r\n except Exception: # unsupported files\r\n counter += 1",
"def new_artist( self, artist_name ):\n\n if artist_name in self.art_fields[\"artists\"]:\n raise ValueError( \"'{:s}' is already an artist in the database.\".format( artist_name ) )\n\n # find the first position where the new artist sorts (insensitively)\n # after everything before it.\n #\n # NOTE: we don't use something like the bisect module so as to\n # preserve the existing order of the artists, which may or may\n # not be sorted.\n #\n for index, existing_artist_name in enumerate( self.art_fields[\"artists\"] ):\n if artist_name.lower() < existing_artist_name.lower():\n break\n\n self.art_fields[\"artists\"].insert( index, artist_name )\n\n self.mark_data_dirty()",
"def update_all(self):\n\n # get all rows from table\n data = self.engine.fetch_all_like_entry(JoinSongArtist())\n\n # update each\n for row in data: self.update(row)",
"def insert_artist_data(df, cur):\n # insert artist record\n artist_columns = ['artist_id',\n 'artist_name',\n 'artist_location',\n 'artist_latitude',\n 'artist_longitude']\n artist_data = df[artist_columns].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)",
"def update_artists(artists, tracks, time_idx):\n time_artist, head_artist, *track_artists = artists\n time_artist.set_text(time_idx[:5])\n head_lonlat = []\n for artist, track in zip(track_artists, tracks):\n point = get_point(track, time_idx)\n if point is not None:\n lon, lat = artist.get_data()\n lon.append(point['lon'])\n lat.append(point['lat'])\n artist.set_data(lon, lat)\n head_lonlat.append((point['lon'], point['lat']))\n if head_lonlat:\n head_artist.set_offsets(head_lonlat)\n else:\n head_artist.set_offsets(ndarray(shape=(0, 2))) # empty scatter plot\n return artists",
"def insert_artist_record(cur, df):\n artist_columns = ['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']\n artist_data = df.loc[0, artist_columns].values.tolist()\n cur.execute(artist_table_insert, artist_data)",
"def set_up_artist_id_table(cur, conn):\n cur.execute(\"CREATE TABLE IF NOT EXISTS ArtistIds (artist_id INTEGER PRIMARY KEY, artist TEXT)\")\n #Calls the get_songs_and_artists() function and saves it into a list\n top_100_list = get_songs_and_artists()\n #Empty list to store artist names in\n artist_list = []\n #Loops through the top_100_list and adds in the artist and their id number\n count = 0\n for x in range(len(top_100_list)):\n #Only adds in the artist to the artist_list if it is not already in to artist_list\n if top_100_list[x][1] not in artist_list:\n artist_id = count\n artist = top_100_list[x][1]\n artist_list.append(artist)\n cur.execute(\"INSERT OR IGNORE INTO ArtistIds (artist_id, artist) VALUES (?, ?)\", (artist_id, artist))\n count = count + 1\n x = x + 1\n conn.commit()",
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def save_one_artist(self, artist, tag, text):\n # mandatory fields\n with tag('Key1'):\n text(artist.item_code)\n with tag('ItemCode'):\n text(artist.item_code)\n with tag('title'):\n text(Util.string_cleanup(artist.title))\n with tag('GlossaryType'):\n text(artist.glossary_type)\n with tag('KEXPName'):\n text(artist.name)\n with tag('KEXPSortName'):\n text(artist.sort_name)\n with tag('KEXPMBID'):\n text(artist.id)\n \n # optional fields\n\n if len(artist.alias_list) > 0:\n for alias in artist.alias_list:\n with tag('KEXPAlias'):\n text(alias)\n\n if artist.annotation > '':\n with tag('KEXPAnnotation'):\n text(artist.annotation)\n\n if artist.disambiguation > '':\n with tag('KEXPDisambiguation'):\n text(artist.disambiguation)\n\n if artist.type > '':\n with tag('KEXPArtistType'):\n text(artist.type)\n \n with tag('KEXPBeginArea'):\n text(artist.begin_area.name)\n with tag('KEXPBeginAreaMBID'):\n text(artist.begin_area.id)\n\n with tag('KEXPBeginDate'):\n text(artist.begin_date)\n with tag('KEXPEndDate'):\n text(artist.end_date)\n if artist.ended:\n with tag('KEXPEnded'):\n text(artist.ended)\n\n with tag('KEXPCountry'):\n text(artist.country.name)\n with tag('KEXPCountryMBID'):\n text(artist.country.id)\n \n with tag('KEXPEndArea'):\n text(artist.end_area.name)\n with tag('KEXPEndAreaMBID'):\n text(artist.end_area.id)\n\n if len(artist.ipi_list) > 0:\n for code in artist.ipi_list:\n with tag('KEXPIPICode'):\n text(code)\n\n if len(artist.isni_list) > 0:\n for code in artist.isni_list:\n with tag('KEXPISNICode'):\n text(code)\n\n if len(artist.url_relation_list) > 0:\n for link in artist.url_relation_list:\n with tag('KEXPLink'):\n text(link)",
"def save_one_artist(self, artist, tag, text):\n # mandatory fields\n with tag('Key1'):\n text(artist.item_code)\n with tag('ItemCode'):\n text(artist.item_code)\n with tag('title'):\n text(Util.stringCleanup(artist.title))\n with tag('GlossaryType'):\n text(artist.glossary_type)\n with tag('KEXPName'):\n text(artist.name)\n with tag('KEXPSortName'):\n text(artist.sort_name)\n with tag('KEXPMBID'):\n text(artist.id)\n \n # optional fields\n\n if len(artist.alias_list) > 0:\n for alias in artist.alias_list:\n with tag('KEXPAlias'):\n text(alias)\n\n if artist.annotation > '':\n with tag('KEXPAnnotation'):\n text(artist.annotation)\n\n if artist.disambiguation > '':\n with tag('KEXPDisambiguation'):\n text(artist.disambiguation)\n\n if artist.type > '':\n with tag('KEXPArtistType'):\n text(artist.type)\n \n with tag('KEXPBeginArea'):\n text(artist.begin_area.name)\n with tag('KEXPBeginAreaMBID'):\n text(artist.begin_area.id)\n\n with tag('KEXPBeginDate'):\n text(artist.begin_date)\n with tag('KEXPEndDate'):\n text(artist.end_date)\n if artist.ended:\n with tag('KEXPEnded'):\n text(artist.ended)\n\n with tag('KEXPCountry'):\n text(artist.country.name)\n with tag('KEXPCountryMBID'):\n text(artist.country.id)\n \n with tag('KEXPEndArea'):\n text(artist.end_area.name)\n with tag('KEXPEndAreaMBID'):\n text(artist.end_area.id)\n\n if len(artist.ipi_list) > 0:\n for code in artist.ipi_list:\n with tag('KEXPIPICode'):\n text(code)\n\n if len(artist.isni_list) > 0:\n for code in artist.isni_list:\n with tag('KEXPISNICode'):\n text(code)\n\n if len(artist.url_relation_list) > 0:\n for link in artist.url_relation_list:\n with tag('KEXPLink'):\n text(link)",
"def writeArtist( con, artist ):\n\tcon.execute( \"INSERT INTO artist(name) VALUES(\\'\" + artist + \"\\')\" )\n\n\treturn int(con.lastrowid)",
"def artist_uri(self, artist_uri):\r\n self.data['artist_uri'] = artist_uri",
"def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)",
"def update_blacklist(self, blacklist_name: str, artists: List[str]) -> None:\n q = {\"_id\": blacklist_name}\n [\n self._blacklists.update_one(q, {\"$addToSet\": {\"blacklist\": x}})\n for x in artists\n ]",
"def get_artist(id_artist: int) -> dict:\n sql_request = sql_request_artist(id_artist)\n sql_data = get_data_from_db(sql_request)\n artist = create_artist(sql_data)\n return artist",
"def regenereate_artists():\n savefile = open(BASE_DIRECTORY + \"artists.txt\", \"w+\")\n\n fullglob = BASE_DIRECTORY + FILE_PREFIX + \"*.txt\"\n for textfile in glob.glob(fullglob):\n with open(textfile, 'r') as f:\n for line in f:\n if is_artist_line(line):\n print(line)\n savefile.write(line)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates a list of artists' album_last_collected date, to today by default.
|
def update_artist_album_collected_date(self, artist_ids: List[str], date: str=None) -> None:
date = dt.datetime.now().strftime("%Y-%m-%d") if date is None else date
for artist_id in tqdm(artist_ids):
q = {"_id": artist_id}
self._artists.update_one(
q, {"$set": {"album_last_collected": date}}, upsert=True
)
|
[
"def get_artists_for_album_collection(self, max_date: str) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"album_last_collected\": 1}\n r = list(self._artists.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for artist in r:\n if \"album_last_collected\" in artist.keys():\n if artist[\"album_last_collected\"] < max_date:\n result.append(artist[\"_id\"])\n else:\n result.append(artist[\"_id\"])\n return result",
"def _display_today_on_list(self):\n today = str(dt.today())\n list_of_expenses = self.expenses_tracker.get_expenses_list_by_date(today)\n self._set_total_label(list_of_expenses)\n self._display_expenses(list_of_expenses)",
"def faxes_today(self, faxes_today):\n\n self._faxes_today = faxes_today",
"def _updateLatestAllCheckDate(self, date=None):\n if date is None:\n date = DateTime()\n self.latest_all_check = date\n del date",
"def update_artists(self, artist_info_list: List[Dict]) -> None:\n\n for artist in tqdm(artist_info_list):\n q = {\"_id\": artist[\"id\"]}\n\n # Writing updates (formatting changes)\n artist[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n artist[\"total_followers\"] = artist[\"followers\"][\"total\"]\n del artist[\"followers\"]\n del artist[\"id\"]\n\n self._artists.update_one(q, {\"$set\": artist}, upsert=True)",
"def db_annotater_get_user_album_dates(albums_queryset):\n\n\t# analyse the queryset of all albums of a user\n\tlatest_date \t= \"\"#datetime.now().date()\n\tsubmit_dates\t= []\n\tunsubmit_dates\t= []\n\tlatest_album\t= None \n\tfor album_date in albums_queryset:\n\t\tif album_date['annotation'] is True:\n\t\t\tnew_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t\t\tsubmit_dates.append(new_date)\n\t\telse:\n\t\t\tnew_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t\t\tunsubmit_dates.append(new_date)\n\tif len(albums_queryset) > 0:\n\t\tlatest_album= albums_queryset.reverse()[0]\n\t\tlatest_date = latest_album['capture_date']\n\t\tlatest_date = get_date_dash_d_m_y(latest_date)\n\t\tlatest_album_id = latest_album['id']\n\talbum_dates = {'ld':latest_date,'s':submit_dates,'u':unsubmit_dates} \n\treturn (latest_album,album_dates)",
"def set_sell_date(self, sell_date: datetime) -> None:\n self.sell_date = sell_date",
"def update_blacklist(self, blacklist_name: str, artists: List[str]) -> None:\n q = {\"_id\": blacklist_name}\n [\n self._blacklists.update_one(q, {\"$addToSet\": {\"blacklist\": x}})\n for x in artists\n ]",
"def refresh_update_date(self):\n self.last_updated = datetime.datetime.now()",
"def set_last_future_date(self,date):\n self.infer_freq()\n if isinstance(date,str):\n date = datetime.datetime.strptime(date,'%Y-%m-%d')\n self.future_dates = pd.Series(pd.date_range(start=self.current_dates.values[-1],end=date,freq=self.freq).values[1:])",
"def Update_All_Stock():\r\n conn = connect_db()\r\n cur = conn.cursor()\r\n cur.execute('SELECT id FROM stock;')\r\n stocks = cur.fetchall()\r\n today = datetime.date.today()\r\n tomorrow = today + datetime.timedelta(days = 1)\r\n # three_years_ago = today - relativedelta(years = 3)\r\n latest_date = get_latest_history_date() + datetime.timedelta(days = 1)\r\n\r\n for stock in stocks:\r\n Add_History(conn, cur, stock[0], latest_date.strftime('%Y%m%d'), tomorrow.strftime('%Y%m%d'))\r\n cur.close()\r\n print('Update all stock history success')",
"def update_albums(self, album_info: List) -> None:\n\n for album in album_info:\n if isinstance(album, dict):\n q = {\"_id\": album[\"id\"]}\n\n # Writing updates (formatting changes)\n album[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del album[\"id\"]\n\n self._albums.update_one(q, {\"$set\": album}, upsert=True)",
"def _set_dates(self):\n if self.id is None or self.created_at is None:\n self.created_at = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")",
"def update_today_activity(self, activities):\n date = time.localtime()\n today_date = f\"{date.tm_mday} {date.tm_mon} {date.tm_year}\"\n\n if not self.check_date(today_date, activities[\"metadata\"][\"date\"]): # Different day\n activities[\"metadata\"][\"total_activity\"] = 0\n activities.pop(\"ACTIVITY\") \n activities[\"metadata\"][\"date\"] = today_date\n activities[\"ACTIVITY\"] = {}\n Functions.update_json(TODAY_ACTIVITY_FILE_PATH, activities)\n\n self.add_all_activity(activities)",
"def db_annotater_get_latest_user_albums(album_date):\n\tstart_at\t= album_date['start_at']\n\tend_at\t\t= album_date['end_at']\n\t(hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)\n\twear_time \t= [{\"hours\":str(hours),\"minutes\":str(mins)}]\n\talbum_id \t= album_date['id']\n\tif album_date['annotation'] is True:\n\t\tsubmitted = \"Yes\"\n\telse:\n\t\tsubmitted = \"No\"\n\tcapture_date = get_date_dash_d_m_y(album_date['capture_date'])\n\t# get images\n\timages \t\t= db_annotater_get_album_images(album_id)\n\tone_album \t= {\"wearTime\" : wear_time, \\\n\t\t\t\t\"submitted\" : submitted, \\\n\t\t\t\t\"date\" : capture_date, \\\n\t\t\t\t\"images\" : images}\n\treturn [one_album]",
"def set_prev_date(bundle_item):\r\n prev_date = bundle_item",
"def set_buy_date(self, buy_date: datetime) -> None:\n self.buy_date = buy_date",
"def SetCurrentDay(self):\n self.SetNow()\n self.set_day = self.day",
"def __call__(self):\n if \"expiration_date\" not in self.entity.cw_edited:\n delay = self._cw.vreg.config[\"default_expiration_delay\"]\n self.entity.cw_edited[\"expiration_date\"] = (\n datetime.date.today() + datetime.timedelta(delay))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a full blacklist record by name (id)
|
def get_blacklist(self, name: str) -> List[str]:
q = {"_id": name}
cols = {"_id": 1, "blacklist": 1, "type": 1, "input_playlist": 1}
return list(self._blacklists.find(q, cols))
|
[
"def blacklist_flush(name):\n engine = Engine(name).load()\n return engine.blacklist_flush()",
"def blacklist():\n # Get values used for pagination of the blacklist\n total = get_row_count('Blacklist')\n page, per_page, offset = get_page_args(\n page_parameter=\"page\", per_page_parameter=\"per_page\"\n )\n # Get the blacklist subset, limited to the pagination settings\n sql = 'SELECT * from Blacklist ORDER BY datetime(SystemDateTime) DESC limit {}, {}'.format(offset, per_page)\n g.cur.execute(sql)\n result_set = g.cur.fetchall()\n\n records = []\n for record in result_set:\n number = record[0]\n phone_no = '{}-{}-{}'.format(number[0:3], number[3:6], number[6:])\n records.append(dict(\n Phone_Number=phone_no,\n Name=record[1],\n Reason=record[2],\n System_Date_Time=record[3]))\n\n # Create a pagination object for the page\n pagination = get_pagination(\n page=page,\n per_page=per_page,\n total=total,\n record_name=\"blocked numbers\",\n format_total=True,\n format_number=True,\n )\n # Render the resullts with pagination\n return render_template(\n 'blacklist.htm',\n blacklist=records,\n page=page,\n per_page=per_page,\n pagination=pagination,\n )",
"def GetBlacklist(cls):\n entries = memcache.get(_BLACKLIST_MEMCACHE_KEY) or []\n if not entries:\n entries = Blacklist.query().fetch()\n memcache.set(\n _BLACKLIST_MEMCACHE_KEY, entries, time=_BLACKLIST_MEMCACHE_EXPIRATION)\n return entries",
"def get_blacklist_scans(subject_id, blacklist_path, new_id=None):\n try:\n with open(blacklist_path, 'r') as blacklist:\n lines = blacklist.readlines()\n except IOError:\n lines = []\n\n entries = []\n for line in lines:\n if subject_id in line:\n if new_id is not None:\n line = line.replace(subject_id, new_id)\n entries.append(line)\n return entries",
"def bullfighters_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=BULLFIGHTER_TYPE_URI,\n rdf_type_name=BULLFIGHTER_TYPE_NAME, \n kls=Bullfighter)",
"def blacklist(self):\n self.blacklisted = True\n self.save()",
"async def show_blacklist(self, ctx) -> None:\n blacklist = list(self.blocker_service.find_all_blacklisted(str(ctx.guild.id)))\n return_message = \"\"\n for blacklisted in blacklist:\n member = ctx.guild.get_member(int(blacklisted[\"member_id\"]))\n if member is not None:\n return_message += (\n member.name\n + \"#\"\n + member.discriminator\n + \" :: \"\n + str(member.id)\n + \"\\n\"\n )\n else:\n return_message += return_message + str(blacklisted[\"member_id\"]) + \"\\n\"\n if len(return_message) != 0:\n if len(return_message) <= max_message_length:\n await ctx.channel.send(return_message)\n else:\n await ctx.channel.send(\n file=File(\n fp=BytesIO(bytes(return_message, \"utf-8\")), filename=\"Blacklist\"\n )\n )\n else:\n await ctx.channel.send(\"Blacklist is empty\")",
"async def fill_blacklist(self):\n query = 'SELECT * FROM (SELECT guild_id AS snowflake_id, blacklisted FROM guild_config UNION ALL SELECT user_id AS snowflake_id, blacklisted FROM users_data) WHERE blacklisted=\"TRUE\"'\n cur = await self.db.execute(query)\n data = await cur.fetchall()\n self.blacklist = {r[0] for r in data} or set()",
"def get_blacklisted_subs(self):\n blacklist_location = path.join(str(settings.ROOT_DIR), \"blacklisted_subs.txt\")\n\n if not path.exists(blacklist_location):\n return []\n else:\n return self._read_blacklist_file(blacklist_location)",
"def blacklist(self, peer, query):\n self.checkstat(\"blacklist\")",
"async def list(self, ctx):\n keylist = []\n try:\n for key in data[ctx.message.server.id].keys():\n keylist.append(key)\n keylist = ', '.join(keylist)\n await self.Aya.say('Blacklisted words: \\n`' + keylist + '`')\n except KeyError:\n await self.Aya.say('You must add a word to the blacklist before invoking this command.')",
"def BlacklistByFccIdAndSerialNumber(self, request):\n pass",
"def test_throttling_blacklist_condition_id_get(self):\n pass",
"def check_sql_blacklist(self,suspect,runtimeconfig=None): \n #work in progress\n if not self.config.has_option(self.section, 'check_sql_blacklist') or not self.config.getboolean(self.section,'check_sql_blacklist'):\n return DUNNO\n \n from fuglu.extensions.sql import ENABLED\n if not ENABLED:\n self.logger.error('Cannot check sql blacklist, SQLALCHEMY extension is not available')\n return DUNNO\n \n from fuglu.extensions.sql import get_session\n \n try:\n dbsession=get_session(self.config.get(self.section,'sql_blacklist_dbconnectstring'))\n conf_sql=self.config.get(self.section,'sql_blacklist_sql')\n \n sql,params=self._replace_sql_params(suspect, conf_sql)\n \n resultproxy=dbsession.execute(sql,params)\n except Exception,e:\n self.logger.error('Could not read blacklist from DB: %s'%e)\n suspect.debug('Blacklist check failed: %s'%e)\n return DUNNO\n \n for result in resultproxy:\n dbvalue=result[0] # this value might have multiple words\n allvalues=dbvalue.split()\n for blvalue in allvalues:\n self.logger.debug(blvalue)\n #build regex\n #translate glob to regexr\n #http://stackoverflow.com/questions/445910/create-regex-from-glob-expression\n regexp = re.escape(blvalue).replace(r'\\?', '.').replace(r'\\*', '.*?')\n self.logger.debug(regexp)\n pattern=re.compile(regexp)\n \n if pattern.search(suspect.from_address):\n self.logger.debug('Blacklist match : %s for sa pref %s'%(suspect.from_address,blvalue))\n confcheck=self.config\n if runtimeconfig!=None:\n confcheck=runtimeconfig\n configaction=string_to_actioncode(confcheck.get(self.section,'highspamaction'),self.config)\n suspect.tags['spam']['SpamAssassin']=True\n prependheader=self.config.get('main','prependaddedheaders')\n suspect.addheader(\"%sBlacklisted\"%prependheader, blvalue)\n suspect.debug('Sender is Blacklisted: %s'%blvalue)\n if configaction==None:\n return DUNNO\n return configaction\n \n return DUNNO",
"def get_bl_artist(self, artist_name,\n with_connection=None, add_not=None):\n if with_connection:\n connection = with_connection\n else:\n connection = self.get_database_connection()\n art = self.get_artist(artist_name, with_connection=connection,\n add_not=add_not)\n if not art:\n return False\n art_id = art[0]\n rows = connection.execute(\"SELECT * FROM black_list WHERE artist = ?\",\n (art_id,))\n for row in rows:\n if not with_connection:\n self.close_database_connection(connection)\n return row\n if add_not:\n if not with_connection:\n self.close_database_connection(connection)\n return False\n connection.execute(\"INSERT INTO black_list (artist) VALUES (?)\",\n (art_id,))\n connection.execute(\"UPDATE black_list SET updated = DATETIME('now')\"\n \" WHERE artist = ?\", (art_id,))\n connection.commit()\n rows = connection.execute(\"SELECT * FROM black_list WHERE artist = ?\",\n (art_id,))\n for row in rows:\n if not with_connection:\n self.close_database_connection(connection)\n return row\n if not with_connection:\n self.close_database_connection(connection)\n return False",
"def getById(self, id):\n for item in self.list: \n if item.getId() == id:\n return item",
"async def get_blacklist_hist(self, search_time, limit=1000):\n\n start = search_time[0][0]\n end = search_time[0][1]\n\n url = f'https://{self.__api}/v3/blacklist/history'\n continuation = None\n full_resp = {}\n flag = True\n body = {\"filter[clientid]\": self.clientid, \"filter[start_time]\": start, \"filter[end_time]\": end,\n \"limit\": limit, \"continuation\": continuation}\n while True:\n with requests.get(url, params=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status, await response.text)\n continuation = response.json().get('body').get('continuation')\n\n if flag:\n full_resp = response.json()\n\n if continuation is not None:\n body['continuation'] = continuation\n if not flag:\n full_resp['body']['objects'].extend(response.json().get('body').get('objects'))\n else:\n break\n flag = False\n logger.debug(f'The function get_blacklist_hist has been successful by filter {body}'\n f'It has taken the history of blacklist for the timeshift')\n logger.info(f'The blacklist history for the given period has been received')\n return full_resp",
"def get_by_name(cls, context, name):\n db_bay = cls.dbapi.get_bay_by_name(context, name)\n bay = Bay._from_db_object(cls(context), db_bay)\n return bay",
"def _get_from_datastore(cls, name):\n return cls.all().filter('%s =' % cls._memcache_key_name, name).get()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates a blacklist's artists given its name.
|
def update_blacklist(self, blacklist_name: str, artists: List[str]) -> None:
q = {"_id": blacklist_name}
[
self._blacklists.update_one(q, {"$addToSet": {"blacklist": x}})
for x in artists
]
|
[
"def update_artist(artist, new_name):\n conn = sqlite3.connect(\"mydatabase.db\")\n cursor = conn.cursor()\n \n sql = \"\"\"\n UPDATE albums\n SET artist = ?\n WHERE artist = ?\n \"\"\"\n cursor.execute(sql, (new_name, artist))\n conn.commit()\n cursor.close()\n conn.close()",
"def update_artist_list(xml_path, **kwargs):\r\n songs_array = kwargs.get('paths').get('songs_array')\r\n songs_path = kwargs.get('paths').get('songs_path')\r\n counter = 0\r\n if kwargs.get('skip_check'):\r\n tree = is_readable_xml(xml_path)\r\n root = tree.getroot()\r\n root = tree.find(\"artists\")\r\n for song in songs_array:\r\n try:\r\n file = TinyTag.get(os.path.join(songs_path, song + \".mp3\"))\r\n artist = file.artist\r\n if artist is not None: # artist value isn't blank\r\n artist = re.sub(\"'|\\\"\", '', artist)\r\n string = \".//artist[@value='{}']\".format(artist)\r\n if root.find(string) is None:\r\n ET.SubElement(root, \"artist\",{\"value\": artist.lower()})\r\n tree.write(xml_path, encoding='utf-8')\r\n \"\"\"else:\r\n print(type(root.find(\"[@value={}]\".format(file.artist))))\"\"\"\r\n except Exception: # unsupported files\r\n counter += 1",
"def artists_action(menuitem, gui):\n \n result = xbmc.call.AudioLibrary.GetArtists()\n\n def convert(artist):\n text = artist[\"label\"]\n if artist.has_key('thumbnail'):\n image = self.cache.open_http(\n artist[\"thumbnail\"], self.config[\"default artist\"],\n image_convert)\n else:\n image = self.cache.open(self.config[\"default artist\"])\n\n def action(menuitem2, gui2):\n albums_action(menuitem2, gui2, artist[\"artistid\"])\n\n return MenuItem(image, text, action)\n\n self.artists_menu.fill(*map(convert, result[\"artists\"]))\n Menu.action_helper(self.artists_menu)(menuitem, gui)",
"def add_artist(self, artist):\n self.artists[artist.name] = artist",
"def get_bl_artist(self, artist_name,\n with_connection=None, add_not=None):\n if with_connection:\n connection = with_connection\n else:\n connection = self.get_database_connection()\n art = self.get_artist(artist_name, with_connection=connection,\n add_not=add_not)\n if not art:\n return False\n art_id = art[0]\n rows = connection.execute(\"SELECT * FROM black_list WHERE artist = ?\",\n (art_id,))\n for row in rows:\n if not with_connection:\n self.close_database_connection(connection)\n return row\n if add_not:\n if not with_connection:\n self.close_database_connection(connection)\n return False\n connection.execute(\"INSERT INTO black_list (artist) VALUES (?)\",\n (art_id,))\n connection.execute(\"UPDATE black_list SET updated = DATETIME('now')\"\n \" WHERE artist = ?\", (art_id,))\n connection.commit()\n rows = connection.execute(\"SELECT * FROM black_list WHERE artist = ?\",\n (art_id,))\n for row in rows:\n if not with_connection:\n self.close_database_connection(connection)\n return row\n if not with_connection:\n self.close_database_connection(connection)\n return False",
"def render_artists(self, artists, *args, **kwargs):\n must_be_drawn = artists is None\n if must_be_drawn:\n self.first_draw(*args, **kwargs)\n else:\n self.update(artists, *args, **kwargs)",
"def new_artist( self, artist_name ):\n\n if artist_name in self.art_fields[\"artists\"]:\n raise ValueError( \"'{:s}' is already an artist in the database.\".format( artist_name ) )\n\n # find the first position where the new artist sorts (insensitively)\n # after everything before it.\n #\n # NOTE: we don't use something like the bisect module so as to\n # preserve the existing order of the artists, which may or may\n # not be sorted.\n #\n for index, existing_artist_name in enumerate( self.art_fields[\"artists\"] ):\n if artist_name.lower() < existing_artist_name.lower():\n break\n\n self.art_fields[\"artists\"].insert( index, artist_name )\n\n self.mark_data_dirty()",
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def update_artists(self, artist_info_list: List[Dict]) -> None:\n\n for artist in tqdm(artist_info_list):\n q = {\"_id\": artist[\"id\"]}\n\n # Writing updates (formatting changes)\n artist[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n artist[\"total_followers\"] = artist[\"followers\"][\"total\"]\n del artist[\"followers\"]\n del artist[\"id\"]\n\n self._artists.update_one(q, {\"$set\": artist}, upsert=True)",
"def append(self, artist_name):\n if artist_name in self.names:\n return\n new = artist(artist_name)\n self.names.add(new.name.lower())\n self.scores = merge_dicts(lambda x, y: x+y, self.scores, new.similar)\n\n self.top_songs[artist_name] = new.top\n print(artist_name, new.top)\n self.similar[artist_name] = new.similar\n return",
"def scrape_artists():\r\n\tfor i in range(1, 14):\r\n\t\tif i > 1:\r\n\t\t\tresponse = requests.get(base_url + f'/list/{i}')\r\n\t\telse:\r\n\t\t\tresponse = requests.get(base_url)\r\n\t\thtml = response.text\r\n\t\thtml = html.split('class=\"item-name\">\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t')\r\n\t\tfor div in html[1:]:\r\n\t\t\tcorr_div = div.split('\\r\\n\\t')\r\n\t\t\tname = corr_div[0]\r\n\t\t\tif name.lower() not in artists:\r\n\t\t\t\tartists.append(name.lower())",
"def update_artists(artists, tracks, time_idx):\n time_artist, head_artist, *track_artists = artists\n time_artist.set_text(time_idx[:5])\n head_lonlat = []\n for artist, track in zip(track_artists, tracks):\n point = get_point(track, time_idx)\n if point is not None:\n lon, lat = artist.get_data()\n lon.append(point['lon'])\n lat.append(point['lat'])\n artist.set_data(lon, lat)\n head_lonlat.append((point['lon'], point['lat']))\n if head_lonlat:\n head_artist.set_offsets(head_lonlat)\n else:\n head_artist.set_offsets(ndarray(shape=(0, 2))) # empty scatter plot\n return artists",
"def update_timeline_avatar(self, name):\n print name",
"def buy_artwork(self, artwork):\n\n if self != artwork.owner:\n art_listing = None\n for listing in veneer.listings:\n if listing.art.title == artwork.title:\n art_listing = listing\n break\n if art_listing != None:\n listing.art.owner = self\n veneer.remove_listing(art_listing)",
"def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)",
"def sell_artwork(self, artwork, price):\n if self == artwork.owner:\n new_listing = Listing(artwork, price, self.name)\n veneer.add_listing(new_listing)",
"def artist_uri(self, artist_uri):\r\n self.data['artist_uri'] = artist_uri",
"def change_image_name(self, img, newname):\r\n return self.update(img, {\"name\": newname})",
"def filter_artist(self, artist_name):\n if VERBOSE:\n print (\"\\nSearching for artist: \" + artist_name)\n try:\n result = self.sp.search(q='artist:' + artist_name, type='artist')\n except spotipy.client.SpotifyException:\n print(\"ERROR: Couldnt not find artist: %s\" % artist_name)\n print(\"trying again\")\n try:\n result = self.sp.search(q='artist:' + artist_name, type='artist')\n except spotipy.client.SpotifyException as error:\n print(\"ERROR: Failed to search twice. Error below:\")\n print(error)\n return None\n except ValueError as error:\n print(\"ERROR: Failure while searching Spotify for artist: %s\" % artist_name)\n print(error)\n return None\n\n artists = result['artists']['items'] # list of dicts\n\n num_matches = int(result['artists']['total'])\n if num_matches == 0:\n if VERBOSE:\n print( \"No matches found!\")\n return None\n\n elif num_matches == 1:\n if VERBOSE:\n print (\"1 match found: \" + artists[0]['name'])\n if artists[0]['name'] == artist_name:\n print (\"Exact match!\")\n else:\n print (\"Close enough...\")\n return artists[0]['uri']\n\n elif num_matches > 1:\n if VERBOSE:\n print (\"%i matches found: \" % num_matches + str([a['name'] for a in artists]) )\n # check for exact match\n for a in artists:\n if a['name'] == artist_name:\n if VERBOSE:\n print(\"Exact match found!\")\n return a['uri']\n # If there is no exact match, the first match is probably best.\n return artists[0]['uri']\n\n # If we don't return in one of the If statements above, abort\n raise Exception('unexpected number of matches (%i) for artist %s' % (num_matches, artist))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all albums that need tracks added.
|
def get_albums_for_track_collection(self) -> List[str]:
q = {}
cols = {"_id": 1, "tracks": 1}
r = list(self._albums.find(q, cols))
    # Only append albums that are missing a tracks field and still need track collection
result = []
for album in r:
if "tracks" not in album.keys():
result.append(album["_id"])
return result
|
[
"def getTracks(self, album):\n\n\t\talbumSock = self.opener.open(album['url'])\t\t#download the album page\n\t\talbumPage = albumSock.read()\n\t\talbumSock.close()\n\n\t\tp = albumParser()\n\t\tp.feed(albumPage)\n\t\tp.close()\n\n\t\talbum['tracks'] = p.tracks\n\t\talbum['tracks'].sort(lambda x, y: cmp( x['num'], y['num'] )) #sort in track order",
"def get_album_tracks(self, album_id):\n response = self.__get_data(self.url.albums_tracks_url().format(id=str(album_id)))\n tracks = []\n for album_track in response['tracks']['items']:\n track = self.get_track(album_track['id'])\n tracks.append(track)\n return tracks",
"def get_tracks_from_albums(self, albums: List[str]) -> List[str]:\n q = {\"album_id\": {\"$in\": albums}}\n cols = {\"_id\": 1}\n r = list(self._tracks.find(q, cols))\n\n return [x[\"_id\"] for x in r]",
"def get(self): \n return getAllAlbums()",
"def getAlbums(self):\n\t\tbasketPage = self.request(site_prefix + 'basket.shtml')\n\n\t\tp = linksParser()\n\t\tp.feed(basketPage)\n\t\tp.close()\n\n\t\talbums = []\n\t\tfor link,desc in p.links.items():\n\t\t\tm = self.albumRe.match(link)\n\t\t\tif m:\n\t\t\t\tnew = dict()\n\t\t\t\tnew['url'] = site_prefix + \"downloads_iframe.shtml?\" + m.group(1)\n\t\t\t\tnew['artist'] = desc[1][0].strip()\n\t\t\t\tnew['title'] = \"\".join(desc[1][1:]).strip()\n\t\t\t\tnew['tracks'] = []\n\t\t\t\talbums.append(new)\n\n\t\treturn albums",
"def get_tracks_from_album(self, album, progress=None):\n q = {\"limit\": 50}\n url = \"albums/{}/tracks\".format(album['id'])\n page = self.get_api_v1(url, q)\n tracks = []\n for track in self.extract_page(page, progress):\n track['album'] = album\n tracks.append(Track(track))\n return tuple(tracks)",
"def get_album_tracklist(name, artist=None, token=None):\n if not token:\n token = get_token()\n album = get_spotify_api(\"https://api.spotify.com/v1/search\", get=True, data={\"q\": (artist + \" - \" if artist else \"\") + name, \"type\": \"album\", \"limit\": 1})\n if album[\"albums\"][\"items\"]:\n tracks = get_spotify_api(album[\"albums\"][\"items\"][0][\"href\"] + \"/tracks\", get=True)\n output = []\n for track in tracks[\"items\"]:\n output.append([track[\"artists\"][0][\"name\"], track[\"name\"]])\n return output\n else:\n return \"No results\"",
"def _get_all_songs(self):\n return self.call.AudioLibrary.GetSongs(fields=self.SONG_FIELDS)['songs']",
"def add_all_tracks(spotify, artist, playlist):\n store = set()\n def remove_duplicates(album):\n if album['name'] not in store:\n return True\n else:\n store.add(album['name'])\n return False\n\n response = spotify.artist_albums(artist, album_type = 'album', limit = 50)\n\n _albums = [album for album in response['items']]\n albums = []\n\n for album in _albums:\n if album['name'].lower() not in store:\n albums.append(album)\n store.add(album['name'].lower())\n\n album_tracks = [spotify.album_tracks(album['id'])['items'] for album in albums]\n\n tracks = [track['id'] for album in album_tracks for track in album]\n _tracks = [tracks[i:i + ADD_TRACKS_LIMIT] for i in range(0, len(tracks), ADD_TRACKS_LIMIT)]\n for tracks in _tracks:\n spotify.user_playlist_add_tracks(spotify.me()[\"id\"], playlist, tracks)",
"def getSongs(tracks):\r\n sp = getSP()\r\n songs = tracks[\"items\"]\r\n while tracks['next']:\r\n tracks = sp.next(tracks)\r\n for item in tracks[\"items\"]:\r\n songs.append(item)\r\n return songs",
"def get_albums(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('albums', search, start,\r\n max_items)",
"async def get_all_tracks(self) -> List[PlaylistTrack]:\n if isinstance(self._tracks, PartialTracks):\n return await self._tracks.build()\n\n _tracks = []\n offset = 0\n while len(self.tracks) < self.total_tracks:\n data = await self.__client.http.get_playlist_tracks(self.owner.id, self.id, limit=50, offset=offset)\n\n _tracks += [PlaylistTrack(self.__client, item) for item in data['items']]\n offset += 50\n\n self.total_tracks = len(self._tracks)\n return list(self._tracks)",
"def get_tracks_for_audio_analysis(self) -> List[str]:\n \n l.debug(\"Finding Tracks without audio analysis, this can take some time.\")\n q = {}\n cols = {\"_id\": 1, \"audio_analysis_flag\": 1}\n r = list(self._tracks.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for track in r:\n if \"audio_analysis_flag\" not in track.keys():\n result.append(track[\"_id\"])\n else:\n if not track[\"audio_analysis_flag\"]:\n result.append(track[\"_id\"])\n return result",
"def albums(self, albums, market=None):\n\n tlist = [self._get_id(\"album\", a) for a in albums]\n if market is not None:\n return self._get(\"albums/?ids=\" + \",\".join(tlist) + '&market=' + market)\n else:\n return self._get(\"albums/?ids=\" + \",\".join(tlist))",
"def get_everything(all_albums_data, all_tracks_data):\n\n get_all_albums(all_albums_data)\n\n if all_tracks_data:\n # Gets artist artwork\n if all_tracks_data[0]['artist_avatar']:\n url = 'https://f4.bcbits.com/img/' + all_tracks_data[0]['artist_avatar'] + '_10.jpg'\n artist_cover = requests.get(url).content\n else:\n artist_cover = None\n\n get_all_tracks(all_tracks_data[0]['artist'], 'Tracks', all_tracks_data, album_release=None,\n album_cover=artist_cover, artist_cover=artist_cover)",
"def list_album_tracks(uri):\r\n album = sp.album(uri)\r\n\r\n print('ALBUM NAME: ',album['name'])\r\n print('ARTIST: ',album['artists'][0]['name'])\r\n print('TYPE: ',album['album_type'])\r\n print('RELEASE DATE: ',album['release_date'])\r\n print('POPULARITY: ',album['popularity'],'\\n')\r\n \r\n album_tracks = sp.album_tracks(uri)\r\n \r\n print('TRACKS: \\n')\r\n \r\n for i in range(len(album_tracks['items'])):\r\n print('({}):'.format(i+1),album_tracks['items'][i]['name'])\r\n \r\n print('\\n---------------------------------------------------------------------------------\\n') \r\n \r\n return None",
"def albums():\n albums = app.config[\"albums\"]\n # TODO complete (return albums.get_albums() in JSON format)\n return json.dumps(albums.get_albums())",
"def fetch_songs(self):\n if len(self.songs) == 0:\n for file in self.MUSIC_DIR.joinpath (\"./songs\").iterdir():\n if file.is_file():\n self.songs.append (file)\n return self.songs",
"def get_all_tracks_from_artist(self, artist, progress=None):\n albums = self.get_albums_from_artist(artist)\n if albums:\n n = len(albums)\n tracks = []\n for i, a in enumerate(albums):\n for t in self.get_tracks_from_album(a):\n tracks.append(Track(t))\n if progress:\n progress.set_percent(float(i)/n)\n tracks = (t for t in tracks if artist['name'] in str(t))\n return tuple(tracks)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update album info if needed.
|
def update_albums(self, album_info: List) -> None:
for album in album_info:
if isinstance(album, dict):
q = {"_id": album["id"]}
# Writing updates (formatting changes)
album["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
del album["id"]
self._albums.update_one(q, {"$set": album}, upsert=True)
|
[
"def _syncPhoto(self, photo_entry, username, albumname=None, refresh=False):\n gphoto_id = photo_entry.gphoto_id.text\n #if self.cli_verbose:\n # print \"syncPhoto album %s id %s\" % (albumname, gphoto_id)\n # if we're refreshing this data, then delete the PicasaPhoto first...\n if refresh:\n try:\n p = Photo.objects.get(gphoto_id=gphoto_id)\n p.delete()\n except ObjectDoesNotExist:\n pass\n\n if albumname is None:\n if self.cli_verbose:\n print \"Albumname unknown getAlbumFeed for\", username\n feed = self.getAlbumFeed(username=username)\n album_id = photo_entry.albumid.text\n albums = [album for album in feed.entry if album.gphoto_id.text==album_id]\n if len(albums)!=1:\n raise PicasawebSyncrError(\"No such album found for: %s\" % album_or_id)\n albumname = albums[0].name.text\n\n updated = datetime(*strptime(photo_entry.updated.text[:-4] + '000Z', \"%Y-%m-%dT%H:%M:%S.000Z\")[:7])\n try:\n geo_latitude = photo_entry.geo.latitude()\n geo_longtitude = photo_entry.geo.longtitude()\n except:\n geo_latitude = \"\"\n geo_longtitude = \"\"\n\n default_dict = {\n 'updated': updated,\n 'gphoto_id': gphoto_id,\n 'owner': username,\n 'title': photo_entry.title.text,\n 'description': photo_entry.summary.text or \"\",\n 'taken_date': datetime(\n *strptime(photo_entry.timestamp.isoformat()[:-4] +\n '000Z', \"%Y-%m-%dT%H:%M:%S.000Z\")[:7]),\n 'photopage_url': photo_entry.GetAlternateLink().href,\n #'square_url': urls['Square'],\n 'small_url': photo_entry.media.thumbnail[0].url,\n 'medium_url': photo_entry.media.thumbnail[1].url,\n 'thumbnail_url': photo_entry.media.thumbnail[2].url,\n 'content_url': photo_entry.media.content[0].url,\n #'license': photo_xml.photo[0]['license'],\n 'geo_latitude': geo_latitude,\n 'geo_longitude': geo_longtitude,\n 'exif_model': photo_entry.exif.model and photo_entry.exif.model.text or \"\",\n 'exif_make': photo_entry.exif.make and photo_entry.exif.make.text or \"\",\n #'exif_orientation': photo_entry.exif.,\n 'exif_exposure': photo_entry.exif.exposure and photo_entry.exif.exposure.text or \"\",\n #'exif_software': photo_entry.exif.,\n #'exif_aperture': photo_entry.exif.,\n 'exif_iso': photo_entry.exif.iso and photo_entry.exif.iso.text or \"\",\n #'exif_metering_mode': ,\n 'exif_flash': photo_entry.exif.flash and photo_entry.exif.flash.text or \"\",\n 'exif_focal_length': photo_entry.exif.focallength and photo_entry.exif.focallength.text or \"\",\n #'exif_color_space': self.getExifKey(exif_data, 'Color Space'),\n }\n\n photo_obj, created = Photo.objects.get_or_create(gphoto_id = gphoto_id,\n defaults=default_dict)\n\n # Add/remove tags.\n remote_slugnames = {}\n if photo_entry.media.keywords.text:\n # Get the \"names\" and slugs from the remote photo data.\n from django.template.defaultfilters import slugify\n for name in photo_entry.media.keywords.text.split(', '):\n remote_slugnames[slugify(name)] = name\n\n self.syncTags(remote_slugnames, photo_obj)\n\n\n if self.cli_verbose:\n status = created and \"created\" or \"already exists (same)\"\n if photo_obj.updated<updated:\n status = \"updated\"\n\n if not created and photo_obj.updated<updated:\n # update object\n for key, value in default_dict.items():\n setattr(photo_obj, key, value)\n photo_obj.save()\n return photo_obj",
"def _update_(self, db):\n self.artist_db = self._find(db, 'album', 'albumartist')\n self.music_db = db\n self.initialized = True",
"def _add_album_metadata(self, spotify_album):\r\n album = SpotifyAlbum(spotify_album.spotify_uri)\r\n params = {'uri': spotify_album.spotify_uri}\r\n res = requests.get(self.api_lookup_url, params=params)\r\n data = res.json()\r\n\r\n if 'album' in data:\r\n album.title = data['album']['name']\r\n album.artist_uri = data['album']['artist-id']\r\n\r\n return album",
"def update(request):\n p = request.POST\n images = defaultdict(dict)\n\n #create dictionary of properties for each image\n\n for k, v in p.items():\n if k.startswith(\"title\") or k.startswith(\"rating\") or k.startswith(\"tags\"):\n k, pk = k.split('-')\n images[pk][k] = v\n elif k.startswith(\"album\"):\n pk = k.split('-')[1]\n images[pk][\"albums\"] = p.getlist(k)\n\n for k, d in images.items():\n image = Image.objects.get(pk=k)\n image.title = d['title']\n image.rating =int(d['rating'])\n\n #tags - assign or create if a new tag!\n\n tags = d[\"tags\"].split(', ')\n lst = []\n for t in tags:\n if t:\n lst.append(Tag.objects.get_or_create(tag=t)[0])\n image.tags = lst\n\n if \"albums\" in d:\n image.albums = d[\"albums\"]\n image.save()\n return HttpResponseRedirect(request.META[\"HTTP_REFERER\"], dict(media_url = MEDIA_URL))",
"def task_6_song_edit_album():\n song = Song.objects.get(title='Superstition')\n song.album_name = 'Way to Go'\n song.save()",
"def metadata_ready_for_editing(self, aggregated_metadata):\n self.__log.call(aggregated_metadata)\n\n self.reset()\n\n metadata_editors = self.__metadata_editors\n\n for album_field_name in [\n \"album_title\",\n \"album_artist\",\n \"album_label\",\n \"album_genre\",\n \"album_year\",\n ]:\n widget = metadata_editors[album_field_name]\n widget.configure(values=aggregated_metadata[album_field_name])\n if aggregated_metadata[album_field_name]:\n widget.current(0)\n\n metadata_editors[\"album_discnumber\"].var.set(\n aggregated_metadata[\"album_discnumber\"])\n metadata_editors[\"album_disctotal\"].var.set(\n aggregated_metadata[\"album_disctotal\"])\n\n metadata_editors[\"album_compilation\"].var.set(\n aggregated_metadata[\"album_compilation\"])\n\n album_cover_editor = metadata_editors[\"album_cover\"]\n album_cover_editor.config(state=DISABLED)\n if aggregated_metadata[\"album_cover\"]:\n for filepath in aggregated_metadata[\"album_cover\"]:\n self.__add_album_cover_option(filepath, showinfo=False)\n album_cover_editor.config(state=NORMAL)\n\n # issues/5\n self._apply_naming_defaults()\n for encoding in [\"flac\", \"mp3\"]:\n for field_suffix in [\n \"subroot_trie\", \"album_folder\", \"track_filename\"]:\n custom_key = \"__%s_%s\" % (encoding, field_suffix)\n custom_spec = aggregated_metadata.get(custom_key)\n if custom_spec is not None:\n metadata_editors[custom_key].var.set(custom_spec)\n\n # issues/5\n self.__album_disctotal_observer_name = \\\n metadata_editors[\"album_disctotal\"].var.trace(\n 'w', self._apply_naming_defaults)\n\n self.__aggregated_metadata = deepcopy(aggregated_metadata)\n\n self._initialize_track_vars()\n\n # if persisted data was restored, manually select the cover image so\n # that it opens in Preview automatically\n fm = self.master\n if fm._persistence.restored and self.__album_covers:\n self.choose_album_cover(list(self.__album_covers.keys())[0])",
"def test_album(self, mock_get_metadata: MagicMock):\n self.assertEqual(mock_get_metadata.return_value, self.file_media.album)\n mock_get_metadata.assert_called_once_with(\"album\")",
"def fetchAlbumInfo(album_id):\n url = \"https://api.spotify.com/v1/albums/\" + album_id\n req = requests.get(url)\n if req.ok == False:\n return 'Error: bad Spotify API URL or similar error'\n data = req.json()\n album_info = {}\n #print data[u'artists']\n album_info['artist_id'] = data[u'artists'][0][u'id']\n album_info['album_id'] = data[u'id']\n album_info['name'] = data[u'name']\n release_date = data[u'release_date']\n year = int(release_date[0:4])\n album_info['year'] = year\n album_info['popularity'] = data[u'popularity']\n \n return album_info",
"def get_album(self, album_id):\n track = []\n img = None\n\n for i in self.__albums:\n for t_id, info in self.__tracks.items():\n if i[\"id\"] == t_id and t_id == album_id:\n img = i[\"img\"]\n\n for a, b in info.items():\n track.append({\n \"name\": a,\n \"length\": b\n })\n return {\"album_id\": album_id, \"img\": img, \"track\": track}",
"def fetchAlbumInfo(album_id):\n url_base = \"https://api.spotify.com/v1/albums/\" + album_id\n url = url_base\n req = requests.get(url)\n data = req.json()\n info={}\n info[\"artist_id\"]=data[\"artists\"][0][\"id\"]\n info[\"album_id\"]=album_id #string\n info[\"name\"]=data[\"name\"] #string\n info[\"release_date\"]=data[\"release_date\"] [:4]\n info[\"popularity\"]=data[\"popularity\"] #int\n return info\n #pass\"\"\"",
"def _update_info(self):",
"def fetchAlbumInfo(album_id):\n url = 'https://api.spotify.com/v1/albums/' + album_id\n req = requests.get(url)\n assert req.ok, 'n/a'\n data = req.json()\n album_info = {}\n assert data.get('name'), 'n/a'\n album_info['artist_id'] = data['artists'][0]['id']\n album_info['album_id'] = album_id\n album_info['name'] = data['name']\n album_info['year'] = data['release_date'][0:4]\n album_info['popularity'] = data['popularity']\n return album_info",
"def set_album_id(self, album_id):\n self.album_id = album_id",
"def add_album_to_queue(self, spotify_album):\r\n if not spotify_album.satisfied():\r\n spotify_album = self._add_album_metadata(spotify_album)\r\n\r\n return self.soco.add_to_queue(spotify_album)",
"def test_album_edit_route_post_logged_in_updates_the_correct_album(self):\n album_id = self.bob.albums.first().id\n photo_id = self.bob.photos.first().id\n self.client.login(username='bob', password='password')\n data = {\n 'title': 'test5',\n 'description': 'testing5',\n 'published': 'PRIVATE',\n 'photos': [photo_id]\n }\n self.client.post(reverse_lazy('album_edit', kwargs={'id': album_id}), data)\n album = Album.objects.get(id=album_id)\n self.assertEqual(album.title, 'test5')\n self.assertEqual(album.description, 'testing5')",
"def album_uri(self, uri):\r\n self.data['album_uri'] = uri",
"def downloadAlbum(self, album):\n\t\tfor track in album['tracks']:\n\t\t\tself.downloadTrack(album, track['num'])",
"def _fetch_album(self, gn_id, is_last_album=True):\n self.__log.call(gn_id, is_last_album=is_last_album)\n\n gn_queries = self._prepare_gn_queries(self.ALBUM_FETCH_XML)\n gn_queries.find(\"QUERY/GN_ID\").text = gn_id\n\n gn_responses = self._get_response(\n gn_queries, http_keep_alive=is_last_album)\n gn_album = gn_responses.find(\"RESPONSE/ALBUM\")\n\n self.__log.return_(gn_album)\n return gn_album",
"def addalbum(self, album):\n self.albums.append(album)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all tracks that need audio analysis added.
|
def get_tracks_for_audio_analysis(self) -> List[str]:
l.debug("Finding Tracks without audio analysis, this can take some time.")
q = {}
cols = {"_id": 1, "audio_analysis_flag": 1}
r = list(self._tracks.find(q, cols))
    # Only append tracks that still need audio analysis
result = []
for track in r:
if "audio_analysis_flag" not in track.keys():
result.append(track["_id"])
else:
if not track["audio_analysis_flag"]:
result.append(track["_id"])
return result
|
[
"def get_audio_analysis(self, track_id):\n url = \"https://api.spotify.com/v1/audio-analysis/\" + track_id\n headers = {'Authorization': \"Bearer \" + self.token}\n\n request = self.session.get(url, headers=headers)\n return request",
"def get_all_audio(self):\n return [x.file for x in self.audio_data.values()]",
"def audio_analysis(self, track_id):\n trid = self._get_id(\"track\", track_id)\n return self._get(\"audio-analysis/\" + trid)",
"def _get_all_songs(self):\n return self.call.AudioLibrary.GetSongs(fields=self.SONG_FIELDS)['songs']",
"def getSelectedTracks(self):\n return cmds.ls('Track*', sl=True)",
"def unmixed_audio(self) -> List[np.ndarray]:\n total = self.num_samples_total\n return [\n self._pad_track(track, offset=offset, total=total)\n for offset, track in zip(self.offsets, self.tracks)\n ]",
"def search( sp, track, lim=1 ):\n\n identifier = sp.search( q=\"track: \" + track, limit=lim, type=\"track\" )['tracks']['items'][0]['id']\n features = sp.audio_features( identifier )\n analisys = sp.audio_analysis( identifier )\n\n return identifier, features, analisys",
"def get_tracks_audio_features(track_ids):\n connect()\n url = 'https://api.spotify.com/v1/audio-features/'\n # Max that can be submitted to this endpoint is 100 at a time\n track_groups = make_chunks(track_ids, 100)\n audio_features = []\n for group in track_groups:\n query_params = {'ids': ','.join(group)}\n response = requests.get(\n url, params=query_params, headers=get_header()\n )\n resp_json = response.json()\n if resp_json.get('audio_features'):\n audio_features.extend(resp_json['audio_features'])\n return audio_features",
"def get_albums_for_track_collection(self) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"tracks\": 1}\n r = list(self._albums.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for album in r:\n if \"tracks\" not in album.keys():\n result.append(album[\"_id\"])\n return result",
"async def get_all_tracks(self) -> List[PlaylistTrack]:\n if isinstance(self._tracks, PartialTracks):\n return await self._tracks.build()\n\n _tracks = []\n offset = 0\n while len(self.tracks) < self.total_tracks:\n data = await self.__client.http.get_playlist_tracks(self.owner.id, self.id, limit=50, offset=offset)\n\n _tracks += [PlaylistTrack(self.__client, item) for item in data['items']]\n offset += 50\n\n self.total_tracks = len(self._tracks)\n return list(self._tracks)",
"def get_tracks(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('tracks', search, start,\r\n max_items)",
"def get_audio_features(track_id=None):\n\n # connect to MongoDB\n mongo = MongoDatabase()\n mongo.connect()\n db = mongo.db\n\n if track_id is None:\n # get Spotify Tracks ids\n spotify_ids = db.songs.find({}, {\"spotify_id\" : 1, \"_id\" : 0})\n all_ids = [item[\"spotify_id\"] for item in spotify_ids]\n limit = len(all_ids)\n \n # the url only accept 100 ids at a time, so we will use batch ids\n for i in range(0, 1370, 100):\n if i + 100 < limit:\n batch = ','.join(all_ids[i:i+100])\n else:\n batch = ','.join(all_ids[i:limit])\n\n url = f\"https://api.spotify.com/v1/audio-features/?ids={batch}\"\n \n # get audio features\n result = fetch_data(url)\n\n # keep relevant info \n for audio_feat in result[\"audio_features\"]:\n save_audio_features(audio_feat, db.songs)\n else:\n # get audio features\n url = f\"https://api.spotify.com/v1/audio-features/{track_id}\"\n result = fetch_data(url)\n return result",
"def filter_tracks_by_audio_feature(self, tracks: List[str], audio_filter: Dict) -> List[str]:\n q = {\"_id\": {\"$in\": tracks}, **audio_filter}\n cols = {\"_id\": 1}\n r = list(self._tracks.find(q, cols))\n\n return [x[\"_id\"] for x in r]",
"def getTracks(self, album):\n\n\t\talbumSock = self.opener.open(album['url'])\t\t#download the album page\n\t\talbumPage = albumSock.read()\n\t\talbumSock.close()\n\n\t\tp = albumParser()\n\t\tp.feed(albumPage)\n\t\tp.close()\n\n\t\talbum['tracks'] = p.tracks\n\t\talbum['tracks'].sort(lambda x, y: cmp( x['num'], y['num'] )) #sort in track order",
"def list_tracks(self):\n i = 0\n while i < len(self.ss):\n s = self.ss.get_track(i)\n i += 1\n self.log.info((\"%d. %s - %s - %s\" % (i, s['artist'], s['album'], s['title'])))\n self.log.info((\"Total %d seconds of tracks in queue.\" % (self.ss.duration)))",
"def get_search_queries(self):\n artists_songs = []\n\n # Iterating through the playlist track objects inside the paging object.\n for playlist_track in self.playlist[\"tracks\"][\"items\"]:\n # Getting the track itself from the playlist track object.\n track = playlist_track[\"track\"]\n\n # Extracting the list of artists and track name and creating the corresponding string.\n artists_song_str = \", \".join([artists[\"name\"] for artists in track[\"artists\"]]) + \" - \" + track[\"name\"]\n\n artists_songs.append(artists_song_str)\n\n # Adding the duration of the track to the total duration of the playlist.\n self.duration_ms += track[\"duration_ms\"]\n\n return artists_songs",
"def getSongs(tracks):\r\n sp = getSP()\r\n songs = tracks[\"items\"]\r\n while tracks['next']:\r\n tracks = sp.next(tracks)\r\n for item in tracks[\"items\"]:\r\n songs.append(item)\r\n return songs",
"def fetch_songs(self):\n if len(self.songs) == 0:\n for file in self.MUSIC_DIR.joinpath (\"./songs\").iterdir():\n if file.is_file():\n self.songs.append (file)\n return self.songs",
"def get_playlist_tracks(self):\n return self.playlist_track_ids"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a track list based on an album list.
|
def get_tracks_from_albums(self, albums: List[str]) -> List[str]:
q = {"album_id": {"$in": albums}}
cols = {"_id": 1}
r = list(self._tracks.find(q, cols))
return [x["_id"] for x in r]
|
[
"def get_album_tracklist(name, artist=None, token=None):\n if not token:\n token = get_token()\n album = get_spotify_api(\"https://api.spotify.com/v1/search\", get=True, data={\"q\": (artist + \" - \" if artist else \"\") + name, \"type\": \"album\", \"limit\": 1})\n if album[\"albums\"][\"items\"]:\n tracks = get_spotify_api(album[\"albums\"][\"items\"][0][\"href\"] + \"/tracks\", get=True)\n output = []\n for track in tracks[\"items\"]:\n output.append([track[\"artists\"][0][\"name\"], track[\"name\"]])\n return output\n else:\n return \"No results\"",
"def getTracks(self, album):\n\n\t\talbumSock = self.opener.open(album['url'])\t\t#download the album page\n\t\talbumPage = albumSock.read()\n\t\talbumSock.close()\n\n\t\tp = albumParser()\n\t\tp.feed(albumPage)\n\t\tp.close()\n\n\t\talbum['tracks'] = p.tracks\n\t\talbum['tracks'].sort(lambda x, y: cmp( x['num'], y['num'] )) #sort in track order",
"def get_albums_for_track_collection(self) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"tracks\": 1}\n r = list(self._albums.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for album in r:\n if \"tracks\" not in album.keys():\n result.append(album[\"_id\"])\n return result",
"def list_album_tracks(uri):\r\n album = sp.album(uri)\r\n\r\n print('ALBUM NAME: ',album['name'])\r\n print('ARTIST: ',album['artists'][0]['name'])\r\n print('TYPE: ',album['album_type'])\r\n print('RELEASE DATE: ',album['release_date'])\r\n print('POPULARITY: ',album['popularity'],'\\n')\r\n \r\n album_tracks = sp.album_tracks(uri)\r\n \r\n print('TRACKS: \\n')\r\n \r\n for i in range(len(album_tracks['items'])):\r\n print('({}):'.format(i+1),album_tracks['items'][i]['name'])\r\n \r\n print('\\n---------------------------------------------------------------------------------\\n') \r\n \r\n return None",
"def getSongs(tracks):\r\n sp = getSP()\r\n songs = tracks[\"items\"]\r\n while tracks['next']:\r\n tracks = sp.next(tracks)\r\n for item in tracks[\"items\"]:\r\n songs.append(item)\r\n return songs",
"def vk_get_album_list(request):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), request.path))\n vk_token,vk_user = get_vk_cookies(request)\n method_name = 'photos.getAlbums'\n parameters = {\n 'owner_id': vk_user,\n 'need_covers': 1,\n 'need_system': 1,\n }\n content = vk_method(method_name,vk_token,parameters)\n\n albums = content['items']\n for album in albums:\n album['created'] = psql_time(album.get('created')) if isinstance(album.get('created'),int) else None\n album['updated'] = psql_time(album.get('updated')) if isinstance(album.get('updated'),int) else None\n\n return render(request,'vk_get_album_list.html',{\n # 'content': content,\n 'albums': content['items'],\n # 'album': album,\n # 'tags': tags,\n # 'resume': resume\n })",
"def importAlbums(album_list):\n\n for album in album_list:\n point = str(album['georss$where']['gml$Point']['gml$pos']['$t'])\n pointStr = point.split(\" \")\n loc_obj, created = Location.objects.get_or_create(name=album['gphoto$location']['$t'], defaults={'lat':pointStr[0], 'lng': pointStr[1]})\n Album.objects.get_or_create(name=album['title']['$t'],\n cover=album['media$group']['media$thumbnail'][0]['url'],\n url=album['link'][1]['href'],\n feed=album['link'][0]['href'],\n date = datetime.datetime.strptime(album['published']['$t'], \"%Y-%m-%dT%H:%M:%S.000Z\"),\n location=loc_obj)",
"def get_tracks_from_album(self, album, progress=None):\n q = {\"limit\": 50}\n url = \"albums/{}/tracks\".format(album['id'])\n page = self.get_api_v1(url, q)\n tracks = []\n for track in self.extract_page(page, progress):\n track['album'] = album\n tracks.append(Track(track))\n return tuple(tracks)",
"def getAlbums(self):\n\t\tbasketPage = self.request(site_prefix + 'basket.shtml')\n\n\t\tp = linksParser()\n\t\tp.feed(basketPage)\n\t\tp.close()\n\n\t\talbums = []\n\t\tfor link,desc in p.links.items():\n\t\t\tm = self.albumRe.match(link)\n\t\t\tif m:\n\t\t\t\tnew = dict()\n\t\t\t\tnew['url'] = site_prefix + \"downloads_iframe.shtml?\" + m.group(1)\n\t\t\t\tnew['artist'] = desc[1][0].strip()\n\t\t\t\tnew['title'] = \"\".join(desc[1][1:]).strip()\n\t\t\t\tnew['tracks'] = []\n\t\t\t\talbums.append(new)\n\n\t\treturn albums",
"def get_tracklist(self, label):\r\n queryString = \"\"\"\r\n PREFIX etree:<http://etree.linkedmusic.org/vocab/>\r\n PREFIX mo:<http://purl.org/ontology/mo/>\r\n PREFIX event:<http://purl.org/NET/c4dm/event.owl#>\r\n PREFIX skos:<http://www.w3.org/2004/02/skos/core#>\r\n PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\r\n SELECT DISTINCT ?audio ?label ?num ?tracklist ?name {{\r\n ?perf event:hasSubEvent ?tracklist.\r\n ?tracklist skos:prefLabel ?label.\r\n ?tracklist etree:number ?num.\r\n ?tracklist etree:audio ?audio.\r\n ?perf rdf:type mo:Performance.\r\n ?perf skos:prefLabel \"{0}\".\r\n ?perf mo:performer ?performer.\r\n ?performer foaf:name ?name.\r\n }} GROUP BY ?label ?audio ?num ORDER BY ?num \r\n \"\"\".format(label)\r\n self.sparql.setQuery(queryString)\r\n return self.sparql.query().convert()",
"def get(self): \n return getAllAlbums()",
"def get_albums(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('albums', search, start,\r\n max_items)",
"def albums(self, albums, market=None):\n\n tlist = [self._get_id(\"album\", a) for a in albums]\n if market is not None:\n return self._get(\"albums/?ids=\" + \",\".join(tlist) + '&market=' + market)\n else:\n return self._get(\"albums/?ids=\" + \",\".join(tlist))",
"def _search_album_songs(self, album: Optional[str] = None, artist: Optional[str] = None) ->\\\n Iterator[Tuple[str, Tuple[SongInformation, ...]]]:\n for result in self._search(query_type=\"album\", album=album, artist=artist):\n album_id: str = result['id']\n album_name: str = result['name']\n\n image_url: str = result.get('images', [{}])[0].get('url', None)\n image: Optional[Union[PNGSongImage, JPEGSongImage]] = self._fetch_image(image_url) \\\n if image_url is not None else None\n\n songs_raw = self._all_items(self.api.album_tracks(album_id))\n songs = [self._parse_track(song_result).altered(album=album_name, cover_image=image)\n for song_result in songs_raw]\n\n yield album_name, tuple(songs)",
"def fetchAlbumIds(artist_id):\n url = \"https://api.spotify.com/v1/artists/\" + artist_id + \"/albums?album_type=album&market=US\"\n req = requests.get(url)\n if req.ok == False:\n return 'Error: bad Spotify API URL or similar error'\n data = req.json()\n albums_list = []\n #print len(data[u'items'])\n for album in data[u'items']:\n album_id = album[u'id']\n albums_list.append(album_id)\n return albums_list",
"def get_spotify_tracks(url):\n if 'track' in url:\n return [get_spotify_track(url)]\n if 'album' in url:\n return get_spotify_album(url)\n if 'playlist' in url:\n return get_spotify_playlist(url)\n return []",
"def get_album_ids(name, artist_id, artist_name):\n albums_list = [album for album in musicbrainzngs.\n search_releases(query=name, arid=artist_id)[\"release-list\"]\n if remove_forbidden_characters(custom_replace_title(\n album[\"title\"])).lower() == name.lower()\n and \"date\" in album and album[\"date\"]]\n if not albums_list:\n raise ValueError(f\"Album {name} not literally found by artist \"\n f\"{artist_name}\")\n albums_list = sorted(albums_list, key=lambda a: a[\"date\"])\n use_for_cover = None\n for album in reversed(albums_list):\n try:\n musicbrainzngs.get_image_list(album[\"id\"])\n use_for_cover = album\n break\n except musicbrainzngs.musicbrainz.ResponseError:\n continue\n if use_for_cover is None:\n raise ValueError(f\"No cover art available for {name} by \"\n f\"{artist_name}, this is unsupported behaviour\")\n else:\n return albums_list[0][\"id\"], use_for_cover[\"id\"]",
"def get_album(self, album_id):\n track = []\n img = None\n\n for i in self.__albums:\n for t_id, info in self.__tracks.items():\n if i[\"id\"] == t_id and t_id == album_id:\n img = i[\"img\"]\n\n for a, b in info.items():\n track.append({\n \"name\": a,\n \"length\": b\n })\n return {\"album_id\": album_id, \"img\": img, \"track\": track}",
"def albums(self):\n\n c.artist = request.GET.get('artist', u'')\n c.album = request.GET.get('album', u'')\n\n try:\n self.m = g.p.connect()\n except (NoMPDConnection, ConnectionClosed):\n return render('/null.html')\n c.albums = self.m.albums(c.artist)\n\n aa = AlbumArt()\n c.album_imgs = aa.artist_art(c.artist)\n random.shuffle(c.album_imgs)\n return render('/albums.html')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a list of all tracks in the database.
|
def get_tracks(self) -> List[str]:
q = {}
cols = {"_id": 1}
r = list(self._tracks.find(q, cols))
return [x["_id"] for x in r]
|
[
"async def get_all_tracks(self) -> List[PlaylistTrack]:\n if isinstance(self._tracks, PartialTracks):\n return await self._tracks.build()\n\n _tracks = []\n offset = 0\n while len(self.tracks) < self.total_tracks:\n data = await self.__client.http.get_playlist_tracks(self.owner.id, self.id, limit=50, offset=offset)\n\n _tracks += [PlaylistTrack(self.__client, item) for item in data['items']]\n offset += 50\n\n self.total_tracks = len(self._tracks)\n return list(self._tracks)",
"def tracks(self, tracks, market=None):\n\n tlist = [self._get_id(\"track\", t) for t in tracks]\n return self._get(\"tracks/?ids=\" + \",\".join(tlist), market=market)",
"def list_tracks(self):\n i = 0\n while i < len(self.ss):\n s = self.ss.get_track(i)\n i += 1\n self.log.info((\"%d. %s - %s - %s\" % (i, s['artist'], s['album'], s['title'])))\n self.log.info((\"Total %d seconds of tracks in queue.\" % (self.ss.duration)))",
"def get_albums_for_track_collection(self) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"tracks\": 1}\n r = list(self._albums.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for album in r:\n if \"tracks\" not in album.keys():\n result.append(album[\"_id\"])\n return result",
"def getSongs(tracks):\r\n sp = getSP()\r\n songs = tracks[\"items\"]\r\n while tracks['next']:\r\n tracks = sp.next(tracks)\r\n for item in tracks[\"items\"]:\r\n songs.append(item)\r\n return songs",
"def getSelectedTracks(self):\n return cmds.ls('Track*', sl=True)",
"def get_playlist_tracks(self):\n return self.playlist_track_ids",
"def generate_track_list(self):\n with_lyrics = set(self._ldb.get_songs_with_lyrics())\n with_features = set(self._fdb.get_songs_with_all_features())\n with_both = with_lyrics.intersection(with_features)\n\n with open('tracks.txt', 'wb') as f:\n for t in with_both:\n f.write('%s\\n' % t)",
"def get_tracks(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('tracks', search, start,\r\n max_items)",
"def list_album_tracks(uri):\r\n album = sp.album(uri)\r\n\r\n print('ALBUM NAME: ',album['name'])\r\n print('ARTIST: ',album['artists'][0]['name'])\r\n print('TYPE: ',album['album_type'])\r\n print('RELEASE DATE: ',album['release_date'])\r\n print('POPULARITY: ',album['popularity'],'\\n')\r\n \r\n album_tracks = sp.album_tracks(uri)\r\n \r\n print('TRACKS: \\n')\r\n \r\n for i in range(len(album_tracks['items'])):\r\n print('({}):'.format(i+1),album_tracks['items'][i]['name'])\r\n \r\n print('\\n---------------------------------------------------------------------------------\\n') \r\n \r\n return None",
"def show_tracks(self):\n\n for track in self.__tracks:\n print(f\"Track {track.id}:\")\n for session in track.sessions():\n for talk in session.talks():\n print(talk)",
"def get_all(self):\n return self.session.query(Tour)",
"def get_tracks_for_audio_analysis(self) -> List[str]:\n \n l.debug(\"Finding Tracks without audio analysis, this can take some time.\")\n q = {}\n cols = {\"_id\": 1, \"audio_analysis_flag\": 1}\n r = list(self._tracks.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for track in r:\n if \"audio_analysis_flag\" not in track.keys():\n result.append(track[\"_id\"])\n else:\n if not track[\"audio_analysis_flag\"]:\n result.append(track[\"_id\"])\n return result",
"def list_songs(self):\n response = requests.get('http://localhost:5000/song/all')\n song_names = []\n for s in response.json():\n song_names.append(s['title'])\n self._player.list_songs(song_names)",
"def get_audios(self) -> List[Dict[str, str]]:\n with self.cursor(dictionary=True) as cur:\n cur.execute(self.SELECT_AUDIOS)\n return list(cur)",
"def get_tracks_from_albums(self, albums: List[str]) -> List[str]:\n q = {\"album_id\": {\"$in\": albums}}\n cols = {\"_id\": 1}\n r = list(self._tracks.find(q, cols))\n\n return [x[\"_id\"] for x in r]",
"def get_search_queries(self):\n artists_songs = []\n\n # Iterating through the playlist track objects inside the paging object.\n for playlist_track in self.playlist[\"tracks\"][\"items\"]:\n # Getting the track itself from the playlist track object.\n track = playlist_track[\"track\"]\n\n # Extracting the list of artists and track name and creating the corresponding string.\n artists_song_str = \", \".join([artists[\"name\"] for artists in track[\"artists\"]]) + \" - \" + track[\"name\"]\n\n artists_songs.append(artists_song_str)\n\n # Adding the duration of the track to the total duration of the playlist.\n self.duration_ms += track[\"duration_ms\"]\n\n return artists_songs",
"def get_album_tracks(self, album_id):\n response = self.__get_data(self.url.albums_tracks_url().format(id=str(album_id)))\n tracks = []\n for album_track in response['tracks']['items']:\n track = self.get_track(album_track['id'])\n tracks.append(track)\n return tracks",
"def get_track_info(self, track_ids: List[str], fields: Dict={\"artists\": 0, \"audio_analysis\": 0}) -> List[Dict]:\n\n # Check if needs to be done in batches\n id_lim = 50000\n batches = np.array_split(track_ids, int(np.ceil(len(track_ids) / id_lim)))\n result = []\n for batch in batches:\n\n q = {\"_id\": {\"$in\": batch.tolist()}}\n cols = fields\n r = list(self._tracks.find(q, cols))\n result.extend(r)\n\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all available information for every track in track_ids. Done in batches as it is a large database.
|
def get_track_info(self, track_ids: List[str], fields: Dict={"artists": 0, "audio_analysis": 0}) -> List[Dict]:
    # Large queries are issued in batches of at most id_lim ids at a time
id_lim = 50000
batches = np.array_split(track_ids, int(np.ceil(len(track_ids) / id_lim)))
result = []
for batch in batches:
q = {"_id": {"$in": batch.tolist()}}
cols = fields
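        # "fields" acts as a MongoDB projection; keys set to 0 (artists and audio_analysis by default) are excluded from the results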
r = list(self._tracks.find(q, cols))
result.extend(r)
return result
|
[
"def get_general_info_mult_tracks(track_ids):\n connect()\n url = 'https://api.spotify.com/v1/tracks'\n # Max that can be submitted to this endpoint is 50 at a time\n track_groups = make_chunks(track_ids, 50)\n track_details = []\n for group in track_groups:\n query_params = {'ids': ','.join(group)}\n response = requests.get(\n url, params=query_params, headers=get_header()\n )\n resp_json = response.json()\n if resp.get('tracks'):\n track_details.extend(resp_json['tracks'])\n return track_details",
"def tracks(self, tracks, market=None):\n\n tlist = [self._get_id(\"track\", t) for t in tracks]\n return self._get(\"tracks/?ids=\" + \",\".join(tlist), market=market)",
"def retrieve_all_songs_and_ids(sp, uris, spotify_songs, spotify_songs_ids):\n for i in range(len(uris)):\n tracks = sp.album_tracks(uris[i])\n\n for n in range(len(tracks['items'])):\n if tracks['items'][n]['name'] not in spotify_songs:\n spotify_songs.add(tracks['items'][n]['name'])\n spotify_songs_ids.append(tracks['items'][n].get('id'))",
"def get_tracks_audio_features(track_ids):\n connect()\n url = 'https://api.spotify.com/v1/audio-features/'\n # Max that can be submitted to this endpoint is 100 at a time\n track_groups = make_chunks(track_ids, 100)\n audio_features = []\n for group in track_groups:\n query_params = {'ids': ','.join(group)}\n response = requests.get(\n url, params=query_params, headers=get_header()\n )\n resp_json = response.json()\n if resp_json.get('audio_features'):\n audio_features.extend(resp_json['audio_features'])\n return audio_features",
"def get_metadata_bulk(dids, inherit=False, *, session: \"Session\"):\n if inherit:\n parent_list = []\n unique_dids = []\n parents = [1, ]\n depth = 0\n for did in dids:\n unique_dids.append((did['scope'], did['name']))\n parent_list.append([(did['scope'], did['name']), ])\n\n while parents and depth < 20:\n parents = []\n for did in list_parent_dids_bulk(dids, session=session):\n scope = did['scope']\n name = did['name']\n child_scope = did['child_scope']\n child_name = did['child_name']\n if (scope, name) not in unique_dids:\n unique_dids.append((scope, name))\n if (scope, name) not in parents:\n parents.append((scope, name))\n for entry in parent_list:\n if entry[-1] == (child_scope, child_name):\n entry.append((scope, name))\n dids = [{'scope': did[0], 'name': did[1]} for did in parents]\n depth += 1\n unique_dids = [{'scope': did[0], 'name': did[1]} for did in unique_dids]\n meta_dict = {}\n for did in unique_dids:\n try:\n meta = get_metadata(did['scope'], did['name'], plugin='JSON', session=session)\n except exception.DataIdentifierNotFound:\n meta = {}\n meta_dict[(did['scope'], did['name'])] = meta\n for dids in parent_list:\n result = {'scope': dids[0][0], 'name': dids[0][1]}\n for did in dids:\n for key in meta_dict[did]:\n if key not in result:\n result[key] = meta_dict[did][key]\n yield result\n else:\n condition = []\n for did in dids:\n condition.append(and_(models.DataIdentifier.scope == did['scope'],\n models.DataIdentifier.name == did['name']))\n try:\n for chunk in chunks(condition, 50):\n stmt = select(\n models.DataIdentifier\n ).with_hint(\n models.DataIdentifier, \"INDEX(DIDS DIDS_PK)\", 'oracle'\n ).where(\n or_(*chunk)\n )\n for row in session.execute(stmt).scalars():\n yield row.to_dict()\n except NoResultFound:\n raise exception.DataIdentifierNotFound('No Data Identifiers found')",
"def retrieve_all_songs_and_ids_app(sp, name, uris, spotify_songs, spotify_songs_ids):\n for i in range(len(uris)):\n tracks = sp.album_tracks(uris[i])\n\n for n in range(len(tracks['items'])):\n for g in tracks['items'][n]['artists']:\n if g.get('name') == name:\n if tracks['items'][n]['name'] not in spotify_songs:\n spotify_songs.add(tracks['items'][n]['name'])\n spotify_songs_ids.append(tracks['items'][n].get('id'))",
"def get_audio_features(track_id=None):\n\n # connect to MongoDB\n mongo = MongoDatabase()\n mongo.connect()\n db = mongo.db\n\n if track_id is None:\n # get Spotify Tracks ids\n spotify_ids = db.songs.find({}, {\"spotify_id\" : 1, \"_id\" : 0})\n all_ids = [item[\"spotify_id\"] for item in spotify_ids]\n limit = len(all_ids)\n \n # the url only accept 100 ids at a time, so we will use batch ids\n for i in range(0, 1370, 100):\n if i + 100 < limit:\n batch = ','.join(all_ids[i:i+100])\n else:\n batch = ','.join(all_ids[i:limit])\n\n url = f\"https://api.spotify.com/v1/audio-features/?ids={batch}\"\n \n # get audio features\n result = fetch_data(url)\n\n # keep relevant info \n for audio_feat in result[\"audio_features\"]:\n save_audio_features(audio_feat, db.songs)\n else:\n # get audio features\n url = f\"https://api.spotify.com/v1/audio-features/{track_id}\"\n result = fetch_data(url)\n return result",
"def list_tracks(self):\n i = 0\n while i < len(self.ss):\n s = self.ss.get_track(i)\n i += 1\n self.log.info((\"%d. %s - %s - %s\" % (i, s['artist'], s['album'], s['title'])))\n self.log.info((\"Total %d seconds of tracks in queue.\" % (self.ss.duration)))",
"def trackdata(request):\n tracklist = SupportedTrackName.objects.all()\n\n detailed_trackdata = []\n \n # Lookup the metadata for each of the supported tracks.\n for track in tracklist:\n temp_data = {}\n temp_data['id'] = track.id\n temp_data['trackname'] = track.trackkey.trackname\n temp_data['racecount'] = 0\n temp_data['recent_racedate'] = None\n \n # Get the number of racing currently in the system.\n racecount = SingleRaceDetails.objects.filter(trackkey=track.trackkey.id).count()\n temp_data['racecount'] = racecount\n # Get the most recent race date\n recent_racedate = SingleRaceDetails.objects.filter(trackkey=track.trackkey.id).order_by('-racedate')[:1]\n if recent_racedate:\n temp_data['recent_racedate'] = recent_racedate.get().racedate\n \n detailed_trackdata.append(temp_data)\n \n\n return render_to_response('trackdata.html', {'track_list':detailed_trackdata}, context_instance=RequestContext(request))",
"def collector():\n channels = Builder(\"channels\")\n\n with open(\"ids.pickle\", \"rb\") as fp:\n items = pickle.load(fp)\n\n ids = list(items[\"ids\"])\n start = 0\n while True:\n end = start + 50\n batch = ids[start:end]\n if batch:\n batch = \",\".join(batch)\n # If i haven´t reach the end of the list, request info\n response = channels.list(\n part=\"snippet,statistics\",\n id=batch,\n maxResults=50,\n fields=\"items(id,snippet(title,publishedAt,country),statistics(viewCount,commentCount,subscriberCount,videoCount))\",\n )\n print(response)\n break\n # start = end\n else:\n break",
"def get_id_pairs(track_list):\r\n\r\n return [(t[\"id\"], t.get(\"playlistEntryId\")) for t in track_list]",
"def getSongs(tracks):\r\n sp = getSP()\r\n songs = tracks[\"items\"]\r\n while tracks['next']:\r\n tracks = sp.next(tracks)\r\n for item in tracks[\"items\"]:\r\n songs.append(item)\r\n return songs",
"def read_tracks(self):\n # Each track is a bs4 Tag object\n track_soup = self.find_track_holder()\n data_attrs = [\"startTime\", \"duration\", \"name\"]\n for track in track_soup.children:\n # Initialize data storage\n data_keys = []\n data_vals = []\n if track.name is None:\n continue\n # For each of the child elements in the track,\n for child in track.children:\n # If the name isn't None (emptystr) and the name starts with\n # \"xmpdm:\", the prefix on all of the data tags,\n if child.name is not None and child.name in data_attrs:\n # Append the name (minus the prefix) to the keys list\n data_keys.append(child.name.lower())\n # Append the value to the values list\n data_vals.append(\n self.number_normalizer(child.contents[0])\n )\n # if child.name == \"xmpdm:name\":\n # print(\"Reading %s...\" % child.contents[0])\n # This looks like\n # {\n # 'name':'Wolfgun - Road to Jupiter',\n # 'starttime':10300,\n # 'duration':347000\n # }\n data = dict(zip(data_keys, data_vals))\n self.tracklist.append(data)",
"def parse_track_ids_from_metadata(tracks):\n\n track_ids = []\n for track in tracks:\n if not track.get('track'):\n continue\n track_id = track['track']['id']\n track_ids.append(track_id)\n if not track_ids:\n raise ValueError\n return track_ids",
"def update_tracks(self, track_info_list: List[Dict]) -> None:\n\n for track in track_info_list:\n\n # Add track to album record\n q = {\"_id\": track[\"album_id\"]}\n self._albums.update_one(q, {\"$push\": {\"tracks\": track[\"id\"]}}, upsert=True)\n\n # Add track data to tracks\n q = {\"_id\": track[\"id\"]}\n track[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del track[\"id\"]\n self._tracks.update_one(q, {\"$set\": track}, upsert=True)",
"def fetch_tracks(sp, playlist, user_id):\n log.debug('Fetching saved tracks')\n offset = 0\n songs_dict = {}\n if user_id is None:\n current_user_id = sp.current_user()['id']\n else:\n current_user_id = user_id\n while True:\n if playlist is None:\n results = sp.current_user_saved_tracks(limit=50, offset=offset)\n else:\n results = sp.user_playlist_tracks(current_user_id, playlist, None,\n limit=50, offset=offset)\n\n log.debug('Got result json %s', results)\n for item in results['items']:\n track = item['track']\n\n if track is not None:\n track_name = str(track['name'])\n track_artist = str(track['artists'][0]['name'])\n log.debug('Appending %s to'\n 'songs list', (track['name'] + ' - ' + track['artists'][0]['name']))\n songs_dict.update({track_name: track_artist})\n else:\n log.warning(\"Track/artist name for %s not found, skipping\", track)\n\n offset += 1\n\n if results.get('next') is None:\n log.info('All pages fetched, time to leave.'\n ' Added %s songs in total', offset)\n break\n return songs_dict",
"def get_features_by_msd(self, track_id):\n track_id = (track_id,)\n for row in self.db.execute('SELECT * FROM songs WHERE track_id=?', track_id):\n return MSFeatures(row[self._fstart:])",
"def getTracesByIds(self, trace_ids, adjust):\n pass",
"def get_audio_features( tracks, tracks_artistnames):\n if not tracks:\n print('No tracks provided.')\n return\n\n \n track_map = {track.get('id'): track for track in tracks}\n\n # Request the audio features for the chosen tracks (limited to 50)\n \n tracks_features_response = spotify.audio_features(tracks=track_map.keys())\n\n desired_features = [\n 'tempo',\n 'time_signature',\n 'key',\n 'mode',\n 'loudness',\n 'energy',\n 'danceability',\n 'acousticness',\n 'instrumentalness',\n 'liveness',\n 'speechiness',\n 'valence'\n ]\n\n tracks_features_list = []\n for track_features in tracks_features_response:\n \n features_dict = dict()\n for feature in desired_features:\n \n feature_value = track_features.get(feature)\n\n \n if feature == 'key':\n feature_value = translate_key_to_pitch(feature_value)\n \n features_dict[feature] = feature_value\n \n tracks_features_list.append(features_dict)\n\n\n\n tracks_features_map = {f.get('id'): [tracks_artistnames[i], tracks_features_list[i], \"https://open.spotify.com/track/\" + f.get('id')] for i, f in enumerate(tracks_features_response)}\n\n \n \n \n \n \n\n return tracks_features_map"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all tracks in the database from a list of artists and a date range for releases.
|
def get_tracks_from_artists(self, artists: List[str], start_date: str, end_date: str) -> List[str]:
albums = self.get_albums_from_artists_by_date(artists, start_date, end_date)
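    # Deduplicate track ids, since the same track can appear on more than one album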
tracks = np.unique(self.get_tracks_from_albums(albums)).tolist()
return tracks
|
[
"def get_release_list(artist_str):\n username = 'Username'\n password = 'Password'\n \n musicbrainzngs.set_useragent(username, password)\n artist_list = musicbrainzngs.search_artists(artist=artist_str)['artist-list']\n artist = sorted(artist_list, reverse=True, key=lambda artist:int(artist['ext:score']))[0]\n artist_id = artist['id']\n\n limit = 100\n offset = 0\n release_list = []\n release_count = 1\n\n while offset < release_count:\n print 'Requesting tracks {0} - {1}'.format(str(offset), str(offset+limit))\n result = musicbrainzngs.browse_releases(artist=artist_id, release_status=['official'], release_type=['album'], includes=['recordings'], limit=limit, offset=offset)\n release_count = result['release-count']\n release_list += result['release-list']\n offset += limit\n \n return release_list",
"def get_releases(artist_ids):\n all_releases = []\n for art_id in artist_ids:\n releases = get_releases_artist(art_id)\n all_releases.extend(releases)\n all_releases = sorted(all_releases,\n key=lambda r: r['first-release-date'],\n reverse=True)\n return all_releases",
"def tracks(self, tracks, market=None):\n\n tlist = [self._get_id(\"track\", t) for t in tracks]\n return self._get(\"tracks/?ids=\" + \",\".join(tlist), market=market)",
"def get_artists_of_year(year: int) -> list:\n sql_request = sql_request_artists_year(year)\n\n sql_data = get_data_from_db(sql_request)\n artists = create_data_of_year(sql_data)\n return artists",
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def get_artists(self, with_connection=None):\n if with_connection:\n connection = with_connection\n else:\n connection = self.get_database_connection()\n rows = connection.execute(\"SELECT name FROM artists ORDER BY name\")\n results = [row for row in rows]\n if not with_connection:\n self.close_database_connection(connection)\n for artist in results:\n yield artist",
"def get_search_queries(self):\n artists_songs = []\n\n # Iterating through the playlist track objects inside the paging object.\n for playlist_track in self.playlist[\"tracks\"][\"items\"]:\n # Getting the track itself from the playlist track object.\n track = playlist_track[\"track\"]\n\n # Extracting the list of artists and track name and creating the corresponding string.\n artists_song_str = \", \".join([artists[\"name\"] for artists in track[\"artists\"]]) + \" - \" + track[\"name\"]\n\n artists_songs.append(artists_song_str)\n\n # Adding the duration of the track to the total duration of the playlist.\n self.duration_ms += track[\"duration_ms\"]\n\n return artists_songs",
"def get_releases_artist(art_id):\n uri = \"http://musicbrainz.org/ws/2/artist/{}?inc=release-groups&fmt=json\"\n while True:\n logging.info(\"Querying MusicBrainz for artist_id:%s\", art_id)\n page = requests.get(uri.format(art_id))\n if page.status_code == 200:\n break\n logging.warning(\"MusicBrainz returned status=%d\", page.status_code)\n time.sleep(5)\n j = json.loads(page.content.decode('utf-8'))\n releases = j['release-groups']\n del j['release-groups']\n for release in releases:\n release['artist'] = j\n return releases",
"def get_albums_for_track_collection(self) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"tracks\": 1}\n r = list(self._albums.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for album in r:\n if \"tracks\" not in album.keys():\n result.append(album[\"_id\"])\n return result",
"def getStockHistory(self, start, end):\n stock_hist = self.historicalinformation_set.filter(\n date__gte=start.date(),\n date__lte=end.date()) \\\n .order_by(\"date\") \n # check if the range of dates stored is sufficient for the query\n # i.e. check if the greatest date = start and the smallest = end\n gaps = []\n if len(stock_hist) == 0:\n gaps = [(start, end)]\n else:\n earliest_in_range = stock_hist[0].date\n latest_in_range = stock_hist[len(stock_hist) - 1].date\n #if our records don't go far enough back\n if start.date() < earliest_in_range:\n gaps.append((start, earliest_in_range))\n # check for any gaps in the stored data\n for i in range(len(stock_hist) - 1):\n d = stock_hist[i].date\n d1 = stock_hist[i + 1].date\n if 1 < (d1 - d).days:\n gaps.append((d, d1))\n # if our records aren't up to date enough\n if end.date() > latest_in_range:\n gaps.append((earliest_in_range, end))\n # return the list of stock history models\n # fill in the gaps in our stock history\n for g in gaps:\n df = sh.getHistoricalStockInformation(self.ticker, g[0], g[1])\n self.addHistFromDf(df)\n return self.historicalinformation_set.filter(\n date__gte=start.date(),\n date__lte=end.date()) \\\n .order_by(\"date\")",
"def get_track_info(self, track_ids: List[str], fields: Dict={\"artists\": 0, \"audio_analysis\": 0}) -> List[Dict]:\n\n # Check if needs to be done in batches\n id_lim = 50000\n batches = np.array_split(track_ids, int(np.ceil(len(track_ids) / id_lim)))\n result = []\n for batch in batches:\n\n q = {\"_id\": {\"$in\": batch.tolist()}}\n cols = fields\n r = list(self._tracks.find(q, cols))\n result.extend(r)\n\n return result",
"def retrieve_all_songs_and_ids_app(sp, name, uris, spotify_songs, spotify_songs_ids):\n for i in range(len(uris)):\n tracks = sp.album_tracks(uris[i])\n\n for n in range(len(tracks['items'])):\n for g in tracks['items'][n]['artists']:\n if g.get('name') == name:\n if tracks['items'][n]['name'] not in spotify_songs:\n spotify_songs.add(tracks['items'][n]['name'])\n spotify_songs_ids.append(tracks['items'][n].get('id'))",
"def getSongs(tracks):\r\n sp = getSP()\r\n songs = tracks[\"items\"]\r\n while tracks['next']:\r\n tracks = sp.next(tracks)\r\n for item in tracks[\"items\"]:\r\n songs.append(item)\r\n return songs",
"def load_artist_top_tracks_information_into_df(self):\n artist_top_track_information_df = pd.DataFrame()\n\n avg_popularity_artist_top_tracks = []\n avg_duration_artist_top_tracks = []\n avg_acousticness_artist_top_tracks = []\n avg_danceability_artist_top_tracks = []\n avg_energy_artist_top_tracks = []\n avg_instrumentalness_artist_top_tracks = []\n avg_liveness_artist_top_tracks = []\n avg_loudness_artist_top_tracks = []\n avg_speechiness_artist_top_tracks = []\n avg_valence_artist_top_tracks = []\n avg_tempo_artist_top_tracks = []\n\n for artists in self.playlist_information.artists_info.values:\n popularities, durations, acousticnesses, danceabilities, energies, instrumentalnesses, livenesses, loudnesses, speechinesses, valences, tempos = [], [], [], [], [], [], [], [], [], [], []\n\n for artist_info in artists:\n artist_top_songs = self.artist_top_tracks[\n self.artist_top_tracks.id.isin(self.artist_to_top_tracks[artist_info['id']])]\n\n for i in range(artist_top_songs.shape[0]):\n\n if artist_top_songs.iloc[i].track_info:\n popularities.append(artist_top_songs.iloc[i].track_info['popularity'])\n\n if artist_top_songs.iloc[i].track_features:\n artist_top_track_features = artist_top_songs.iloc[i].track_features\n\n durations.append(artist_top_track_features['duration_ms'] / 1000)\n acousticnesses.append(artist_top_track_features['acousticness'])\n danceabilities.append(artist_top_track_features['danceability'])\n energies.append(artist_top_track_features['energy'])\n instrumentalnesses.append(artist_top_track_features['instrumentalness'])\n livenesses.append(artist_top_track_features['liveness'])\n loudnesses.append(artist_top_track_features['loudness'])\n speechinesses.append(artist_top_track_features['speechiness'])\n valences.append(artist_top_track_features['valence'])\n tempos.append(artist_top_track_features['tempo'])\n\n avg_popularity_artist_top_tracks.append(np.array(popularities).mean() if len(popularities) > 0 else None)\n avg_duration_artist_top_tracks.append(np.array(durations).mean() if len(durations) > 0 else None)\n avg_acousticness_artist_top_tracks.append(\n np.array(acousticnesses).mean() if len(acousticnesses) > 0 else None)\n avg_danceability_artist_top_tracks.append(\n np.array(danceabilities).mean() if len(danceabilities) > 0 else None)\n avg_energy_artist_top_tracks.append(np.array(energies).mean() if len(energies) > 0 else None)\n avg_instrumentalness_artist_top_tracks.append(\n np.array(instrumentalnesses).mean() if len(instrumentalnesses) > 0 else None)\n avg_liveness_artist_top_tracks.append(np.array(livenesses).mean() if len(livenesses) > 0 else None)\n avg_loudness_artist_top_tracks.append(np.array(loudnesses).mean() if len(loudnesses) > 0 else None)\n avg_speechiness_artist_top_tracks.append(np.array(speechinesses).mean() if len(speechinesses) > 0 else None)\n avg_valence_artist_top_tracks.append(np.array(valences).mean() if len(valences) > 0 else None)\n avg_tempo_artist_top_tracks.append(np.array(tempos).mean() if len(tempos) > 0 else None)\n\n artist_top_track_information_df['avg_popularity_artist_top_tracks'] = pd.Series(avg_popularity_artist_top_tracks)\n artist_top_track_information_df['avg_duration_artist_top_tracks'] = pd.Series(avg_duration_artist_top_tracks)\n artist_top_track_information_df['avg_acousticness_artist_top_tracks'] = pd.Series(avg_acousticness_artist_top_tracks)\n artist_top_track_information_df['avg_danceability_artist_top_tracks'] = pd.Series(avg_danceability_artist_top_tracks)\n 
artist_top_track_information_df['avg_energy_artist_top_tracks'] = pd.Series(avg_energy_artist_top_tracks)\n artist_top_track_information_df['avg_instrumentalness_artist_top_tracks'] = pd.Series(avg_instrumentalness_artist_top_tracks)\n artist_top_track_information_df['avg_liveness_artist_top_tracks'] = pd.Series(avg_liveness_artist_top_tracks)\n artist_top_track_information_df['avg_loudness_artist_top_tracks'] = pd.Series(avg_loudness_artist_top_tracks)\n artist_top_track_information_df['avg_speechiness_artist_top_tracks'] = pd.Series(avg_speechiness_artist_top_tracks)\n artist_top_track_information_df['avg_valence_artist_top_tracks'] = pd.Series(avg_valence_artist_top_tracks)\n artist_top_track_information_df['avg_tempo_artist_top_tracks'] = pd.Series(avg_tempo_artist_top_tracks)\n\n self.artist_top_track_information_df = artist_top_track_information_df",
"def list_album_tracks(uri):\r\n album = sp.album(uri)\r\n\r\n print('ALBUM NAME: ',album['name'])\r\n print('ARTIST: ',album['artists'][0]['name'])\r\n print('TYPE: ',album['album_type'])\r\n print('RELEASE DATE: ',album['release_date'])\r\n print('POPULARITY: ',album['popularity'],'\\n')\r\n \r\n album_tracks = sp.album_tracks(uri)\r\n \r\n print('TRACKS: \\n')\r\n \r\n for i in range(len(album_tracks['items'])):\r\n print('({}):'.format(i+1),album_tracks['items'][i]['name'])\r\n \r\n print('\\n---------------------------------------------------------------------------------\\n') \r\n \r\n return None",
"def generate_artists(tracks):\n artist_pool = _generate_artist_pool_lower_case(tracks)\n artists = []\n for track in chain.from_iterable([d.values() for d in tracks.values()]):\n for name, import_ in track[\"artists\"]:\n name = artist_pool[normalize_accents(name.lower())]\n if (name, import_) not in artists:\n artists.append((name, import_))\n artists, tracks = filter_artists(artists, tracks)\n return artists, tracks",
"def get_artists(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('artists', search, start,\r\n max_items)",
"def filter_artists(artists, tracks=None):\n to_replace = construct_replacement_list(artists)\n artists = fix_artists_list(artists, to_replace)\n if tracks:\n artist_pool = _generate_artist_pool_lower_case(tracks)\n for dnum, disc in tracks.items():\n for tnum, track in disc.items():\n track[\"artists\"] = fix_artists_list(\n [\n (artist_pool[normalize_accents(art.lower())], imp)\n for art, imp in track[\"artists\"]\n ],\n to_replace,\n )\n return artists, tracks",
"def retrieve_all_songs_and_ids(sp, uris, spotify_songs, spotify_songs_ids):\n for i in range(len(uris)):\n tracks = sp.album_tracks(uris[i])\n\n for n in range(len(tracks['items'])):\n if tracks['items'][n]['name'] not in spotify_songs:\n spotify_songs.add(tracks['items'][n]['name'])\n spotify_songs_ids.append(tracks['items'][n].get('id'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates a track and its album from a list.
|
def update_tracks(self, track_info_list: List[Dict]) -> None:
for track in track_info_list:
# Add track to album record
q = {"_id": track["album_id"]}
self._albums.update_one(q, {"$push": {"tracks": track["id"]}}, upsert=True)
# Add track data to tracks
q = {"_id": track["id"]}
track["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
del track["id"]
self._tracks.update_one(q, {"$set": track}, upsert=True)
|
[
"def update_albums(self, album_info: List) -> None:\n\n for album in album_info:\n if isinstance(album, dict):\n q = {\"_id\": album[\"id\"]}\n\n # Writing updates (formatting changes)\n album[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del album[\"id\"]\n\n self._albums.update_one(q, {\"$set\": album}, upsert=True)",
"def update_track_features(self, tracks: List[Dict]) -> None:\n for track in tracks:\n q = {\"_id\": track[\"id\"]}\n\n # Writing updates (formatting changes)\n track[\"audio_features\"] = True\n track[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del track[\"id\"]\n\n self._tracks.update_one(q, {\"$set\": track}, upsert=True)",
"def update_playlist(self, songs):\n self.playlists[self.curr_playlist_name] = songs\n pickle.dump(self.playlists, open(\"playlists.pkl\", \"wb\"), protocol = 2)\n return True",
"def task_6_song_edit_album():\n song = Song.objects.get(title='Superstition')\n song.album_name = 'Way to Go'\n song.save()",
"def add_track(self, track):\n self.tracks.add(track.id)\n self.artists.update(track.artists)",
"def add_tracks_to_playlist(self, tracks, playlist):\n playlist_id = playlist['id']\n endpoint = f'playlists/{playlist_id}/tracks'\n iterations = math.ceil(len(tracks) / 100)\n\n for i in range(iterations):\n upper_limit = (i + 1) * 100\n lower_limit = upper_limit - 100\n track_uris = tracks[lower_limit:upper_limit]\n data = {\n 'uris': track_uris\n }\n self._api_update_request(endpoint, data)",
"def _update_(self, db):\n self.artist_db = self._find(db, 'album', 'albumartist')\n self.music_db = db\n self.initialized = True",
"def update_artists(self, artist_info_list: List[Dict]) -> None:\n\n for artist in tqdm(artist_info_list):\n q = {\"_id\": artist[\"id\"]}\n\n # Writing updates (formatting changes)\n artist[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n artist[\"total_followers\"] = artist[\"followers\"][\"total\"]\n del artist[\"followers\"]\n del artist[\"id\"]\n\n self._artists.update_one(q, {\"$set\": artist}, upsert=True)",
"def _sync_queue(self):\n resp = self.get('playlist.json').json()\n playlist = resp['children'][0]\n for i, d in enumerate(playlist['children']):\n track = self._tracks[int(d['name'])]\n track.idx = i\n track.plid = int(d['id'])",
"def change_song_metadata(self, songs):\r\n\r\n mutate_call = mobileclient.BatchMutateTracks\r\n mutations = [{'update': s} for s in songs]\r\n self._make_call(mutate_call, mutations)\r\n\r\n #TODO\r\n # store tracks don't send back their id, so we're\r\n # forced to spoof this\r\n return [utils.id_or_nid(d) for d in songs]",
"def updateFromTrack(self, track):\n try:\n tags = mutagenID3(self.filename)\n except ID3NoHeaderError:\n tags = mutagenID3()\n tags[\"TIT2\"] = TIT2(encoding=3, text=track.title)\n if track.artist:\n tags[\"TPE1\"] = TPE1(encoding=3, text=track.artist.name)\n tags[\"TRCK\"] = TRCK(encoding=3, text=str(track.trackNumber))\n if self.config:\n if 'DoClearComments' in self.config:\n if self.config['DoClearComments'].lower() == \"true\":\n tags.delall(u\"COMM::'en'\")\n tags.save(self.filename)",
"def update(request):\n p = request.POST\n images = defaultdict(dict)\n\n #create dictionary of properties for each image\n\n for k, v in p.items():\n if k.startswith(\"title\") or k.startswith(\"rating\") or k.startswith(\"tags\"):\n k, pk = k.split('-')\n images[pk][k] = v\n elif k.startswith(\"album\"):\n pk = k.split('-')[1]\n images[pk][\"albums\"] = p.getlist(k)\n\n for k, d in images.items():\n image = Image.objects.get(pk=k)\n image.title = d['title']\n image.rating =int(d['rating'])\n\n #tags - assign or create if a new tag!\n\n tags = d[\"tags\"].split(', ')\n lst = []\n for t in tags:\n if t:\n lst.append(Tag.objects.get_or_create(tag=t)[0])\n image.tags = lst\n\n if \"albums\" in d:\n image.albums = d[\"albums\"]\n image.save()\n return HttpResponseRedirect(request.META[\"HTTP_REFERER\"], dict(media_url = MEDIA_URL))",
"def upload_track(track, ytmusic):\n print_filesize(track, track)\n ytmusic.upload_song(track)",
"def playlist_replace_items(self, playlist_id, items):\n plid = self._get_id(\"playlist\", playlist_id)\n ftracks = [self._get_uri(\"track\", tid) for tid in items]\n payload = {\"uris\": ftracks}\n return self._put(\n \"playlists/%s/tracks\" % (plid), payload=payload\n )",
"def updatePlaylists(self, playlistsDict: dict):\n for title in playlistsDict:\n playlist = playlistsDict[title]\n self.collectedData[\"Playlists data\"][playlist.getTitle()] = playlist.getData()\n self.playlistsData[title] = playlist.getSongsTitles()\n self.saveData()",
"def update_a_list(uuid: str, list):\n with get_db_cursor() as cursor:\n cursor.execute(\"UPDATE list SET box_id = %s, item_name= %s, item_id = %s WHERE id = %s\", [list['box_id'], list['item_name'], list['item_id'], uuid])",
"def update_artist(artist, new_name):\n conn = sqlite3.connect(\"mydatabase.db\")\n cursor = conn.cursor()\n \n sql = \"\"\"\n UPDATE albums\n SET artist = ?\n WHERE artist = ?\n \"\"\"\n cursor.execute(sql, (new_name, artist))\n conn.commit()\n cursor.close()\n conn.close()",
"def update_lists(self, list_id, extra=None):\n data = {}\n\n if extra:\n data.update(**extra)\n\n return self.post(\n \"{}/{}\".format(\n ENDPOINT[\"lists\"],\n list_id,\n ),\n data=data\n )",
"def update_mp4(mp4obj, tagobj):\n valid_keys = [\n ('\\xa9alb', 'album'),\n ('\\xa9wrt', 'composer'),\n ('\\xa9gen', 'genre'),\n ('\\xa9day', 'date'),\n #no lyricist field\n ('\\xa9nam', 'title'),\n #no version field\n ('\\xa9ART', 'artist'),\n #('trkn', 'tracknumber')\n #missing asin, mbalbumartistid, mbalbumid, mbtrackid\n ]\n\n for key, field_name in valid_keys:\n if mp4obj.has_key(key):\n if isinstance(mp4obj[key], list):\n tagobj[field_name] = ','.join(mp4obj[key])\n \n if mp4obj.has_key('trkn') and len(mp4obj['trkn']) > 0:\n trkn = mp4obj['trkn'][0]\n if type(trkn) == tuple and len(trkn) == 2:\n tagobj['tracknumber'], tagobj['totaltracks'] = trkn\n elif type(trkn) == unicode:\n tagobj['tracknumber'] = trkn\n else:\n log.info('Unknown type of mp4 track number: %s' % trkn)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates a track's record with audio features
|
def update_track_features(self, tracks: List[Dict]) -> None:
for track in tracks:
q = {"_id": track["id"]}
# Writing updates (formatting changes)
track["audio_features"] = True
track["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
del track["id"]
self._tracks.update_one(q, {"$set": track}, upsert=True)
|
[
"def _update_audio_(course_id, audio_info):\n course = Course.objects.get(course_id=course_id)\n dir = audio_info[\"url\"].split(\"/\")\n if dir[-2] == \"audio_temp\":\n audio = AudioTemp.objects.get(pk=audio_info[\"id\"]).position\n course.audio_url = File(audio, dir[-1])\n audio.close()\n course.save()",
"def addFeaturesSongs(self, f, songs, target, weight):\r\n sp = getSP()\r\n ids = getSongIDs(songs)\r\n k = 0\r\n for i in range(0, len(ids), 50):\r\n audio_features = sp.audio_features(ids[i:i+50])\r\n for track in audio_features:\r\n if track != None:\r\n track['id'] = ids[k]\r\n track['song_title'] = songs[k]['track']['name']\r\n track['artist'] = songs[k]['track']['artists'][0]['name']\r\n popularity = songs[k]['track']['popularity']\r\n k = k + 1\r\n for j in range(0, weight):\r\n f.append(track)\r\n f[-1]['trackPopularity'] = popularity\r\n f[-1]['target'] = target\r\n else:\r\n break",
"def get_audio_features(track_id=None):\n\n # connect to MongoDB\n mongo = MongoDatabase()\n mongo.connect()\n db = mongo.db\n\n if track_id is None:\n # get Spotify Tracks ids\n spotify_ids = db.songs.find({}, {\"spotify_id\" : 1, \"_id\" : 0})\n all_ids = [item[\"spotify_id\"] for item in spotify_ids]\n limit = len(all_ids)\n \n # the url only accept 100 ids at a time, so we will use batch ids\n for i in range(0, 1370, 100):\n if i + 100 < limit:\n batch = ','.join(all_ids[i:i+100])\n else:\n batch = ','.join(all_ids[i:limit])\n\n url = f\"https://api.spotify.com/v1/audio-features/?ids={batch}\"\n \n # get audio features\n result = fetch_data(url)\n\n # keep relevant info \n for audio_feat in result[\"audio_features\"]:\n save_audio_features(audio_feat, db.songs)\n else:\n # get audio features\n url = f\"https://api.spotify.com/v1/audio-features/{track_id}\"\n result = fetch_data(url)\n return result",
"def save_audio_features(result, collection):\n result_id = result.pop(\"id\")\n result.pop(\"type\")\n result.pop(\"uri\")\n result.pop(\"track_href\")\n result.pop(\"analysis_url\")\n\n # save to database\n try:\n collection.update_one({\"spotify_id\" : result_id}, { \"$set\" : result })\n except Exception as e:\n print(e)",
"def _parse_track(self, obj: Dict, add_features: bool = True) -> SongInformation:\n\n def _fetch_features(track_id: str) -> Dict[str, float]:\n feature_dict = self.api.audio_features([track_id])[0]\n return {k: v for k, v in feature_dict.items() if k in FEATURES}\n\n track_id: str = obj['id']\n\n name: str = obj.get('name', None)\n album: str = obj.get('album', {}).get('name', None)\n\n _release_date: str = obj.get('album', {}).get('release_date', None)\n year: int = int(_release_date.split(\"-\")[0]) if _release_date is not None else None\n\n _track_number: str = obj.get('track_number', None)\n track_number: int = int(_track_number) if _track_number is not None else None\n\n total_tracks: int = obj.get('album', {}).get('total_tracks', 0)\n\n _artists: Tuple[str, ...] = tuple(art['name'] for art in obj.get('artists', {}))\n artists: Tuple[str, ...] = _artists if len(_artists) > 0 else None\n\n links: Dict[str, str] = obj.get('external_urls', None)\n\n image_url: str = obj.get('album', {}).get('images', [{}])[0].get('url', None)\n image: Optional[Union[PNGSongImage, JPEGSongImage]] = self._fetch_image(image_url) \\\n if image_url is not None else None\n\n _additional_information = _fetch_features(track_id) if add_features else {}\n additional_information = \"\\n\".join(f\"{k} {v}\" for k, v in _additional_information.items())\n\n return SongInformation(name, album, (track_number, total_tracks), artists, image, year, links=links,\n additional_information=additional_information)",
"def get_audio_features( tracks, tracks_artistnames):\n if not tracks:\n print('No tracks provided.')\n return\n\n \n track_map = {track.get('id'): track for track in tracks}\n\n # Request the audio features for the chosen tracks (limited to 50)\n \n tracks_features_response = spotify.audio_features(tracks=track_map.keys())\n\n desired_features = [\n 'tempo',\n 'time_signature',\n 'key',\n 'mode',\n 'loudness',\n 'energy',\n 'danceability',\n 'acousticness',\n 'instrumentalness',\n 'liveness',\n 'speechiness',\n 'valence'\n ]\n\n tracks_features_list = []\n for track_features in tracks_features_response:\n \n features_dict = dict()\n for feature in desired_features:\n \n feature_value = track_features.get(feature)\n\n \n if feature == 'key':\n feature_value = translate_key_to_pitch(feature_value)\n \n features_dict[feature] = feature_value\n \n tracks_features_list.append(features_dict)\n\n\n\n tracks_features_map = {f.get('id'): [tracks_artistnames[i], tracks_features_list[i], \"https://open.spotify.com/track/\" + f.get('id')] for i, f in enumerate(tracks_features_response)}\n\n \n \n \n \n \n\n return tracks_features_map",
"def setAudio(self, audio, mode):\n\t\tpass",
"def addFeatures(self, f, playlist, target, weight):\r\n #sp = getSP() # update Spotify authorization\r\n tracks = playlist[\"tracks\"]\r\n songs = getSongs(tracks)\r\n # ids = getSongIDs(tracks, songs)\r\n self.addFeaturesSongs(f, songs, target, weight)",
"def update_mp4(mp4obj, tagobj):\n valid_keys = [\n ('\\xa9alb', 'album'),\n ('\\xa9wrt', 'composer'),\n ('\\xa9gen', 'genre'),\n ('\\xa9day', 'date'),\n #no lyricist field\n ('\\xa9nam', 'title'),\n #no version field\n ('\\xa9ART', 'artist'),\n #('trkn', 'tracknumber')\n #missing asin, mbalbumartistid, mbalbumid, mbtrackid\n ]\n\n for key, field_name in valid_keys:\n if mp4obj.has_key(key):\n if isinstance(mp4obj[key], list):\n tagobj[field_name] = ','.join(mp4obj[key])\n \n if mp4obj.has_key('trkn') and len(mp4obj['trkn']) > 0:\n trkn = mp4obj['trkn'][0]\n if type(trkn) == tuple and len(trkn) == 2:\n tagobj['tracknumber'], tagobj['totaltracks'] = trkn\n elif type(trkn) == unicode:\n tagobj['tracknumber'] = trkn\n else:\n log.info('Unknown type of mp4 track number: %s' % trkn)",
"def record():\n\ttime.sleep(0.5)\n\tp = pyaudio.PyAudio()\n\tstream = p.open(format=FORMAT, channels=1, rate=RATE,\n\t\tinput=True, output=True,\n\t\tframes_per_buffer=CHUNK_SIZE)\n\n\tnum_silent = 0\n\tsnd_started = False\n\n\tr = array('h')\n\n\twhile 1:\n\t\t# little endian, signed short\n\t\tsnd_data = array('h', stream.read(CHUNK_SIZE))\n\t\tif byteorder == 'big':\n\t\t\tsnd_data.byteswap()\n\t\tr.extend(snd_data)\n\n\t\tsilent = is_silent(snd_data)\n\n\t\tif silent and snd_started:\n\t\t\tnum_silent += 1\n\t\telif not silent and not snd_started:\n\t\t\tsnd_started = True\n\n\t\tif snd_started and num_silent > 75:\n\t\t\tbreak\n\n\tsample_width = p.get_sample_size(FORMAT)\n\tstream.stop_stream()\n\tstream.close()\n\tp.terminate()\n\n\tr = normalize(r)\n\tr = trim(r)\n\tr = add_silence(r, 0.5)\n\treturn sample_width, r",
"def updateFromTrack(self, track):\n try:\n tags = mutagenID3(self.filename)\n except ID3NoHeaderError:\n tags = mutagenID3()\n tags[\"TIT2\"] = TIT2(encoding=3, text=track.title)\n if track.artist:\n tags[\"TPE1\"] = TPE1(encoding=3, text=track.artist.name)\n tags[\"TRCK\"] = TRCK(encoding=3, text=str(track.trackNumber))\n if self.config:\n if 'DoClearComments' in self.config:\n if self.config['DoClearComments'].lower() == \"true\":\n tags.delall(u\"COMM::'en'\")\n tags.save(self.filename)",
"def play(self, track):\n raise NotImplementedError",
"def search( sp, track, lim=1 ):\n\n identifier = sp.search( q=\"track: \" + track, limit=lim, type=\"track\" )['tracks']['items'][0]['id']\n features = sp.audio_features( identifier )\n analisys = sp.audio_analysis( identifier )\n\n return identifier, features, analisys",
"def update_sample_record(data):\n session = controller.connect_to_database()\n record = session.query(Sample).filter_by(id=data[\"id\"]).one()\n\n record = Sample()\n record.sample = data[\"sample\"]\n record.panel = data[\"panel\"]\n record.sample_taken = data[\"sample_taken\"]\n record.genotyping = data[\"genotyping\"]\n record.variant_calling = data[\"variant_calling\"]\n record.qc_status = data[\"qc_status\"]\n record.qc_report = data[\"qc_report\"]\n record.coverage = data[\"coverage\"]\n\n session.commit()\n session.close()",
"def record_voice():\n fs = 44100 # Sample rate\n seconds = 3 # Duration of recording\n # sd.default.device = \"Built-in Audio\" # Speakers full name here\n\n print(\"Say something:\")\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait() # Wait until recording is finished\n write(\"speech_emotion_recognition/recordings/myvoice.wav\", fs, myrecording)\n print(\"Voice recording saved.\")",
"def audioTrack(solo=bool, insertTrack=int, track=int, lock=bool, removeTrack=int, mute=bool, title=\"string\", numTracks=int, removeEmptyTracks=bool, swapTracks=int):\n pass",
"def record():\n pi = pyaudio.PyAudio()\n stream = pi.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = pi.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n pi.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.2)\n return sample_width, r",
"def update_bad_track_features(self, bad_tracks: List[str]) -> None:\n for track in tqdm(bad_tracks):\n q = {\"_id\": track[\"id\"]}\n\n # Writing updates (formatting changes)\n track[\"audio_features\"] = False\n track[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del track[\"id\"]\n\n self._tracks.update_one(q, {\"$set\": track}, upsert=True)",
"def test_audiences_get_audience_update(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If tracks that can't get features are identified, mark them here
|
def update_bad_track_features(self, bad_tracks: List[Dict]) -> None:
for track in tqdm(bad_tracks):
q = {"_id": track["id"]}
# Writing updates (formatting changes)
track["audio_features"] = False
track["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
del track["id"]
self._tracks.update_one(q, {"$set": track}, upsert=True)
|
[
"def hastracks(self):\n return len(self._tracks) > 0",
"def isLayerTrack(self):\r\n\t\treturn None",
"def update_track_features(self, tracks: List[Dict]) -> None:\n for track in tracks:\n q = {\"_id\": track[\"id\"]}\n\n # Writing updates (formatting changes)\n track[\"audio_features\"] = True\n track[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del track[\"id\"]\n\n self._tracks.update_one(q, {\"$set\": track}, upsert=True)",
"def get_track_features(\n creds_filepath='/Users/lukewoloszyn/.apikeys/spotify.json',\n max_retries=10):\n\n client = MongoClient()\n db = client['albumpitch']\n coll = db['pitchfork']\n\n with open(creds_filepath) as f:\n creds = json.loads(f.read())\n my_id = creds['id']\n my_secret = creds['secret']\n\n ccm = SpotifyClientCredentials(\n client_id=my_id,\n client_secret=my_secret)\n\n # sp = spotipy.Spotify(auth=access_token)\n sp = spotipy.Spotify(client_credentials_manager=ccm)\n sp.trace = False\n\n coll_spotify_albums = db['spotify_albums']\n coll_spotify_albums.create_index(\n [('id', ASC), ('pitchfork_id', ASC)],\n unique=True)\n\n coll_audio_features = db['spotify_audio_features']\n coll_audio_features.create_index(\n [('id', ASC), ('album_id', ASC), ('pitchfork_id', ASC)],\n unique=True)\n\n # for i, doc in enumerate(coll.find(), 1):\n for i, doc in enumerate(coll.find({'spotify_found': {'$exists': 0}}), 1):\n if i % 50 == 0:\n print('Got audio features for {:d} albums'.format(i))\n\n artist = ' '.join(doc['artists'])\n # artist = doc['artists'][0]\n album = doc['album']\n\n # spotify doesn't like the EP ending so remove it\n if album.split()[-1] == 'EP':\n album = ' '.join(album.split()[0:-1])\n\n # a few tricks to get a few more items\n # try to find a non-collector's edition (second pass)\n # album = re.sub(r'[\\[\\(].*(Edition|Remastered).*[\\]\\)]', '',\n # album, flags=re.IGNORECASE).strip()\n # album = re.sub(r'[:[(][^:]*(Edition|Remaster|Reissue).*', '',\n # album, flags=re.IGNORECASE).strip()\n # album = re.sub(r'\\bOST\\b', 'Original Soundtrack', album)\n # album = re.sub(r'\\bOST\\b', '', album)\n # album = re.sub(r'[:[(][^:]*(Original|OST).*', '',\n # album, flags=re.IGNORECASE).strip()\n\n album = re.sub(':', '', album)\n artist = re.sub(':', '', artist)\n try:\n artist = unidecode(artist)\n album = unidecode(album)\n query = 'artist:{:s} album:{:s}'.format(artist, album)\n except:\n print(\"Can't decode {:s}\".format(query))\n continue\n\n for j in xrange(max_retries):\n try:\n result = sp.search(query, type='album')\n break\n except:\n print('Query for album {:s} failed, {:d} retries left'\n .format(doc['url'], max_retries-j-1))\n time.sleep(5)\n continue\n else:\n with open('../logs/unable_to_search_album', 'a') as f:\n f.write(doc['url']+'\\n')\n continue\n\n if not len(result['albums']['items']):\n with open('../logs/query_not_in_spotify_catalog_ost2', 'a') as f:\n f.write(doc['url']+'\\n')\n continue\n\n albums = result['albums']['items']\n\n for j, album in enumerate(albums):\n album['pitchfork_id'] = doc['review_id']\n album['pitchfork_url'] = doc['url']\n album['result_number'] = j\n\n album_id = album['id']\n tracks = sp.album_tracks(album_id)\n album['tracks'] = tracks\n try:\n coll_spotify_albums.insert_one(album)\n except pymongo.errors.DuplicateKeyError:\n print('Duplicate album')\n pass\n\n # now get audio features for all tracks in album\n track_ids = [track['id'] for track in tracks['items']]\n track_afs = sp.audio_features(tracks=track_ids)\n # replace empty tracks with dict (spotify error?)\n track_afs = [track_af if track_af else {'track_corrupt': True}\n for track_af in track_afs]\n for track_af in track_afs:\n track_af['pitchfork_id'] = doc['review_id']\n track_af['pitchfork_url'] = doc['url']\n track_af['album'] = doc['album']\n track_af['artist'] = doc['artists']\n track_af['album_id'] = album_id\n\n # coll_audio_features.insert_many(audio_features)\n for track_af in track_afs:\n try:\n coll_audio_features.insert_one(track_af)\n except 
pymongo.errors.DuplicateKeyError:\n print('Duplicate track')\n pass\n time.sleep(1)\n\n coll.update_one(\n {'_id': doc['_id']},\n {\n '$set': {'spotify_found': True},\n '$currentDate': {'lastModified': True}\n })\n\n client.close()",
"def get_audio_features( tracks, tracks_artistnames):\n if not tracks:\n print('No tracks provided.')\n return\n\n \n track_map = {track.get('id'): track for track in tracks}\n\n # Request the audio features for the chosen tracks (limited to 50)\n \n tracks_features_response = spotify.audio_features(tracks=track_map.keys())\n\n desired_features = [\n 'tempo',\n 'time_signature',\n 'key',\n 'mode',\n 'loudness',\n 'energy',\n 'danceability',\n 'acousticness',\n 'instrumentalness',\n 'liveness',\n 'speechiness',\n 'valence'\n ]\n\n tracks_features_list = []\n for track_features in tracks_features_response:\n \n features_dict = dict()\n for feature in desired_features:\n \n feature_value = track_features.get(feature)\n\n \n if feature == 'key':\n feature_value = translate_key_to_pitch(feature_value)\n \n features_dict[feature] = feature_value\n \n tracks_features_list.append(features_dict)\n\n\n\n tracks_features_map = {f.get('id'): [tracks_artistnames[i], tracks_features_list[i], \"https://open.spotify.com/track/\" + f.get('id')] for i, f in enumerate(tracks_features_response)}\n\n \n \n \n \n \n\n return tracks_features_map",
"def has_mark(self):",
"def setUpFeatures(self):\r\n if (self.goodPlaylists == self.oldGood and\r\n self.badPlaylists == self.oldBad):\r\n self.features = self.oldFeatures\r\n else:\r\n self.addPlaylistFeatures(self.features, self.goodPlaylists, 1)\r\n self.addPlaylistFeatures(self.features, self.badPlaylists, 0)",
"def test_get_feature_flag_statuses(self):\n pass",
"def get_features(track_id: str, sp: ...) -> ...: # TODO ***************\n features = sp.audio_features('spotify:track:' + track_id)\n return([features[0]['acousticness'], features[0]['danceability'], features[0]['energy'],\n features[0]['duration_ms'], features[0]['instrumentalness'], features[\n 0]['valence'], features[0]['tempo'], features[0]['liveness'],\n features[0]['loudness'], features[0]['speechiness'], features[0]['key']])",
"def prepare_genome_features_track(self, genome_ref, vfs_url):\n shock_handles = list()\n gff_track = \"\"\n\n # 1) Download gff using genomefileutil\n gff_file_info = self.gfu.genome_to_gff({'genome_ref': genome_ref})\n gff_file = gff_file_info[\"file_path\"]\n\n # 2) sort gff\n outfile = gff_file + \"_sorted\"\n sorted_gff_cmd = \" \".join([\"sort -k1,1 -k4,4n\",\n gff_file, \">\", outfile])\n self._run_cmd(sorted_gff_cmd)\n\n # 3) compress gff\n zip_cmd = \"bgzip \" + outfile\n self._run_cmd(zip_cmd)\n\n # 4) index gff\n index_gff_cmd = \"tabix -p gff \" + gff_file + \"_sorted.gz\"\n self._run_cmd(index_gff_cmd)\n\n gff_gz_file_path = gff_file + \"_sorted.gz\"\n gff_index_file_path = gff_file + \"_sorted.gz.tbi\"\n\n # 5) Upload gff and gff index to shock\n if os.path.exists(gff_gz_file_path):\n gff_shock_ref = self.dfu.file_to_shock(\n {'file_path': gff_gz_file_path, 'make_handle': 1}\n )\n if os.path.exists(gff_index_file_path):\n gff_index_shock_ref = self.dfu.file_to_shock(\n {'file_path': gff_index_file_path, 'make_handle': 1}\n )\n\n # 6 Create gff track text that will be used for genome features track\n gff_track = '''\n {\n \"label\": \"Genome Features\",\n \"key\": \"GenomeFeatures\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/GFF3Tabix\",\n \"urlTemplate\":\"<vfs_url>/<gff_shock_ref>\",\n \"tbiUrlTemplate\": \"<vfs_url>/<gff_index_shock_ref>\",\n \"type\": \"JBrowse/View/Track/CanvasFeatures\"\n }\n '''\n gff_track = gff_track.replace(\"<gff_shock_ref>\",\n gff_shock_ref['handle']['id'])\n gff_track = gff_track.replace(\"<gff_index_shock_ref>\",\n gff_index_shock_ref['handle']['id'])\n gff_track = gff_track.replace(\"<vfs_url>\", vfs_url)\n gff_track_dict = json.loads(gff_track)\n\n # 7) Capture shock handles\n shock_handles.append(gff_shock_ref['handle'])\n shock_handles.append(gff_index_shock_ref['handle'])\n\n # 8) return shock handles and gff track info\n return {\"shock_handle_list\": shock_handles, \"track_item\": gff_track_dict}",
"def is_tracks(self):\n\n return len(self.files) == len(self.tracks)",
"def _ape_master_track_update_fire(self):\r\n pass",
"def test_post_feature_flag(self):\n pass",
"def get_all_features(track_list = list, artist_list = list, sp=None):\n\n track_features = []\n artist_features = []\n\n track_iters = int(len(track_list)/50)\n track_remainders = len(track_list)%50\n\n start = 0\n end = start+50\n\n for i in range(track_iters):\n track_features.extend(sp.audio_features(track_list[start:end]))\n artist_features.extend(sp.artists(artist_list[start:end]).get('artists'))\n start += 50\n end = start+50\n\n\n if track_remainders:\n end = start + track_remainders\n track_features.extend(sp.audio_features(track_list[start:end]))\n artist_features.extend(sp.artists(artist_list[start:end]).get('artists'))\n\n\n return track_features, artist_features",
"def test_get_feature_flag(self):\n pass",
"def build_tracks(self, dataset, data_name):\n pass",
"def trackObj(detectSeq, thresh, trackThresh, dir):\n # Initialize new tracks from the first frame\n \n objID = np.arange(len(detectSeq[0][1]))\n objVel = np.zeros((2,len(objID)))\n feats = featureExtract(dir+detectSeq[0][0],detectSeq[0][1],centr=True,patchFeat=False)\n\n objTrack = list()\n for i in range(len(objID)):\n objTrack.append([(detectSeq[0][1][i],0)])\n \n for frame in range(0,len(detectSeq)-1):\n \n # Predict position of past detections assuming constant velocity\n \n if objVel.shape[0]>0:\n patches_1 = feats+objVel\n else:\n patches_1 = np.zeros(0)\n\n\n # Compute features of new detections\n \n patches_2 = featureExtract(dir+detectSeq[frame+1][0],detectSeq[frame+1][1],centr=True,patchFeat=False)\n newObjID = np.zeros(len(detectSeq[frame+1][1]))\n newObjVel = np.zeros((2,len(newObjID)))\n\n # Assign new detections to existing tracks\n\n if(patches_1.shape[0]>0 and patches_2.shape[0]>0):\n match = matchError(patches_1, patches_2, thresh)\n else:\n match = -1*np.ones(patches_2.shape[1])\n\n # Update the existing tracks and start new tracks for unassigned detections\n\n for j in range(patches_2.shape[1]):\n if match[j]==-1:\n newObjID[j] = len(objTrack)\n objTrack.append([(detectSeq[frame+1][1][j],frame+1)])\n else:\n newObjID[j] = objID[match[j]]\n lastObjInd = len(objTrack[int(newObjID[j])])-1\n newObjVel[0,j] = patches_2[0,j]-objTrack[int(newObjID[j])][lastObjInd][0][0]\n newObjVel[1,j] = patches_2[1,j]-objTrack[int(newObjID[j])][lastObjInd][0][1]\n objTrack[int(newObjID[j])].append((detectSeq[frame+1][1][j],frame+1))\n objID = newObjID\n objVel = newObjVel\n feats = patches_2\n \n # Only keep tracks which are long enough\n\n filteredTracks = []\n for track in objTrack:\n if len(track)>trackThresh:\n filteredTracks.append(track)\n\n return filteredTracks",
"def process_track(filename):\n track = Track.from_gpx(filename)[0]\n track.compute_metrics()\n\n for segment in track.segments:\n features = extract_features_2(segment.points)\n return features",
"def classify_success(feature_frame, users_threshold=10, success_threshold=0.75):\n assert (\n users_threshold >= 10,\n \"Acoustic features from Spotify API only obtained for playlists with more than 10 \"\n \"monthly users\",\n )\n\n # Filtering out playlists with an outlier number of tracks\n n_tracks_upper_quantile = feature_frame[\"n_tracks\"].quantile(0.75)\n n_tracks_lower_quantile = feature_frame[\"n_tracks\"].quantile(0.25)\n iqr = n_tracks_upper_quantile - n_tracks_lower_quantile\n\n upper_track_limit = n_tracks_upper_quantile + (1.5 * iqr)\n lower_track_limit = n_tracks_lower_quantile - (1.5 * iqr)\n\n target_frame = (\n feature_frame.loc[lambda f: f[\"n_tracks\"] <= upper_track_limit]\n .loc[lambda f: f[\"n_tracks\"] >= lower_track_limit]\n .loc[lambda f: f[\"users_adjusted\"] > users_threshold]\n ).reset_index(drop=True)\n\n num_playlists_all = len(feature_frame)\n num_playlists_thresh = len(target_frame)\n logging.info(f\"# of playlists: {num_playlists_all}\")\n logging.info(f\"# of playlists above the users_threshold: {num_playlists_thresh}\")\n logging.info(f\"% of playlists removed: {num_playlists_all - num_playlists_thresh}\")\n logging.info(\n f\"% of playlists remaining: {round(num_playlists_thresh / num_playlists_all * 100, 1)}\"\n )\n\n threshold_frame_plays = target_frame.groupby(\"genre_1\").quantile(\n q=success_threshold\n )[[\"streaming_ratio_users\"]]\n threshold_frame_plays.columns = [\n str(col) + \"_thresh\" for col in threshold_frame_plays.columns\n ]\n\n success_frame = target_frame.merge(\n threshold_frame_plays.reset_index()[\n [\n \"genre_1\",\n \"streaming_ratio_users_thresh\",\n ]\n ],\n on=\"genre_1\",\n how=\"left\",\n ).assign(\n success_streaming_ratio_users=lambda f: np.where(\n f[\"streaming_ratio_users\"] >= f[\"streaming_ratio_users_thresh\"], 1, 0\n )\n )\n return success_frame"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes in a specific audio_filter format to get tracks with a filter
|
def filter_tracks_by_audio_feature(self, tracks: List[str], audio_filter: Dict) -> List[str]:
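    # Match only the requested track ids, then apply the caller-supplied audio-feature conditions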
q = {"_id": {"$in": tracks}, **audio_filter}
cols = {"_id": 1}
r = list(self._tracks.find(q, cols))
return [x["_id"] for x in r]
|
[
"def apply_audio_filters(\n audio_filters: list[AudioMatch] | bool | None,\n original_tracks: list[Box],\n):\n if not audio_filters:\n return []\n\n original_tracks = deepcopy(original_tracks)\n\n tracks = []\n for audio_match in audio_filters:\n if audio_match.match_item == MatchItem.ALL:\n track_select = original_tracks.copy()\n if track_select:\n if audio_match.match_type == MatchType.FIRST:\n track_select = [track_select[0]]\n elif audio_match.match_type == MatchType.LAST:\n track_select = [track_select[-1]]\n for track in track_select:\n tracks.append((track, audio_match))\n\n elif audio_match.match_item == MatchItem.TITLE:\n subset_tracks = []\n for track in original_tracks:\n if audio_match.match_input.lower() in track.tags.get(\"title\", \"\").casefold():\n subset_tracks.append((track, audio_match))\n if subset_tracks:\n if audio_match.match_type == MatchType.FIRST:\n tracks.append(subset_tracks[0])\n elif audio_match.match_type == MatchType.LAST:\n tracks.append(subset_tracks[-1])\n else:\n tracks.extend(subset_tracks)\n\n elif audio_match.match_item == MatchItem.TRACK:\n for track in original_tracks:\n if track.index == int(audio_match.match_input):\n tracks.append((track, audio_match))\n\n elif audio_match.match_item == MatchItem.LANGUAGE:\n subset_tracks = []\n for track in original_tracks:\n try:\n if Lang(audio_match.match_input) == Lang(track.tags[\"language\"]):\n subset_tracks.append((track, audio_match))\n except (InvalidLanguageValue, KeyError):\n pass\n if subset_tracks:\n if audio_match.match_type == MatchType.FIRST:\n tracks.append(subset_tracks[0])\n elif audio_match.match_type == MatchType.LAST:\n tracks.append(subset_tracks[-1])\n else:\n tracks.extend(subset_tracks)\n\n elif audio_match.match_item == MatchItem.CHANNELS:\n subset_tracks = []\n for track in original_tracks:\n if int(audio_match.match_input) == track.channels:\n subset_tracks.append((track, audio_match))\n if subset_tracks:\n if audio_match.match_type == MatchType.FIRST:\n tracks.append(subset_tracks[0])\n elif audio_match.match_type == MatchType.LAST:\n tracks.append(subset_tracks[-1])\n else:\n tracks.extend(subset_tracks)\n\n return sorted(tracks, key=lambda x: x[0].index)",
"def compare_magnitude(filter_type, original_audio, filtered_audio, sr):\n plt.grid(True)\n plt.subplot(2, 1, 1)\n plt.title('Original')\n librosa.display.waveplot(original_audio, sr=sr)\n plt.subplot(2, 1, 2)\n plt.title('Filtered by ' + filter_type)\n librosa.display.waveplot(filtered_audio, sr=sr)\n plt.show()\n\n plt.plot(original_audio, label='Input', color='r')\n plt.plot(filtered_audio, label='Output', color='b')\n plt.show()",
"def preprocess_filters(x, Fs):\n # Low pass at 200Hz\n x_lo = neurodsp.filter(x, Fs, 'lowpass', f_lo=200, N_seconds=.1)\n\n # Highpass at 2Hz - figure out order\n x_hi = neurodsp.filter(x_lo, Fs, 'highpass', f_hi=2, N_seconds=2)\n\n # Notch filter at 60Hz, 120Hz and 180Hz\n N_seconds = .5\n x_notch = neurodsp.filter(x_hi, Fs, 'bandstop', f_lo=58, f_hi=62, N_seconds=N_seconds)\n x_notch = neurodsp.filter(x_notch, Fs, 'bandstop', f_lo=118, f_hi=122, N_seconds=N_seconds)\n x_notch = neurodsp.filter(x_notch, Fs, 'bandstop', f_lo=178, f_hi=182, N_seconds=N_seconds)\n\n return x_notch",
"def load_filter(given_filter):\n\n filternamemap={}\n filttype=str.split(given_filter,'_')\n if filttype[0]=='SDSS':\n filternamemap=filttype[0].lower()+'2010-'+filttype[1].lower()\n if filttype[0]=='DECAM':\n if filttype[1]=='Y':\n filternamemap=filttype[0].lower()+'2014-'+filttype[1]\n else: filternamemap=filttype[0].lower()+'2014-'+filttype[1].lower()\n if filttype[0]=='WISE':\n filternamemap=filttype[0].lower()+'2010-'+filttype[1]\n\n filter_response=speclite.filters.load_filter(filternamemap)\n return filter_response",
"def search( sp, track, lim=1 ):\n\n identifier = sp.search( q=\"track: \" + track, limit=lim, type=\"track\" )['tracks']['items'][0]['id']\n features = sp.audio_features( identifier )\n analisys = sp.audio_analysis( identifier )\n\n return identifier, features, analisys",
"def load_filter(fpath):\n with open(fpath, 'rb') as f:\n filt = np.frombuffer(f.read(), dtype=np.float)\n \n return filt, 44100",
"def get_tracks_audio_features(track_ids):\n connect()\n url = 'https://api.spotify.com/v1/audio-features/'\n # Max that can be submitted to this endpoint is 100 at a time\n track_groups = make_chunks(track_ids, 100)\n audio_features = []\n for group in track_groups:\n query_params = {'ids': ','.join(group)}\n response = requests.get(\n url, params=query_params, headers=get_header()\n )\n resp_json = response.json()\n if resp_json.get('audio_features'):\n audio_features.extend(resp_json['audio_features'])\n return audio_features",
"def get_song_info(self, song):\n song_details = song.audio_features\n filter_mappings = {\n \"duration_ms\": song_details.duration,\n \"key\": song_details.key,\n \"tempo\": song_details.tempo,\n \"danceability\": song_details.danceability,\n \"energy\": song_details.energy,\n \"loudness\": song_details.loudness,\n \"mode\": song_details.mode,\n \"speechiness\": song_details.speechiness,\n \"acousticness\": song_details.acousticness,\n \"instrumentalness\": song_details.instrumentalness,\n \"liveness\": song_details.liveness,\n \"valence\": song_details.valence,\n \"time_signature\": song_details.time_signature\n }\n # store only the specified filters and corresponding filter values\n # into a dictionary\n filtered_song_details = {}\n for filter in self.filter_list:\n filtered_song_details[filter] = filter_mappings[filter]\n return filtered_song_details",
"def get_tracks_for_audio_analysis(self) -> List[str]:\n \n l.debug(\"Finding Tracks without audio analysis, this can take some time.\")\n q = {}\n cols = {\"_id\": 1, \"audio_analysis_flag\": 1}\n r = list(self._tracks.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for track in r:\n if \"audio_analysis_flag\" not in track.keys():\n result.append(track[\"_id\"])\n else:\n if not track[\"audio_analysis_flag\"]:\n result.append(track[\"_id\"])\n return result",
"def pre_process_audio(self):\n # clean up nans that have been mysteriously appearing..\n self._raw_audio_sample[np.isnan(self._raw_audio_sample)] = 0\n\n # Calculate the current volume for silence detection\n self._volume = 1 + aubio.db_spl(self._raw_audio_sample) / 100\n self._volume = max(0, min(1, self._volume))\n self._volume_filter.update(self._volume)\n\n # Calculate the frequency domain from the filtered data and\n # force all zeros when below the volume threshold\n if self._volume_filter.value > self._config[\"min_volume\"]:\n self._processed_audio_sample = self._raw_audio_sample\n\n # Perform a pre-emphasis to balance the highs and lows\n if self.pre_emphasis:\n self._processed_audio_sample = self.pre_emphasis(\n self._raw_audio_sample\n )\n\n # Pass into the phase vocoder to get a windowed FFT\n self._frequency_domain = self._phase_vocoder(\n self._processed_audio_sample\n )\n else:\n self._frequency_domain = self._frequency_domain_null",
"def get_audio_features( tracks, tracks_artistnames):\n if not tracks:\n print('No tracks provided.')\n return\n\n \n track_map = {track.get('id'): track for track in tracks}\n\n # Request the audio features for the chosen tracks (limited to 50)\n \n tracks_features_response = spotify.audio_features(tracks=track_map.keys())\n\n desired_features = [\n 'tempo',\n 'time_signature',\n 'key',\n 'mode',\n 'loudness',\n 'energy',\n 'danceability',\n 'acousticness',\n 'instrumentalness',\n 'liveness',\n 'speechiness',\n 'valence'\n ]\n\n tracks_features_list = []\n for track_features in tracks_features_response:\n \n features_dict = dict()\n for feature in desired_features:\n \n feature_value = track_features.get(feature)\n\n \n if feature == 'key':\n feature_value = translate_key_to_pitch(feature_value)\n \n features_dict[feature] = feature_value\n \n tracks_features_list.append(features_dict)\n\n\n\n tracks_features_map = {f.get('id'): [tracks_artistnames[i], tracks_features_list[i], \"https://open.spotify.com/track/\" + f.get('id')] for i, f in enumerate(tracks_features_response)}\n\n \n \n \n \n \n\n return tracks_features_map",
"def audio_to_array(in_path: str, in_format: str, sr: bool = False, as_sample: bool = False,\n ffmpeg_path: str = r\"C:/ffmpeg/bin/ffmpeg.exe\"):\n if in_format == 'mp3':\n AudioSegment.converter = ffmpeg_path\n sample = AudioSegment.from_file(in_path, in_format)\n song = sample\n srt = sample.frame_rate\n sample = sample.get_array_of_samples()\n sample = np.array(sample, dtype=float)\n if sr & as_sample:\n return sample, srt, song\n elif sr:\n return sample, srt\n elif as_sample:\n return sample, song\n else:\n return sample",
"def get_audio_analysis(self, track_id):\n url = \"https://api.spotify.com/v1/audio-analysis/\" + track_id\n headers = {'Authorization': \"Bearer \" + self.token}\n\n request = self.session.get(url, headers=headers)\n return request",
"def get_audio_features(track_id=None):\n\n # connect to MongoDB\n mongo = MongoDatabase()\n mongo.connect()\n db = mongo.db\n\n if track_id is None:\n # get Spotify Tracks ids\n spotify_ids = db.songs.find({}, {\"spotify_id\" : 1, \"_id\" : 0})\n all_ids = [item[\"spotify_id\"] for item in spotify_ids]\n limit = len(all_ids)\n \n # the url only accept 100 ids at a time, so we will use batch ids\n for i in range(0, 1370, 100):\n if i + 100 < limit:\n batch = ','.join(all_ids[i:i+100])\n else:\n batch = ','.join(all_ids[i:limit])\n\n url = f\"https://api.spotify.com/v1/audio-features/?ids={batch}\"\n \n # get audio features\n result = fetch_data(url)\n\n # keep relevant info \n for audio_feat in result[\"audio_features\"]:\n save_audio_features(audio_feat, db.songs)\n else:\n # get audio features\n url = f\"https://api.spotify.com/v1/audio-features/{track_id}\"\n result = fetch_data(url)\n return result",
"def register_filter(wavelength, throughput, format='photon', reference='User', description='None'):\n\t\t\t\t\t\n\t# Read existing filters and create a list of filter ID numbers (which are the same as the filternames)\n\tOldFilterFiles = glob.glob(FortesFit_Settings.FilterDirectory+'*.fortesfilter.xml')\n\tif(len(OldFilterFiles) == 1):\n\t\tprint('You are registering your first filter. Exciting!')\n\tOldIDs = []\n\tfor OldFile in OldFilterFiles:\n\t\tOldIDs.append(np.int(os.path.basename(OldFile).split('.')[0]))\n\tOldIDs = np.array(OldIDs,dtype=int)\t\t\n\n\t# Assign a random and unique 6 digit number for the new filter.\n\t# This approach allows for a maximum of N=900000 filters, which should be sufficient.\n\t# When the number of filters approaches N, this method of assignment becomes in efficient. \n\tNewIDChecked = False\n\twhile (not NewIDChecked):\n\t\tNewID = np.random.randint(100000, high=999999 + 1)\n\t\tindex, = np.where(OldIDs == NewID)\n\t\tif(len(index) == 0):\n\t\t\tNewIDChecked = True\t\t\t\t\t\n\n\t# Convert inputs to Numpy Arrays\n\tWaveLength = np.array(wavelength)\n\tThroughPut = np.array(throughput)\n\t\n\t# Sort the inputs by wavelength low to high\n\tsortindex = np.argsort(WaveLength)\n\tWaveLength = WaveLength[sortindex]\n\tThroughPut = ThroughPut[sortindex]\n\n\t# Clean the throughput. If < 1e-4*max, set to 0.0\n\tMaxThroughPut = ThroughPut.max()\n\tCleanedThroughPut = np.where(ThroughPut > 1.0e-4*MaxThroughPut, ThroughPut, np.full(len(ThroughPut),0.0))\n\t\n\t# Create the output table\n\tfilter_table = Table([WaveLength, CleanedThroughPut],\\\n\t\t\t\t\t\t names = ['Wavelength', 'Throughput'])\n\tmaintable = votable_routines.tree.VOTableFile.from_table(filter_table,'FORTESAGN')\n\tmaintable.resources[0].infos.append(votable_routines.tree.Info(name='format',value=format))\n\tmaintable.resources[0].infos.append(votable_routines.tree.Info(name='description',value=description))\n\tmaintable.resources[0].links.append(votable_routines.tree.Link(href=reference))\n\tmaintable.resources[0].tables[0].fields[0].unit = '10-6m'\n\tmaintable.resources[0].tables[0].fields[1].unit = ''\n\t\n\t# Write the filter function to a FITS file\n\tOutFile = FortesFit_Settings.FilterDirectory+'{0:6d}.fortesfilter.xml'.format(NewID)\n\tmaintable.to_xml(OutFile)\n\t\n\tsummarize_filters()\n\t\n\treturn NewID",
"def get_track_features(track_id, sp):\n\n feature_filter = ['danceability', 'energy', 'instrumentalness', 'loudness', 'speechiness', 'tempo', 'valence']\n return_features = []\n\n # Get features from this track.\n features = sp.audio_features([track_id])\n\n if None in features:\n return []\n\n # Add desired features of track.\n for feature in features[0]:\n if feature in feature_filter:\n return_features.append(features[0][feature])\n\n return return_features",
"def test_audio_convert_to_wav(self):\n pass",
"def get_tracks_audio_features_from_category(category):\n tracks_meta = get_all_songs_in_category(category)\n track_ids = parse_track_ids_from_metadata(tracks_meta)\n return get_tracks_audio_features(track_ids)",
"def _filter(self, stream):\n w = self.widgets\n type = str(w.qComboBox_filterType.currentText()).lower()\n options = {}\n options['corners'] = 1\n options['zerophase'] = True#w.qCheckBox_zerophase.isChecked()\n if type in (\"bandpass\", \"bandstop\"):\n options['freqmin'] = w.qDoubleSpinBox_highpass.value()\n options['freqmax'] = w.qDoubleSpinBox_lowpass.value()\n elif type == \"lowpass\":\n options['freq'] = w.qDoubleSpinBox_lowpass.value()\n elif type == \"highpass\":\n options['freq'] = w.qDoubleSpinBox_highpass.value()\n if type in (\"bandpass\", \"bandstop\"):\n msg = \"%s (zerophase=%s): %.2f-%.2f Hz\" % \\\n (type, options['zerophase'],\n options['freqmin'], options['freqmax'])\n elif type in (\"lowpass\", \"highpass\"):\n msg = \"%s (zerophase=%s): %.2f Hz\" % \\\n (type, options['zerophase'], options['freq'])\n #try:\n stream.filter(type, **options)\n print msg\n #except:\n err = \"Error during filtering. Showing unfiltered data.\"\n print >> sys.stderr, err"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a unique track name based on the name and artists, avoiding the same track being counted multiple times
|
def gen_unique_track_id(self, track_name: str, artists: List[str]) -> str:
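    # Strip punctuation and spaces so minor formatting differences don't produce different ids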
bad_chars = ",. "
for char in bad_chars:
track_name = track_name.replace(char, "")
artist_string = "A&A".join(artists)
return track_name + "T&A" + artist_string
|
[
"def get_track_identifier(self):\n return (self.name, ','.join(self.artists))",
"def _generate_track_filename(self, extention):\n track_filename = ''\n for char in self.title:\n if char in \" -,.;:(){}[]`~'\":\n track_filename += '_'\n else:\n track_filename += char\n\n if extention != '':\n track_filename = f'{track_filename}.{extention}'\n else:\n pass\n\n return track_filename",
"def renderName(self, torrentdata):\n if len(torrentdata[\"group\"][\"musicInfo\"][\"artists\"]) > self.config[\"pattern\"][\"listindividualartists\"]:\n artist = self.config[\"pattern\"][\"variousartists\"]\n else:\n artist = self.config[\"pattern\"][\"artistjoiner\"].join(sorted([artist[\"name\"] for artist in torrentdata[\"group\"][\"musicInfo\"][\"artists\"]]))\n\n fileformat = torrentdata[\"torrent\"][\"format\"]\n\n formatdata = {\n \"artist\": artist,\n \"album\": torrentdata[\"group\"][\"name\"],\n \"year\": torrentdata[\"group\"][\"year\"],\n \"format\": fileformat\n }\n name = self.config[\"pattern\"][\"string\"] % formatdata\n\n return name",
"def get_artist_name(self):\n with open(\"./data/artist_names.json\") as names:\n artist_names = json.load(names)\n names.close()\n\n random.seed(time.time())\n random_key_num = str(random.randint(0,len(artist_names) - 1))\n artist_name = artist_names[random_key_num]\n \n return artist_name",
"def full_album_name(artist_name, song_name):\n full_name = {'artist name': artist_name, 'song_name': song_name}\n return full_name",
"def generate_artists(tracks):\n artist_pool = _generate_artist_pool_lower_case(tracks)\n artists = []\n for track in chain.from_iterable([d.values() for d in tracks.values()]):\n for name, import_ in track[\"artists\"]:\n name = artist_pool[normalize_accents(name.lower())]\n if (name, import_) not in artists:\n artists.append((name, import_))\n artists, tracks = filter_artists(artists, tracks)\n return artists, tracks",
"def generate_mp3_basename(metadata):\n _log.call(metadata)\n return _generate_basename(\"MP3\", metadata)",
"def _insert_album(\n self,\n *,\n medium_count=2,\n track_count=3,\n artists=None,\n **kwargs,\n ): # yapf: disable\n for discnumber in range(1, medium_count + 1):\n for tracknumber in range(1, track_count + 1):\n extra_kwargs = {}\n if artists is not None:\n extra_kwargs['artist'] = artists[tracknumber - 1]\n track = self._insert_track(\n tracknumber=str(tracknumber),\n title=f'Cool Song #{tracknumber}',\n discnumber=str(discnumber),\n discsubtitle=f'Sweet Disc #{discnumber}',\n **extra_kwargs,\n **kwargs,\n )\n return track.album_token",
"def generate_flac_basename(metadata):\n _log.call(metadata)\n return _generate_basename(\"FLAC\", metadata)",
"def generateSongName(style=None):\n\n if style:\n generator = generators[style]\n else:\n generator = random.choice(generators.values())\n\n song_title = generator()\n return string.capwords(song_title)",
"def generate_unique_shot_name(base_name, shot_name_increment=10):\n logger.debug(\"generating unique shot number based on: %s\" % base_name)\n logger.debug(\"shot_name_increment is: %s\" % shot_name_increment)\n import re\n from stalker.db.session import DBSession\n from stalker import Shot\n\n regex = re.compile(\"[0-9]+\")\n\n # base_name: Ep001_001_0010\n name_parts = base_name.split(\"_\")\n\n # find the shot number\n try:\n shot_number_as_string = regex.findall(name_parts[-1])[-1]\n except IndexError:\n # no number in name\n name_parts = [name_parts[0], \"000\"]\n shot_number_as_string = \"000\"\n\n padding = len(shot_number_as_string)\n shot_number = int(shot_number_as_string)\n\n # initialize from the given shot_number\n i = shot_number\n\n logger.debug(\"start shot_number: %s\" % shot_number)\n\n # initialize existing_shot variable with base_name\n while True and i < 100000:\n name_parts[-1] = str(i).zfill(padding)\n shot_name = \"_\".join(name_parts)\n with DBSession.no_autoflush:\n existing_shot = (\n DBSession.query(Shot.name).filter(Shot.name == shot_name).first()\n )\n if not existing_shot:\n logger.debug(\"generated unique shot name: %s\" % shot_name)\n return shot_name\n i += shot_name_increment\n\n raise RuntimeError(\"Can not generate a unique shot name!!!\")",
"def generate_track(\n self,\n trackno,\n discno,\n artists,\n title,\n replay_gain=None,\n peak=None,\n format_=None,\n explicit=None,\n isrc=None,\n stream_id=None,\n streamable=None,\n **kwargs,\n ):\n return {\n \"track#\": str(trackno),\n \"disc#\": str(discno),\n \"tracktotal\": None, # Filled out once all tracks are scraped.\n \"disctotal\": None, # Same ^\n \"artists\": artists,\n \"title\": title,\n \"replay_gain\": replay_gain,\n \"peak\": peak,\n \"explicit\": explicit,\n \"isrc\": isrc,\n \"format\": format_,\n \"stream_id\": stream_id,\n \"streamable\": streamable,\n **kwargs,\n }",
"def _create_album_info(self, title, artist_name, tracks, length):\n album = Label(0.04167, \"text\", 0.50146, 0.13,\n artist_name + \" - \" + title, font_weight=\"bold\")\n album.set_size(0.4393, 0.06510)\n album.set_ellipsize(pango.ELLIPSIZE_END)\n self.add(album)\n\n minutes = str(length / 60)\n\n num_of_tracks = Label(0.02604, \"subtitle\", 0.50146, 0.18,\n _(\"%(total)s tracks, %(time)s minutes\") % \\\n {'total': len(tracks), 'time': minutes}, font_weight=\"bold\")\n self.add(num_of_tracks)",
"def build_tracks(self, dataset, data_name):\n pass",
"async def filename_generator(self):\n chars=list(string.ascii_letters+string.digits)\n name=''\n for i in range(random.randint(9,25)):\n name+=random.choice(chars)\n \n if name not in self.player['audio_files']:\n return name\n\n \n return await self.filename_generator()",
"def get_artist_from_tracklist(self, tracklistURL):\r\n name = self.execute_string(\"\"\"\r\n PREFIX etree:<http://etree.linkedmusic.org/vocab/>\r\n PREFIX mo:<http://purl.org/ontology/mo/>\r\n PREFIX event:<http://purl.org/NET/c4dm/event.owl#>\r\n PREFIX skos:<http://www.w3.org/2004/02/skos/core#>\r\n PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\r\n SELECT DISTINCT ?name WHERE \r\n {{ \r\n <{0}> mo:performer ?performer.\r\n ?performer foaf:name ?name.\r\n }} LIMIT 1\r\n \"\"\".format(tracklistURL))\r\n\r\n return name['results']['bindings'][0]['name']['value']",
"def make_name(name):\n return \"int_%s_%s%s\" % (impl_instance.iontype, name, self.sample_resource_md5)",
"def task_4_artists_create_song():\n Song.objects.create(artist_id=3, title='worship the father', album_name='to god be the glory')",
"def get_songs_names(playlist):\n songs = []\n for song in playlist:\n song = song['track']\n name = ''\n for artist in song['artists']:\n name += artist['name'] + ', '\n name = name[:-2]\n name += ' - ' + song['name']\n songs.append(name)\n return songs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints an error payload, which can also be used for action responses
|
def print_error_payload( response ):
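    # Try to extract and print structured error messages from the response body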
try:
print( get_error_messages( response ) )
except:
# No response body
if response.status >= 400:
print( "Failed" )
else:
print( "Success" )
|
[
"def print_error(response):\n print 'Status code: {0}'.format(response.status_code)",
"def print_error(error):\n print json.dumps({'error': error})",
"def print_api_error(error):\n sys.stderr.write('\\nERROR: %s\\n' % error)",
"def error_print():\n print(\"ERROR: Invalid Entry!\")",
"def indicate_error():\n print_right('[' + c.red('ERROR') + ']', 8)",
"def error(self, message, command):\n if \"error\" in message.trailing:\n print(10 / 0)",
"def print_error(self):\n print('\\n'.join(self.error_buffer))",
"def error(what,say):\n print 'ERROR: ', what, say",
"async def error_to_text(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n text = exc.title\n if exc.detail:\n text += f\"\\n{exc.detail}\"\n res.text = text",
"def dump_error(err_message):\n print(formatter.Formatter(err_message).print_error(), file=sys.stderr)",
"def displayError(err):\n print(\"\\nError: %s.\" % err)\n displayUsage()",
"def error(s):\n print s\n exit(1)",
"def display(error):\n\tif error is not None:\n\t\tflash(\"Error: \" + error)",
"def format_error(module, error):\n logging.error(module)\n # Beautify JSON error\n if type(error) == list:\n print \"Application not found\"\n else:\n print json.dumps(error, sort_keys=True, indent=4, separators=(',', ': '))\n exit(1)",
"def error(endpoint, reason, advice=None):\n return u'7::%s:%s+%s' % (endpoint or '',\n (reason or ''),\n (advice or ''))",
"async def error_to_html(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n html = f\"<h1>{exc.title}</h1>\"\n if exc.detail:\n html += f\"\\n<p>{exc.detail}</p>\"\n res.html = html",
"def error(s):\n print('Robotics toolbox error:', s)\n\n #traceback.print_exc();\n raise ValueError",
"def printErrorMsg(text):\r\n\tprint >> stderr, text",
"def error(self):\n self.render('error.html')",
"def display_error(msg, *args):\n munkicommon.display_error('Munkireport: %s' % msg, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sends the body back to the infos queue
|
def send_to_info_queue(body):
    print("trying connection to publisher")
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
    channel = connection.channel()
    channel.queue_declare(queue='infos')
    data = body
    channel.basic_publish(exchange='', routing_key='infos', body=data)  # TODO: double-check this dumps (fall back to body=body if it breaks)
    print(f" [x] {body} sent to infos queue")
    connection.close()
|
[
"def _add_details(self, info):\r\n msg_dicts = info.pop(\"messages\", [])\r\n super(QueueClaim, self)._add_details(info)\r\n parsed = urlparse.urlparse(self.href)\r\n self.id = parsed.path.rsplit(\"/\", 1)[-1]\r\n self.messages = [QueueMessage(self.manager._message_manager, item)\r\n for item in msg_dicts]",
"def process(self, info, queue):\n if info['status'] != 200:\n print 'putting %s back on queue' % info['url']\n queue.put(info['url'])",
"def _add_details(self, info):\r\n super(QueueMessage, self)._add_details(info)\r\n if self.href is None:\r\n return\r\n parsed = urlparse.urlparse(self.href)\r\n self.id = parsed.path.rsplit(\"/\", 1)[-1]\r\n query = parsed.query\r\n if query:\r\n self.claim_id = query.split(\"claim_id=\")[-1]",
"def bodyRequest(group, index):",
"def handle_inform(self, msg):\n print msg",
"def callback(ch, method, properties, body):\n data = jsonpickle.decode(body)\n try:\n redis_dict = get_license_plates(data[\"image\"])\n send_to_redis(data[\"hash\"], jsonpickle.encode(redis_dict), 1)\n if redis_dict[\"plates\"]:\n for plate in redis_dict[\"plates\"]:\n hash_list = get_from_redis(plate[\"plate\"], 3)\n if hash_list is None:\n send_to_redis(plate[\"plate\"], jsonpickle.encode([data[\"hash\"]]), 3)\n else:\n hash_list = jsonpickle.decode(hash_list)\n hash_list.append(data[\"hash\"])\n send_to_redis(plate[\"plate\"], jsonpickle.encode(hash_list), 3)\n send_to_logs(\"Image Processed (Hash): \" +data[\"hash\"]+ \", Status: success, Plate: \"+plate[\"plate\"]+ \", Worker: \" + socket.gethostname())\n else:\n send_to_logs(\"Image Processed (Hash): \" +data[\"hash\"]+ \", Status: success, Plate: none, Worker: \" + socket.gethostname())\n except Exception as inst:\n send_to_logs(\"Image Processed (Hash): \" +data[\"hash\"]+ \", Status: failure, Error: \"+inst+ \", Worker: \" + socket.gethostname())\n ch.basic_ack(delivery_tag=method.delivery_tag)",
"def __on_request_response__(self, ch, method, props, body):\r\n\t\ttry:\r\n\t\t\tself.last_message = json.loads(body)\r\n\t\texcept ValueError:\r\n\t\t\tprint 'encountered an error while decoding the message'\r\n\t\t\tself.last_message = body\r\n\r\n\t\tself.response = 'received'",
"def _process_queue(self):\n try:\n uid = self._queue.get(timeout=self.QUEUE_TIMEOUT)\n self._send_request(uid)\n except queue.Empty:\n pass",
"async def text_handler(request):\n input = await request.json()\n print(input)\n response = web.Response(content_type='text/html')\n response.text = \"Message was successfully queued.\\n\"\n return response",
"def publish_message(self, message, queue):",
"def feed(self):\n if cherrypy.request.method == 'POST':\n cherrypy.response.headers[\"Content-Type\"] = \"application/json\"\n cl = cherrypy.request.headers['Content-Length']\n rawbody = cherrypy.request.body.read(int(cl))\n #print(rawbody)\n unicodebody = rawbody.decode(encoding=\"utf-8\")\n body = json.loads(unicodebody)\n revolutions = body.get('data').get('r') # revolutions of the axis with blades\n #rawCounter = body.get('data').get('rawCounter')\n bpm = body.get('data').get('bpm') # enden, bladesPerMinute, viewPulsesPerMinute\n uuid = body.get('data').get('key') # deviceKey\n macAddress = body.get('data').get('mac') # macAddress \n #isOpen = body.get('data').get('isOpen') \n #showData = body.get('data').get('showData') \n #message = body.get('data').get('message')\n version = body.get('data').get('v') # firmwareVersion\n blades = body.get('data').get('b') # number of blades\n\n #backwards compatible for sender 0.1.2 and before\n #{\"data\": {\"revolutions\":\"0\",\"rawCounter\":\"6\",\"viewPulsesPerMinute\":\"0\",\"firmwareVersion\":\"0.1.2\",\n # \"deviceKey\":\"88888888-4444-4444-4444-121212121212\",\"macAddress\":\"A0:20:A6:14:85:06\",\"isOpen\":\"1\",\"showData\":\"1\",\"message\":\"\"}}'\n backwards_compatibility_on = False\n if revolutions == None:\n revolutions = body.get('data').get('revolutions')\n if bpm == None:\n bpm = body.get('data').get('viewPulsesPerMinute')\n blades = 4 # default\n backwards_compatibility_on = True\n if version == None:\n version = body.get('data').get('firmwareVersion')\n if uuid == None:\n uuid = body.get('data').get('deviceKey')\n if macAddress == None:\n macAddress = body.get('data').get('macAddress')\n\n # rph is needed for the models, revolutions per hour, to get a big enough number\n rph = None\n try:\n rph = str(round(int(bpm) * 60 / int(blades))) # revolutions per hour of the axis with blades\n except:\n pass\n\n\n # TODO: use uuid as the authentication-uuid-key from the device->pSettings\n # TODO: the factory-setting of the device is the fallback if the authentication-chain is broken\n # TODO: authenticate here, and return the new generated authentication-uuid so the device can save the new value\n #print('sender', macAddress)\n # TODO: with \"pushFirmware=esp8266_0.0.9.bin\" to push this version\n # TODO: \"pushFirmware=latest to push the latest\n\n try:\n previous_rph = mac_address_sender.get(macAddress).get(\"stored_rph\")\n if (int(rph) == 0) and (previous_rph > self.max_delta):\n rph = str(int(previous_rph - self.max_delta))\n except:\n pass\n try: \n mac_address_sender[macAddress].update({\"stored_rph\":int(rph)} )\n except:\n if rph != None:\n mac_address_sender.update({macAddress:{\"stored_rph\":int(rph)}} )\n\n feed_counter = 0\n try:\n feed_counter = mac_address_sender.get(macAddress).get(\"feed_counter\") or 0\n feed_counter += 1\n if feed_counter > self.max_feed_counter:\n # push Update\n # only update when bpm == 0\n # do this because an update call blocks the device (shortly)\n if rph and rph == \"0\":\n feed_counter = -1 # means check for update\n else:\n feed_counter = 0 # means no check on update\n except:\n pass\n try:\n mac_address_sender[macAddress].update({\"feed_counter\": feed_counter} )\n except:\n mac_address_sender.update({macAddress:{\"feed_counter\": feed_counter}} )\n #print('version', version, feed_counter, bpm, uuid)\n #print(mac_address_sender)\n\n\n # put feeded data in the dynamic features\n self.set(mac_address=macAddress,\n uuid=uuid,\n #rawCounter=rawCounter,\n #bpm=bpm,\n rph=rph,\n blades=blades,\n 
revolutions=revolutions,\n #isOpen=isOpen,\n #showData=showData,\n #message=message\n )\n\n result = {}\n\n if backwards_compatibility_on == True:\n # \"84:CC:A8:A3:09:11\": { \"comment\": \"(Tweemanspolder) Nr.3\",\n # \"A0:20:A6:29:18:13\": { \"comment\": \"de Roos\",\n # \"84:CC:A8:A0:FE:2D\": { \"comment\": \"de Hoop, Zoetermeer\",\n \n backwards_compatible_list = ()\n \n if macAddress in backwards_compatible_list:\n feed_counter = 0 # skip update to new version\n\n result.update({\"bpm\":bpm,\n #\"message\":message,\n \"proposedUUID\": uuid, # TODO: change this \n \"pushFirmware\" : feed_counter == -1 and \"latest\" or \"\",\n \"macAddress\": macAddress})\n else:\n result.update({\"pKey\": uuid, # proposedUUID ->TODO: change this value when needed as safety measurement (authentication of the sender) \n \"pFv\" : feed_counter == -1 and \"latest\" or \"\"\n })\n\n result_string = json.dumps(result)\n cherrypy.response.headers[\"Content-Length\"] = len(result_string)\n return result_string.encode('utf-8', 'replace')\n\n result_string = '{\"Error\": \"Request method should be POST\"}'\n cherrypy.response.headers[\"Content-Length\"] = len(result_string)\n return result_string.encode('utf-8', 'replace')",
"def post_send_message(self, msg):\r\n pass",
"def update(self, body):\n self.body = body",
"def bowie_sensors():\n msgs = []\n _queue\n print('in queue: '+str(_queue.qsize()))\n while _queue.qsize() > 0:\n msgs.append(_queue.get().decode('utf-8'))\n return json.dumps(dict(data=msgs))",
"def send_to_process(self, request):\n self.process.q.put(request)",
"def sendToFarm(self):\n\n if self.debugMode:\n log.info('Debug mode is on')\n log.info('Job to send : \\n\\n')\n print self.job.asTcl()\n return\n\n # Generate archives local\n if self.archivesGeneration == 'local':\n self.generateArchivesLocaly()\n\n jobId = self.job.spool()\n\n if jobId:\n hou.ui.displayMessage(text='Job sent to Tractor : \\n{}#jid={}'.format(self.tractorUrl, jobId))",
"def queuestatus():\n return GlobalVars.bodyfetcher.print_queue()",
"def getMessageInfo(self):\n # type: () -> IHttpRequestResponse",
"async def queue_info(self, ctx):\r\n vc = ctx.voice_client\r\n\r\n if not vc or not vc.is_connected():\r\n return await ctx.send('I am not currently connected to a channel.' )\r\n\r\n player = self.get_player(ctx)\r\n if player.queue.empty():\r\n return await ctx.send('The queue is empty.')\r\n\r\n # Grab up to 5 entries from the queue...\r\n upcoming = list(itertools.islice(player.queue._queue, 0, 5))\r\n\r\n fmt = '\\n'.join(f'**`{_[\"title\"]}`**' for _ in upcoming)\r\n embed = discord.Embed(title=f'Upcoming - Next {len(upcoming)}', description=fmt)\r\n\r\n await ctx.send(embed=embed)"
] |
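As a complement to send_to_info_queue above, a minimal sketch of the matching consumer, assuming pika 1.x, the same 'rabbitmq' host, and the same 'infos' queue; the callback simply prints what it receives.

import pika

def consume_info_queue():
    # Mirror of the publisher: same host and queue name as send_to_info_queue.
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
    channel = connection.channel()
    channel.queue_declare(queue='infos')

    def callback(ch, method, properties, body):
        print(f" [x] received {body!r} from infos queue")

    channel.basic_consume(queue='infos', on_message_callback=callback, auto_ack=True)
    channel.start_consuming()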
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Counts tweets by year, month and day. Using the cleaned data from the getallweets.py module, this function loads the data and computes the frequency for days, months and years. Three dicts hold the output, which can be pickled into local storage for makePicture.py.
|
def computeTime(inputData):
import pickle
data = None
with open(inputData, "rb") as f:
data = pickle.load(f)
years = {}
months = {}
days = {}
for tweet in data:
year = tweet[3].year
month = tweet[3].month
day = tweet[3].day
# Strings
year = str(year)
month = str(month)
if len(month) == 1:
month = "0" + month
month = str(year) + month
day = str(day)
if len(day) == 1:
day = "0" + day
day = month + day
        years[year] = years.get(year, 0) + 1
        months[month] = months.get(month, 0) + 1
        days[day] = days.get(day, 0) + 1
# with open("data/"+ inputData + "_FreqOfyear.db", "wb") as f:
# pickle.dump(years, f)
# print(inputData + "_FreuOfYear.db was stored!")
#
# with open("data/"+ inputData + "_FreqOfmonth.db", "wb") as f:
# pickle.dump(months, f)
# print(inputData + "_FreuOfmonth.db was stored!")
#
# with open("data/"+ inputData + "_FreqOfday.db", "wb") as f:
# pickle.dump(days, f)
# print("data/"+ inputData + "_FreuOfday.db was stored!")
return (years, months, days)
|
[
"def scrape_month_weather(self, year: int, month: int) -> dict:\n try:\n print('Scraping data of year: {0}, month: {1}...'.format(year, month))\n days_of_current_month = calendar.monthrange(year, month)[1]\n # Get raw info from HTML parse\n url = (\"http://climate.weather.gc.ca/\"\n + \"climate_data/daily_data_e.html\"\n + \"?StationID=27174\"\n + \"&timeframe=2&StartYear=1840\"\n + \"&EndYear=\" + str(year)\n + \"&Day=1&Year=\" + str(year)\n + \"&Month=\" + str(month) + \"#\")\n\n new_scraper = WeatherScraper()\n with urllib.request.urlopen(url) as response:\n html = str(response.read())\n new_scraper.feed(html)\n\n # If the date_list in the website already be scraped, then we will stop the scrapper.\n if new_scraper.date_list != [] and new_scraper.date_list in self.date_list:\n self.stop_scraping = True\n print('There is no data for year: {0}, month: {1}.'.format(year, month))\n return {}\n\n result = new_scraper.get_data().split(',')\n\n # print('debug: result')\n # count = 0\n # for r in result:\n # print(str(r) + ',', end='')\n # count += 1\n # if count == 11:\n # print()\n # count = 0\n # print()\n\n # Convert raw info to weather list.\n # From the website, each row has 11 column, and the last 4 lines are useless(sum, avg, xtrm, summary).\n columns = 11\n if date.today().year == year and date.today().month == month:\n rows = date.today().day\n else:\n rows = days_of_current_month\n result_grouping = [result[i:i + columns] for i in range(0, rows * columns, columns)]\n daily_temps_list = []\n for item in result_grouping:\n if len(item) >= 3:\n # '' only happened in yesterday and today, others are 'M'\n if item[0] != '' and item[1] != '' and item[2] != '':\n my_dict = {\"Max\": str(item[0]), \"Min\": str(item[1]), \"Mean\": str(item[2])}\n daily_temps_list.append(my_dict)\n\n # print('debug: daily_temps_list')\n # for item in daily_temps_list:\n # print(str(item))\n\n # Zip weather list items with the date\n month_dict = dict(zip(new_scraper.date_list, daily_temps_list))\n self.date_list.append(new_scraper.date_list)\n self.weather.update(month_dict)\n\n # print('debug: month_dict')\n # for key, value in month_dict.items():\n # print(key + ':' + str(value))\n\n return month_dict\n except Exception as e:\n self.logger.error(e)",
"def weather_scraper(year, month, day):\n date = year + month + day\n\n urlstart = 'http://api.wunderground.com/api/37d281e3f1931e1e/history_'\n urlend = '/q/Ireland/Dublin.json'\n url = urlstart + str(date) + urlend\n data = requests.get(url).json()\n\n for i in data['history']['observations']:\n if 'METAR' in i['metar']:\n datetime = year + \"-\" + month + \"-\" + day + \" \" + \\\n i['date']['hour'] + ':' + i['date']['min'] + ':00'\n summary = i['conds']\n temp = str(math.floor(float(i[\"tempm\"])))\n rain = i['rain']\n wind_speed = str(math.floor(float(i['wspdm'])))\n # Store relevant informaion in a specific format in an array\n weather_array = [datetime, summary, temp, rain, wind_speed]\n #Write each line to a csv file\n write_to.writerow(weather_array)",
"def countTweetPerDay(file):\n\n with open(file, \"r\") as tweet_corpus:\n TweetsPerDay_count = {}\n \n for line in tweet_corpus.readlines():\n tweet = json.loads(line)\n date = re.match(\"(.{4}-.{2}-.{2}) .*\", tweet['created_at']).group(1)\n\n # update TweetsPerDay_count\n TweetsPerDay_count[date] = TweetsPerDay_count.setdefault(date, 0)\n TweetsPerDay_count[date] += 1\n return TweetsPerDay_count",
"def preprocessing(company, lang):\n\n # get tweets\n tweets = np.array(execute(\"SELECT * FROM tweet WHERE searchterm = '@\" + company + \"'\"))\n tweets = tweets[:,2]\n\n # count retweets\n pattern = re.compile(\"^RT \")\n rt_tweets = [ tweet for tweet in tweets if pattern.match(tweet) ]\n\n # only lang tweets\n lang_tweets = []\n for tweet in rt_tweets:\n try:\n if detect(tweet) == lang:\n lang_tweets.append(tweet)\n except:\n continue\n\n # no urls\n url = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n no_url_tweets = [ re.sub(url, '', tweet) for tweet in lang_tweets ]\n\n # remove @ words\n no_arobas_tweets = [ re.sub(r\"([@?]\\w+)\\b\", '', text) for text in no_url_tweets ]\n\n # remove non-alphanumerical characters\n only_alphanum_tweets = [ re.sub(r'[^\\w]', ' ', text) for text in no_arobas_tweets ]\n\n # tokenizing\n tokenized_tweets = [ tweet.split(\" \") for tweet in only_alphanum_tweets ]\n\n # lower tweets and remove one char words\n lowered_tweets = [ [ word.lower() for word in text if len(word) > 1 ] for text in tokenized_tweets ]\n \n # remove stopwords\n stopwords = open(\"./stopwords\").read().split(\"\\n\")\n stopwords += [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\", \n \"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jul\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\",\n \"amp\", \"rt\", \"https\"]\n filtered_tweets = [ [ word for word in text if word not in stopwords ] for text in lowered_tweets ]\n\n # isolate bigrams\n bigrams = mark_bigrams(filtered_tweets)\n\n # reduce to one list of words\n flat_text_bigrams = [ word for tweet in bigrams for word in tweet ]\n flat_text = [ word for tweet in filtered_tweets for word in tweet ]\n\n # get frequency dictionary\n frequ = collections.Counter(flat_text_bigrams).most_common()\n\n # return format\n # * name company\n # * number tweets\n # * nb retweet\n # * language chosen\n # * nb tweet in chosen language\n # * nb words\n # * nb unique words\n data = (company, len(tweets), len(rt_tweets), lang, len(lang_tweets), len(flat_text_bigrams), len(frequ), filtered_tweets)\n\n return data",
"def read_user_tweets(dir_path):\n tweet_dict = {}\n words = []\n tokenize_dict = {}\n user_tweets = \"\"\n i = 0\n cachedStopWords = stopwords.words(\"english\")\n# print(cachedStopWords) #print stop words\n# loop over the user files\n for filename in os.listdir(dir_path):\n #skip files if it's not xml \n if filename.endswith(\".xml\"): \n dom = ElementTree.parse(dir_path+filename) \n tweets = dom.find('documents')\n #loop over tweet of one user \n for tweet in tweets:\n #concantanate tweets of one user by new line \n user_tweets = user_tweets + \"\\n\" + (tweet.text).lower()\n #remove punctiation and numbers\n user_tweets = re.sub(r'[^\\w\\s]','', user_tweets)\n user_tweets = re.sub(r'[0-9]','', user_tweets)\n #cut '.xml' from file name to get user value as the same as in txt file\n filename = filename[:-4]\n #lowercase the text\n tweet_dict[filename] = user_tweets.lower()\n #tokenize user tweets\n tokenize = word_tokenize(user_tweets)\n tokenize = [word for word in tokenize if not (word.startswith('http') or word.startswith('amp') or word.startswith('xx')) ]\n tokenize_dict[filename] = tokenize\n i += 1\n if i % 100 == 0:\n print(i)\n words += [word for word in tokenize_dict[filename] if word not in cachedStopWords]\n user_tweets = \"\"\n \n return tweet_dict, words",
"def gettweets(self, path):\n #tweet_folder = 'tweets'\n tweet_folder = 'tweets_analyze'\n tweet_folder1 = 'tweets'\n for (root, dirs, files) in os.walk(path):\n if \"content\" in root and \"nytimes\" not in root:\n for f in files:\n idstr = f.split('_')[0]\n if not os.path.exists(root+'/../'+tweet_folder):\n os.mkdir(root+'/../'+tweet_folder)\n os.mkdir(root+'/../'+tweet_folder1)\n f1 = open(root+'/'+f, 'r')\n lines = f1.readlines()\n p = root+'/../'+tweet_folder+'/'\n p_objs = root+'/../'+tweet_folder1+'/'\n self.genrelatedtweets(idstr, p, p_objs, lines)\n f1.close()",
"def import_web_archive_tweets(list_of_file_paths, pre_2014_08=False, delete_old_index=False,\n langs=None, min_n_hashtags=1,\n indexed_tweet_ids=None, update_existing_docs=False, update_report_interval=25000):\n if pre_2014_08:\n tweet_dict_creator = create_tweet_dict_from_pre_2014_08_tweet\n else:\n tweet_dict_creator = create_tweet_dict_from_tweet\n if indexed_tweet_ids is None:\n indexed_tweet_ids = []\n if not update_existing_docs:\n _logger.warning(\"'update_existing_docs=False' setting will be ignored because indexed_tweet_ids==[]\")\n start_time = time.time()\n _logger.info(\"started importing from %d tar file(s)\\n\" % len(list_of_file_paths))\n if delete_old_index:\n create_new_index()\n es_docs = []\n all_tweets_count = 0\n good_tweets_count = 0\n other_doc_count = 0\n existing_tweets_count = 0\n for file_path in list_of_file_paths:\n _logger.info(\"started importing from %s\\n\" % file_path.split(\"/\")[-1])\n file_tweets_count = 0\n file_good_tweets_count = 0\n file_start_time = time.time()\n tar_file_iterator = tarfile.open(file_path, mode='r|')\n for member in tar_file_iterator:\n if member.isfile():\n if member.name[-4:] == \".bz2\":\n f = tar_file_iterator.extractfile(member)\n try:\n bz2_file = bz2.BZ2File(f)\n for line in bz2_file.readlines():\n try:\n tweet = json.loads(line.decode('utf8'))\n except ValueError:\n _logger.error(\"couldn't read the following line:\\n%s\" % line.decode('utf8'))\n continue\n if 'id' in tweet:\n file_tweets_count += 1\n all_tweets_count += 1\n # if es.exists(index=INDEX_NAME, doc_type=TYPE_NAME, id=tweet['id']):\n if tweet['id_str'] in indexed_tweet_ids: # the document has been indexed before, skip?\n existing_tweets_count += 1\n if existing_tweets_count % update_report_interval == 0:\n _logger.info(\n \"%s %d already indexed docs by now\" %\n (\"updated\" if update_existing_docs else \"skipped\", existing_tweets_count)\n )\n if not update_existing_docs:\n continue\n try:\n if len(tweet['entities']['hashtags']) >= min_n_hashtags:\n if langs:\n if tweet['lang'] in langs:\n es_docs.append(tweet_dict_creator(tweet))\n good_tweets_count += 1\n file_good_tweets_count += 1\n else:\n es_docs.append(tweet_dict_creator(tweet))\n good_tweets_count += 1\n file_good_tweets_count += 1\n except KeyError as e:\n # _logger.warning(\n # \"skipping tweet #%d id:%s - raised a KeyError: %s\" %\n # (file_tweets_count, tweet['id'], e)\n # )\n continue # logging was too heavy on I/O because of too many tweets missing 'lang'\n except Exception as ee:\n _logger.warning(\n \"skipping tweet #%d id:%s - raised %s\" % (file_tweets_count, tweet['id'], ee)\n )\n else:\n # these are usually tweet delete requests ...\n other_doc_count += 1\n continue\n except EOFError as eee:\n _logger.error(\"the file %s raised an EOFError: %s\" % (member.name[-4:], eee))\n except Exception as eeee:\n _logger.error(\"the file %s raised %s\" % (member.name[-4:], eeee))\n if len(es_docs) >= BATCH_SIZE:\n _logger.info(\n (\"tweets in %s & with >=%d tags \\tpassed to ES: %d, \"\n \"passed to ES overall: %d (from which %s: %d), \"\n \"encountered in the file: %d, encountered: %d\") %\n (langs, min_n_hashtags, len(es_docs), file_good_tweets_count,\n \"updated\" if update_existing_docs else \"skipped\", existing_tweets_count,\n file_tweets_count, all_tweets_count)\n )\n update_esindex(es_docs)\n # good_tweets_counter += len(es_docs)\n # file_good_tweets_counter += len(es_docs)\n es_docs = []\n if len(es_docs): # write the remaining docs from the .bz2 file to Elasticsearch index\n _logger.info(\n 
(\"the last batch of tweets in %s & with >=%d tags \\tpassed to ES: %d, \"\n \"passed to ES overall: %d (from which %s: %d), \"\n \"encountered in the file: %d, encountered: %d\") %\n (langs, min_n_hashtags, len(es_docs), file_good_tweets_count,\n \"updated\" if update_existing_docs else \"skipped\", existing_tweets_count,\n file_tweets_count, all_tweets_count)\n )\n update_esindex(es_docs)\n # good_tweets_counter += len(es_docs)\n # file_good_tweets_counter += len(es_docs)\n es_docs = []\n _logger.info(\n \"finished indexing %d tweets (seen %d) from %s in %d seconds\\n\" %\n (file_good_tweets_count, file_tweets_count, file_path.split(\"/\")[-1], round(time.time() - file_start_time))\n )\n _logger.info(\n \"finished importing %d tweets (seen %d) from %d tar file(s) in %d seconds\" %\n (good_tweets_count, all_tweets_count, len(list_of_file_paths), round(time.time() - start_time))\n )",
"def analyse_tweet(self,tweet):\n flag=False\n try:\n #extract date\n dat=date_extract(tweet)\n #find type of tweet\n tw_typ=tweet_type(tweet)\n #extract text of tweet\n text=tweet_text(tweet,tw_typ)\n\n #find the keys of tweet:\n kys=self.whichkeys(text)\n\n \n #remove blank tweets and tweets that have lost their date:\n flag= (dat!=None) and (text!=None)\n except:\n return None\n #for a tweet satisfying the above\n if flag:\n #new date considered\n if dat not in self.dates.keys():\n self.new_date(dat)\n #update information\n for key in kys:\n try:\n #sentiment of tweet\n s=self.sent(text)\n self.update_stats(dat,key,s)\n except:\n pass",
"def countTweetsPerDay(tweet_list):\n\n TweetsPerDay_count = {}\n \n for tweet in tweet_list:\n date = re.match(\"(.{4}-.{2}-.{2}) .*\", tweet['created_at']).group(1)\n\n # update TweetsPerDay_count\n TweetsPerDay_count[date] = TweetsPerDay_count.setdefault(date, 0)\n TweetsPerDay_count[date] += 1\n\n return TweetsPerDay_count",
"def analyze_tweet(tweet, results):\n\n ######################################\n # fields that are relevant for user-level and tweet-level analysis\n # count the number of valid Tweets here\n # if it doesn't have at least a body and an actor, it's not a tweet\n try: \n body = tweet[\"body\"]\n userid = tweet[\"actor\"][\"id\"].split(\":\")[-1]\n results[\"tweet_count\"] += 1\n except (ValueError, KeyError):\n if \"non-tweet_lines\" in results:\n results[\"non-tweet_lines\"] += 1\n return\n\n # count the number of tweets from each user\n if \"tweets_per_user\" in results:\n results[\"tweets_per_user\"][tweet[\"actor\"][\"id\"][15:]] += 1\n \n #######################################\n # fields that are relevant for the tweet-level analysis\n # ------------------> term counts\n # Tweet body term count\n if \"body_term_count\" in results:\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # count the occurences of different hashtags\n if \"hashtags\" in results:\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in tweet[\"twitter_entities\"][\"hashtags\"]:\n results[\"hashtags\"][h[\"text\"].lower()] += 1\n \n try:\n # count the occurences of different top-level domains\n if (\"urls\" in results) and (\"urls\" in tweet[\"gnip\"]):\n for url in tweet[\"gnip\"][\"urls\"]:\n try:\n results[\"urls\"][url[\"expanded_url\"].split(\"/\")[2]] += 1\n except (KeyError,IndexError,AttributeError):\n pass\n # and the number of links total\n if (\"number_of_links\" in results) and (\"urls\" in tweet[\"gnip\"]):\n results[\"number_of_links\"] += len(tweet[\"gnip\"][\"urls\"])\n except KeyError:\n pass\n \n # -----------> timelines\n # make a timeline of UTC day of Tweets posted\n if \"utc_timeline\" in results:\n date = tweet[\"postedTime\"][0:10]\n results[\"utc_timeline\"][date] += 1\n\n # make a timeline in normalized local time (poster's time) of all of the Tweets\n if \"local_timeline\" in results:\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n \n # ------------> mention results\n # which users are @mentioned in the Tweet\n if \"at_mentions\" in results:\n for u in tweet[\"twitter_entities\"][\"user_mentions\"]:\n # update the mentions with weight + 1 and \n # list all of the screennames (in case a name changes)\n if u[\"id_str\"] is not None:\n results[\"at_mentions\"][u[\"id_str\"]][\"weight\"] += 1 \n results[\"at_mentions\"][u[\"id_str\"]][\"screennames\"].update([u[\"screen_name\"].lower()])\n \n # count the number of times each user gets replies\n if (\"in_reply_to\" in results) and (\"inReplyTo\" in tweet):\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n # --------------> RTs and quote Tweet\n # count share actions (RTs and quote-Tweets)\n # don't count self-quotes or self-RTs, because that's allowed now\n if ((\"quote_of_user\" in results) or (\"RT_of_user\" in results)) and (tweet[\"verb\"] == \"share\"):\n # if it's a quote tweet\n if (\"quote_of_user\" in results) and (\"twitter_quoted_status\" in tweet[\"object\"]):\n quoted_id = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"id\"][15:]\n quoted_name = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"preferredUsername\"]\n if quoted_id != tweet[\"actor\"][\"id\"]:\n 
results[\"quote_of_user\"][quoted_id][\"weight\"] += 1 \n results[\"quote_of_user\"][quoted_id][\"screennames\"].update([quoted_name])\n # if it's a RT\n elif (\"RT_of_user\" in results):\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n if rt_of_id != tweet[\"actor\"][\"id\"]:\n results[\"RT_of_user\"][rt_of_id][\"weight\"] += 1 \n results[\"RT_of_user\"][rt_of_id][\"screennames\"].update([rt_of_name])\n\n # Tweet expended url content term count\n if \"url_content\" in results:\n try:\n urls = tweet[\"gnip\"][\"urls\"]\n except KeyError:\n urls = []\n url_content = \"\"\n for url in urls:\n try:\n expanded_url_title = url[\"expanded_url_title\"]\n if expanded_url_title is None:\n expanded_url_title = \"\"\n except KeyError:\n expanded_url_title = \"\"\n try:\n expanded_url_description = url[\"expanded_url_description\"]\n if expanded_url_description is None:\n expanded_url_description = \"\"\n except KeyError:\n expanded_url_description = \"\"\n url_content = url_content + \" \" + expanded_url_title + \" \" + expanded_url_description\n results[\"url_content\"].add(url_content)\n \n ############################################\n # actor-property qualities\n # ------------> bio terms\n if \"bio_term_count\" in results:\n if tweet[\"actor\"][\"id\"][:15] not in results[\"tweets_per_user\"]:\n try:\n if tweet[\"actor\"][\"summary\"] is not None:\n results[\"bio_term_count\"].add(tweet[\"actor\"][\"summary\"])\n except KeyError:\n pass\n \n # ---------> profile locations\n if \"profile_locations_regions\" in results:\n # if possible, get the user's address\n try:\n address = tweet[\"gnip\"][\"profileLocations\"][0][\"address\"]\n country_key = address.get(\"country\", \"no country available\")\n region_key = address.get(\"region\", \"no region available\")\n except KeyError:\n country_key = \"no country available\"\n region_key = \"no region available\"\n results[\"profile_locations_regions\"][country_key + \" , \" + region_key] += 1",
"def parse_all_tweets(directory='./data/trump_tweet_data_archive/', output='data/raw_tweets.txt'):\n for filename in sorted(os.listdir(directory)):\n if filename.endswith('.json'):\n read_tweets(directory+filename, output)",
"def process_tweets(dataset, group_by = 'author', filter_language = 'English', extract_hashtags = True, filtersize = 3):\n import pandas as pd\n import re\n from nltk.corpus import wordnet as wn\n from collections import Counter\n\n pd.options.mode.chained_assignment = None # default='warn', suppress the setting with copy warning\n\n # Filter for languages if true \n if filter_language:\n # selecting content columns for subject categorization by language\n dataset = dataset[dataset.language == filter_language]\n cont = dataset.content\n else:\n cont = dataset.content\n \n content_filtered = cont.apply(lambda x: re.sub(r'http\\S+', '', x)).apply(lambda x: re.sub(r\"'|\\\"|`|:|\\?|~|,|\\.\", '', x))\\\n .apply(lambda x: remove_stopwords(x))\n\n\n # redefine content column for dataset\n dataset['content'] = content_filtered.values\n # Drop NaN values in content \n dataset.dropna(axis=0,subset=['content'], inplace=True)\n # Get list of words that are stop words \n en_stop = set(nltk.corpus.stopwords.words('english'))\n tokens = []\n \n ##### GROUP BY AUTHOR ######\n \n if group_by == 'author':\n tweets_concatenated = dataset.groupby('author')['content'].apply(lambda x : x.sum()\n if x.dtype=='float64' else ' '.join(x))\n content = tweets_concatenated.copy()\n if extract_hashtags == True:\n # Count the hashtag frequency for each user\n hashtag_count = tweets_concatenated.apply(lambda x: hashtag_counter(x, do='count'))\n # Extract words that are in hashtags\n hashtagged = tweets_concatenated.apply(lambda x: hashtagger(x))\n # Concatenate the words to the entire tweets\n hashtags_gone = hashtagged + tweets_concatenated\n # Remove hashtags since they are no longer needed and make all words lower case\n hashtags_gone = hashtags_gone.apply(lambda x: re.sub(r\"#\\w+\", '', x)).apply(lambda x: x.lower())\n # Convert to NumPy array\n content = hashtags_gone.values\n content_tokens = [nltk.word_tokenize(x) for x in content]\n for sublist in content_tokens:\n tokens.append([get_lemma(token) for token in sublist if token not in en_stop and len(token) > 3])\n\n return tokens, hashtag_count\n \n ##### GROUP BY HASHTAG ######\n \n if group_by == 'hashtag':\n hashtag_column = dataset['content'].apply(lambda x: hashtag_counter(x))\n df_hashtags = pd.concat([dataset['content'], hashtag_column], axis=1)\n df_hashtags.columns = ['content', 'hashtags']\n \n \n # make the series that has as the index values the hashtag and the column that has the concatenated \n # tweets.\n tweets_concatenated = df_hashtags.groupby('hashtags')['content'].apply(lambda x : x.sum()\n if x.dtype=='float64' else ' '.join(x))\n # remove the hashtag shit\n hashtags_gone = tweets_concatenated.apply(lambda x: re.sub(r\"#\\w+\", '', x)).apply(lambda x: x.lower())\n content = hashtags_gone.values\n\n\n content_tokens = [nltk.word_tokenize(x) for x in content]\n for sublist in content_tokens:\n tokens.append([get_lemma(token) for token in sublist if token not in en_stop and len(token) > 3])\n \n return tweets_concatenated, tokens, hashtag_column",
"def extractData(parsedTweet):\n\n #extract hashtags as a list\n hashtags = [x['text'] for x in parsedTweet['entities']['hashtags']]\n\n #extract created_at and convert into an integer of seconds since epoch\n timestamp = int(time.mktime(time.strptime(parsedTweet['created_at'][0:20] +\\\n parsedTweet['created_at'][26:],\n '%a %b %d %H:%M:%S %Y')))\n return hashtags, timestamp",
"def pages(year, brightness='70',\n basepath='/scratch/summit/diga9728/Moodys/Industrials/'):\n logging.basicConfig(level=logging.DEBUG)\n logging.debug(\n \"Looking for day files from year %s, at brightness %s, in %s\",\n year, brightness, basepath)\n # find all dirs that might contain .day files\n dirs = sorted(glob.glob(basepath + 'OCRrun' + year + '/[0-9][0-9][0-9]/'))\n # iter over these dirs\n for d in dirs:\n # filenames: OCRoutputIndustrial<year><fiche>-<image#><brightness>.day\n # find all filenames in given dir\n files = sorted(glob.glob(d + 'OCRoutputIndustrial' + year + '[0-9]'\n '[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]'\n + brightness + '.day'))\n # yield page by page\n for f in files:\n yield f",
"def get_week_stats_from_data(data, date, distribution=False, sampling=False, sampling_str=None, high_replies=200,\n low_replies=11):\n year = str(date[0])\n month = str(date[1]) if date[1] >= 10 else '0' + str(date[1])\n filename = 'RC_' + year + '-' + month\n distr = '_distr' if distribution is True else ''\n sample_ = '_sample' if sampling is True else ''\n with open(os.path.join(path, 'stats',\n '{0}_weeks{1}{2}.txt'.format(filename, distr, sample_)), 'w+') as fweeks, \\\n open(os.path.join(path, 'stats',\n '{0}_users{1}.txt'.format(filename, sample_)), 'w+') as fusers:\n reddits = [set(), set(), set(), set()]\n acc = [set(), set(), set(), set()]\n name = [set(), set(), set(), set()]\n accounts = set()\n subs = [defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int)]\n users = defaultdict(lambda: defaultdict(int))\n i = [0, 0, 0, 0]\n\n for w in range(4):\n for cur_data in data[w]:\n acc[w].add(cur_data['author'])\n subreddit = cur_data['subreddit']\n reddits[w].add(subreddit)\n name[w].add(cur_data['name'])\n subs[w][subreddit] += 1\n users[cur_data['author']][cur_data['subreddit']] += 1\n users[cur_data['author']]['all'] += 1\n accounts.add(cur_data['author'])\n i[w] = len(data[w])\n\n # USERS\n low = [0 for l in range(low_replies)]\n high = 0\n fusers.write(filename)\n fusers.write('\\n----------------')\n fusers.write('\\nSampling: ' + sampling_str)\n fusers.write('\\n----------------')\n sorted_u = sorted(users.items(), key=lambda k_v: k_v[1]['all'], reverse=True)\n fusers.write('\\nWHOLE MONTH')\n for user in sorted_u:\n user = user[0]\n fusers.write(\"\\n\" + user)\n sorted_r = sorted(users[user].items(), key=lambda k_v: k_v[1], reverse=True)\n for red in sorted_r:\n red = red[0]\n replies = users[user][red]\n fusers.write(\"\\n \" + red + \": \" + str(replies))\n if red == 'all':\n for l in range(1, low_replies):\n if replies == l:\n low[l - 1] += 1\n if replies > high_replies:\n high += 1\n for l in range(1, low_replies):\n print(\"Low \", l, \" replies: \", low[l - 1], \" Percentage:\", low[l - 1] / len(accounts))\n print(\"Low all: \", sum(low), \" Percentage:\", sum(low) / len(accounts))\n print(\"High: \", high, \" Percentage:\", high / len(accounts))\n print(\"ALL: \", len(accounts))\n #\n\n nrmlz = i if distribution else [1, 1, 1, 1] # normalizers\n fweeks.write(filename)\n fweeks.write('\\n----------------')\n fweeks.write('\\nSampling: ' + sampling_str)\n fweeks.write('\\n----------------')\n for w in range(4):\n fweeks.write('\\nWeek ' + str(w))\n fweeks.write('\\n--------------')\n fweeks.write(\"\\nreddits: \" + str(len(reddits[w])))\n fweeks.write(\"\\naccounts: \" + str(len(acc[w])))\n fweeks.write(\"\\nnames: \" + str(len(name[w])))\n fweeks.write('\\nall posts: ' + str(i[w]) + '\\n')\n fweeks.write('\\nSubreddits: posts +- from previous week\\n')\n sorted_reds = sorted(subs[w].items(), key=lambda k_v: k_v[1], reverse=True)\n for sr in sorted_reds:\n diff = ''\n prev = 0\n if w > 0:\n if sr[0] in subs[w - 1]: # check so that it won't create a new key with 0 value\n prev = subs[w - 1][sr[0]] / nrmlz[w - 1]\n diff = int(sr[1] - prev) if nrmlz[w] == 1 else sr[1] / nrmlz[w] - prev\n diff = str(diff) if diff < 0 else '+' + str(diff)\n cc = sr[1] if nrmlz[w] == 1 else sr[1] / nrmlz[w]\n fweeks.write('\\t' + sr[0] + \": \" + str(cc) + ' ' + diff + '\\n')\n\n # NEW and DEAD\n if w > 0:\n new = set(subs[w].keys()) - set(subs[w - 1].keys())\n dead = set(subs[w - 1].keys()) - set(subs[w].keys())\n print('NEW ', \"Week \", w, \"|| Num: \", len(new))\n for k, 
_ in sorted_reds:\n if k in new:\n print(k, \": \", subs[w][k], end=', ')\n print()\n print('----')\n print('DEAD ', \"Week \", w, \"|| Num: \", len(dead))\n for k, _ in prev_sorted_reds:\n if k in dead:\n print(k, \": \", subs[w - 1][k], end=', ')\n print()\n print()\n prev_sorted_reds = sorted_reds\n fweeks.write('all: ' + str(sum(i)))",
"def tweets(self):\n tweet=[] # creating a list to add all of the tweets text to\n for json_file in self.data:\n tweet.append(json_file[\"text\"])# adding the text of the tweets to the list\n return tweet # returning the list of tweets so that I can use this function tweets and apply it",
"def compile_monogram_dictionaries(tweets, filename):\n positive_dict = []\n negative_dict = []\n\n # positive\n for text in tweets[0]:\n positive_dict.extend(text.split(\" \"))\n # negative\n for text in tweets[1]:\n negative_dict.extend(text.split(\" \"))\n\n filename += \"-monogram\"\n save_dictionaries(positive_dict, negative_dict, filename)\n return",
"def collect_day_conf_all_years(tiles, **kwargs):\n\n root = kwargs[\"root\"]\n years = kwargs[\"years\"]\n preprocessed_years = kwargs[\"preprocessed_years\"]\n # TODO: make sure that the right years are returned\n # exclude_years = years + preprocessed_years\n preprocessed_tiles = get_preprocessed_tiles(root, exclude_years=preprocessed_years)\n\n tile_dicts = dict()\n for tile in tiles:\n basedir = PurePath(tile).parent.parent.as_posix()\n year = PurePath(tile).parts[-2]\n\n tile_dicts = add_tile_to_dict(tile_dicts, basedir, year, tile)\n\n tile_dicts = add_preprocessed_tile_to_dict(\n tile_dicts, basedir, preprocessed_tiles\n )\n\n if len(tile_dicts[basedir]) == len(years) + 1:\n logging.info(\"Created pairs for: \" + basedir)\n yield tile_dicts[basedir]\n\n for key, value in tile_dicts.items():\n if len(value) < len(years) + 1:\n logging.warning(\"Could not create pair for: \" + key)",
"def twitter_posts_stats(file_path: str):\n orig_data = get_data(file_path)\n tweet_id = []\n tweet_conversation_id = []\n tweet_impression_count = []\n tweet_user_profile_click = []\n tweet_like_count = []\n tweet_quote_count = []\n tweet_reply_count = []\n tweet_retweet_count = []\n tweet_referenced_count = []\n for tweet in orig_data:\n tweet_id.append(tweet.get(\"id\", \"0\"))\n tweet_conversation_id.append(tweet.get(\"conversation_id\", \"0\"))\n if \"non_public_metrics\" in tweet.keys():\n tweet_impression_count.append(\n tweet[\"non_public_metrics\"].get(\"impression_count\", 0)\n )\n tweet_user_profile_click.append(\n tweet[\"non_public_metrics\"].get(\"user_profile_clicks\", 0)\n )\n if \"public_metrics\" in tweet.keys():\n tweet_like_count.append(\n tweet[\"public_metrics\"].get(\"like_count\", 0)\n )\n tweet_quote_count.append(\n tweet[\"public_metrics\"].get(\"quote_count\", 0)\n )\n tweet_reply_count.append(\n tweet[\"public_metrics\"].get(\"reply_count\", 0)\n )\n tweet_retweet_count.append(\n tweet[\"public_metrics\"].get(\"retweet_count\", 0)\n )\n if \"referenced_tweets\" in tweet.keys():\n tweet_referenced_count.append(len(tweet[\"referenced_tweets\"]))\n else:\n tweet_referenced_count.append(0)\n return {\n \"tweet_id\": tweet_id,\n \"tweet_conversation_id\": tweet_conversation_id,\n \"tweet_impression_count\": tweet_impression_count,\n \"tweet_user_profile_click\": tweet_user_profile_click,\n \"tweet_like_count\": tweet_like_count,\n \"tweet_quote_count\": tweet_quote_count,\n \"tweet_reply_count\": tweet_reply_count,\n \"tweet_retweet_count\": tweet_retweet_count,\n \"tweet_referenced_count\": tweet_referenced_count,\n }"
] |
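The same day/month/year tally as computeTime, written as a sketch with collections.Counter; it assumes the same tuple layout (a datetime at tweet[3]) and the same pickled input file, and produces keys in the same "YYYY", "YYYYMM", "YYYYMMDD" format.

import pickle
from collections import Counter

def compute_time_counts(input_path):
    # Counter-based equivalent of computeTime above.
    with open(input_path, "rb") as f:
        data = pickle.load(f)
    years, months, days = Counter(), Counter(), Counter()
    for tweet in data:
        d = tweet[3]
        years[f"{d.year}"] += 1
        months[f"{d.year}{d.month:02d}"] += 1
        days[f"{d.year}{d.month:02d}{d.day:02d}"] += 1
    return dict(years), dict(months), dict(days)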
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the frequency of the words used. Returns a dict as output and stores the result dict in local storage. Tries to import the NLTK package to throw out stopwords, so we get more interesting data, and uses NLTK to tokenize the words and clean out short URLs and other unimportant tokens.
|
def computeFreqOfWords(inputData):
import pickle
data = None
result = {}
wordlist = []
with open(inputData,"rb") as w:
data = pickle.load(w)
for t in data:
sent = t[1]
words = sent.split(" ")
try:
import nltk
from nltk.tokenize import RegexpTokenizer
            stopWords = set(nltk.corpus.stopwords.words('english'))
            tokenizer = RegexpTokenizer(r'\w+')
            tokenWords = tokenizer.tokenize(sent)
            networds = set(["http", "co", "i"])
            words = list(set(tokenWords) - stopWords - networds)
except:
continue
finally:
wordlist.extend(words)
    # Build a filtered copy; removing items while iterating over the same list skips elements.
    wordlist = [word for word in wordlist if len(word) >= 3]
    for word in wordlist:
        result[word] = result.get(word, 0) + 1
# with open("data/"+ inputData + "_FreqOfWords.db","wb") as f:
# pickle.dump(result,f)
return result
|
[
"def set_freq(self):\n for site, tags in self.words_by_site.items():\n self.word_frequency[site] = defaultdict(int)\n words = tags.split(\" \")\n for word in words:\n # Save words containing no punctuation characters.\n match = [char in word for char in string.punctuation]\n if all(m is False for m in match) and len(word) > 3:\n self.word_frequency[site][word] += 1\n dump_as_json(self.word_frequency, self.freqs_file_path)\n return self.word_frequency",
"def _compute_global_tf(self, data):\n word_freq_per_document = {}\n if isinstance(data, Document):\n list_of_sentences = data.sentences\n else:\n list_of_sentences = data\n for sentence in list_of_sentences:\n words_in_sent = set()\n document_frequency = term_frequency(sentence, self.ignore_tokens, self.lower_case)\n for word in document_frequency:\n if not word in words_in_sent:\n word_freq_per_document[word] = word_freq_per_document.get(word, 0)+1\n words_in_sent.add(word)\n return word_freq_per_document",
"def load_words_and_counts(lang):\n word_counts = {}\n num_read = 0\n num_filtered_long = 0\n num_filtered_punct = 0\n with open(f\"{WIKISPELL_ROOT.value}/{lang}.word_counts.tsv\") as f:\n for line in f:\n num_read += 1\n word, count = line.strip().split(\"\\t\")\n if (\n MAX_CHAR_LENGTH.value is not None\n and len(word) > MAX_CHAR_LENGTH.value\n ):\n num_filtered_long += 1\n continue\n if all(c in UNICODE_SYMBOL for c in word):\n num_filtered_punct += 1\n continue\n word_counts[word] = int(count)\n\n if MAX_CHAR_LENGTH.value is not None:\n print(\n f\" Filtered out entries with >{MAX_CHAR_LENGTH.value} chars: \"\n f\"{format_fraction(num_filtered_long, num_read)}\"\n )\n\n if num_filtered_punct:\n print(\n \" Filtered out all-punctuation/symbol entries: \"\n f\"{format_fraction(num_filtered_punct, num_read)}\"\n )\n\n num_zeros = sum(int(c == 0) for c in word_counts.values())\n print(\n \" Number of zero-count words: \"\n f\"{format_fraction(num_zeros, len(word_counts))}\"\n )\n\n return word_counts",
"def initialize_terms_and_postings():\n global dictionary, postings\n stop_words= set(stopwords.words('english'))\n\n for id in corpus_files:\n f = open(corpus_files[id],'r')\n document = f.read()\n f.close()\n \n terms = tokenize(document) \n stopped_tokens = [i for i in terms if not i in stop_words]\n \n unique_terms = set(stopped_tokens)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n \n postings[term][id] = terms.count(term) # the value is the frequency of the term in the document\n\n #print(postings)",
"def gentermfreq(doclist, folderpath):\n wordcounts = dict()\n for document in doclist:\n path = folderpath + \"/\" + document\n file = open(path)\n txt = file.read()\n cleanedtxt = cleantext(txt) # returns the document term frequency\n wordcounts[document] = cleanedtxt\n return wordcounts",
"def get_word_freq_stats(searchType='word'):\n htmlQuery = copy_request_args()\n change_display_options(htmlQuery)\n langID = 0\n nWords = 1\n if 'n_words' in htmlQuery and int(htmlQuery['n_words']) > 1:\n nWords = int(htmlQuery['n_words'])\n if nWords > 10:\n nWords = 10\n if searchType not in ('word', 'lemma'):\n searchType = 'word'\n if 'lang1' in htmlQuery and htmlQuery['lang1'] in settings.languages:\n langID = settings.languages.index(htmlQuery['lang1'])\n else:\n return jsonify([])\n results = []\n for iWord in range(1, nWords + 1):\n htmlQuery['lang' + str(iWord)] = htmlQuery['lang1']\n partHtmlQuery = sc.qp.swap_query_words(1, iWord, copy.deepcopy(htmlQuery))\n esQuery = sc.qp.word_freqs_query(partHtmlQuery, searchType=searchType)\n # print(esQuery)\n hits = sc.get_words(esQuery)\n # return jsonify(hits)\n curFreqByRank = sentView.extract_cumulative_freq_by_rank(hits)\n buckets = []\n prevFreq = 0\n if searchType == 'lemma':\n freq_by_rank = settings.lemma_freq_by_rank\n else:\n freq_by_rank = settings.word_freq_by_rank\n for freqRank in sorted(freq_by_rank[langID]):\n bucket = {\n 'name': freqRank,\n 'n_words': 0\n }\n if freqRank in curFreqByRank:\n bucket['n_words'] = curFreqByRank[freqRank] / freq_by_rank[langID][freqRank]\n prevFreq = curFreqByRank[freqRank]\n else:\n bucket['n_words'] = prevFreq / freq_by_rank[langID][freqRank]\n buckets.append(bucket)\n results.append(buckets)\n return jsonify(results)",
"def get_words(doc):\n\n normalized = clean(doc)\n tokenized = tokenize(normalized)\n # Things to consider:\n # Accuracy is 59.8% when stopwords are removed (compare to 59.1%)\n # However, the classifier predicts \"I'm not happy\" as positive with\n # stopwords removed\n # and \"negative\" when they are left in. \n words = remove_stopwords(tokenized)\n # Return the unique set of words only\n return dict([(w,1) for w in words])",
"def get_word_frequencies(self):\n words = Counter()\n for ctree in self.get_ctrees():\n if 'word' in ctree.results:\n for word in ctree.results['word']['frequencies']:\n words.update({word['word'], int(word['count'])})\n return words",
"def _collect_words(self, data, init_words=None):\n logging.info('Building word list...')\n words = init_words if init_words is not None else {}\n for sample in tqdm(data['data']):\n for paragraph in sample['paragraphs']:\n # collect words in context\n for word in paragraph['context']:\n if word.text not in words:\n words[word.text] = 0\n else:\n words[word.text] += 1\n\n # collect words in question\n for qa in paragraph['qas']:\n for word in qa['question']:\n if word.text not in words:\n words[word.text] = 0\n else:\n words[word.text] += 1\n\n return words",
"def compute_word_freq(all_words):\n if len(all_words) < 1:\n print('Warning, empty corpus !')\n return {}\n\n unique_words = list(set(all_words.split(\" \")))\n n = len(unique_words)\n freq_dict = OrderedDict()\n for a_word in unique_words:\n freq = all_words.count(a_word) / n\n freq_dict[a_word] = freq\n\n return freq_dict",
"def gen_words(self, doc):\r\n doc = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#”“¥:%……&*()]+\".decode(\"utf8\"),\r\n \"\".decode(\"utf8\"), doc.decode('utf8'))\r\n suffix_indexes = extract_cand_words(doc, self.max_word_len)\r\n word_cands = {}\r\n # compute frequency and neighbors\r\n for suf in suffix_indexes:\r\n word = doc[suf[0]:suf[1]]\r\n if word not in word_cands:\r\n word_cands[word] = GetWordInfo(word)\r\n word_cands[word].update_att(doc[suf[0]-1:suf[0]], doc[suf[1]:suf[1]+1])\r\n\r\n # compute the tf and info_entropy\r\n doc_lens = len(doc)\r\n for word in word_cands:\r\n word_cands[word].compute_indexes(doc_lens)\r\n\r\n # compute PMI for every word, if len(word)>1\r\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\r\n\r\n for v in values:\r\n if len(v.text) == 1:\r\n continue\r\n v.compute_info_entropy(word_cands)\r\n return sorted(values, key=lambda v: v.freq, reverse=True)",
"def init_dic(self):\n self.word_dic = {}\n self.bigram = 0 # count counts the number of bigrams for Laplace smoothing\n for i in range(len(self.corpus)):\n ch = self.corpus[i]\n if ch not in self.word_dic:\n self.word_dic[ch] = {}\n # The number of times the word appears independently\n self.word_dic[ch][ch] = 1 + self.word_dic[ch].get(ch, 0)\n if i != len(self.corpus) - 1:\n ch_next = self.corpus[i + 1]\n # Count the frequency of occurrence of the word and the following word\n self.word_dic[ch][ch_next] = 1 + self.word_dic[ch].get(ch_next, 0)\n\n for key in self.word_dic.keys():\n self.bigram += len(self.word_dic[key].keys()) - 1 # Count the total number of all bigrams",
"def count_words(cleaned_corpus):\n unique_words = set(cleaned_corpus.split())\n word_frequency = {}\n for word in unique_words:\n word = word.lower()\n count = cleaned_corpus.count(word)\n word_frequency[word] = count\n return(word_frequency)",
"def words_frequency_dict(self, text, limit=False):\n\t\t\n\t\tif type(text) is str:\n\n\t\t\twords_dict = {}\n\t\t\twords_list = text.split()\n\t\t\twords_counter = Counter(words_list)\n\n\t\t\tif(limit != False):\n\n\t\t\t\twords_of_limit = words_counter.most_common(limit)\n\n\t\t\t\tfor i in range(len(words_of_limit)):\n\n\t\t\t\t\twords_dict[words_of_limit[i][0]] = words_of_limit[i][1]\n\n\t\t\telse:\n\n\t\t\t\twords_dict = dict(words_counter)\n\t\t\t\n\t\t\treturn words_dict\n\n\t\treturn",
"def get_freq(words):\r\n dic = {words[i]:0 for i in range(len(words))}\r\n for i in range(len(words)):\r\n dic[words[i]] = dic[words[i]] + 1\r\n return dic",
"def create_dictionary(messages):\n\n # *** START CODE HERE ***\n\n # create a frequency map\n freq_map = {}\n\n for message in messages:\n words = set(get_words(message))\n for word in words:\n if word not in freq_map:\n freq_map[word] = 0\n freq_map[word] += 1\n\n # get list of frequent words\n min_occurrence = 100\n frequent_words = [word for word, frequency in freq_map.items()\n if frequency >= min_occurrence]\n return {word: i for i, word in enumerate(frequent_words)}\n\n\n # *** END CODE HERE ***",
"def extract_frequent_words(self, number_of_words=0):\n if not self.content:\n logging.info('%s:FileInterpreter: Cannot compute frequency '\\\n 'of words, file %s content is empty.',\n script_name, self.filename)\n return\n\n # Parse content and separate words (tokenise words)\n # To avoid duplicates, we transform them all to lower case.\n tokenized_words = nltk.tokenize.word_tokenize( self.content.lower() )\n # Since we are using English, we use stopwords\n # defined in the english language.\n english_stopwords = nltk.corpus.stopwords.words('english')\n\n # Clean our word collection.\n # Exclude stopwords, single-character words and\n # non alphabetic.\n clean_tokenized_words = ( w.lower()\n for w in tokenized_words \n if w.isalpha()\n if len(w)>1\n if w.lower() not in english_stopwords )\n\n # Compute frequency of our clean word collection.\n frequency_words = nltk.FreqDist( w.lower()\n for w in clean_tokenized_words )\n\n # If a number of words to return is given (n),\n if (number_of_words):\n # then return the (n) most common words\n self.frequent_words = frequency_words.most_common(number_of_words)\n else:\n # otherwise return all words in ascending order of higher frequency.\n self.frequent_words = frequency_words.most_common()",
"def get_frequency(processed_text_list):\n # prop_dict - A dictionary of tokens and their respective proportions as a fraction of the total corpus\n # combined_dict - A dictionary whose values are both frequencies and proportions combined within a list\n # \"\"\"\n\n word_frequency = FreqDist(word for word in processed_text_list)\n\n# sorted_counts = sorted(word_frequency.items(), key = lambda x: x[1], reverse = True)\n# freq_dict = dict(sorted_counts)\n freq_dict = dict(word_frequency)\n# prop_dict = {key : freq_dict[key] * 1.0 / sum(freq_dict.values()) for key, value in freq_dict.items()}\n# combined_dict = {key : [freq_dict[key], freq_dict[key] * 1.0 / sum(freq_dict.values())] for key, value in freq_dict.items()}\n\n return freq_dict # , prop_dict, combined_dict",
"def word_frequencies(url):\n # open the url (html page)\n html = urlopen(url).read()\n\n # use beautifulsoup library to process the html\n soup = BeautifulSoup(html)\n\n # parse the text from html\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n strips = list(soup.stripped_strings)\n\n # regular expression to only take character with letter only from parsed text\n regex = '[a-zA-Z]+'\n result = {}\n\n # loop each pared text in set of strips so we only loop a set of unique word\n for strip in set(strips):\n # check if the value is a word (contains letter only)\n if re.search(regex, strip):\n word = re.search(regex, strip).group()\n # count the word in the strips array and append it to the result dict\n result[word] = strips.count(strip)\n return result"
] |
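If NLTK is unavailable, a dependency-free sketch of the word count is shown below; the helper name and the tiny stopword set are illustrative assumptions, and the input is the same pickled list of tweet tuples with the text at index 1.

import pickle
from collections import Counter

def compute_word_freq_plain(input_path, min_len=3):
    # Fallback without NLTK: lowercase, split on whitespace, drop a few obvious
    # stopwords and very short tokens, then count with Counter.
    stop = {"the", "a", "an", "and", "or", "to", "of", "in", "is", "it", "i", "rt", "http", "co"}
    with open(input_path, "rb") as f:
        data = pickle.load(f)
    counts = Counter()
    for tweet in data:
        for word in tweet[1].lower().split():
            if len(word) >= min_len and word not in stop:
                counts[word] += 1
    return dict(counts)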
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the frequency of the hashtags used. Returns a dict as output and stores the result dict in local storage.
|
def computeFreqOfHashtags(inputData):
import pickle
with open(inputData,"rb") as r:
data = pickle.load(r)
hashlist = []
result = {}
for t in data:
h = t[2]
hashlist.extend(h)
    for h in hashlist:
        result[h] = result.get(h, 0) + 1
# with open("data/"+ inputData + "_FreqOfHashtags.db", "wb") as r:
# pickle.dump(result, r)
# print(inputData + "_FreqOfHashtags.db was stored!")
return result
|
[
"def hashtagCount(words):\n htc=words.map(lambda w:w.lower()).filter(lambda x:len(x)>2).filter(lambda x:x[0]=='#')\n htc=htc.map(lambda x:(x,1))\n htc_one=htc.reduceByKey(lambda x,y:x+y)\n htc_total=htc_one.updateStateByKey(lambda x,y:sum(x+(y or 0)))\n return htc_total",
"def hashtags_distribution(data):\n\n tags_count = {}\n tags_to_objectid = {}\n hashtag_counts = []\n\n n = len(data['results'])\n avg = 0\n\n for row in data['results']:\n num_tags = len(row['tags'])\n\n if num_tags not in tags_to_objectid:\n tags_to_objectid[num_tags] = []\n tags_count[num_tags] = 0\n\n tags_to_objectid[num_tags].append(row['objectId'])\n tags_count[num_tags] += 1\n\n avg += num_tags\n hashtag_counts.append(num_tags)\n\n for k, v in tags_count.items():\n print \"%d hashtags: %d rows\" % (k, v)\n\n # compute average\n avg = avg / n\n\n sorted(hashtag_counts)\n\n print \"Total rows: %d\" % n\n print \"Average # of hashtags: %d\" % avg\n print \"Median # of hashtags: %d\" % median(hashtag_counts)",
"def hashtag_counter(text, do = 'extract'):\n import re\n from collections import Counter\n tags = re.findall(r'#\\w+', text)\n tags = \" \".join(tags)\n if do == 'count':\n hashtag_count = Counter(tags.split())\n return hashtag_count\n else: \n return tags",
"def freq_dict(self, text):\n freq = {}\n for char in text:\n if not char in freq:\n freq[char] = 0\n freq[char] += 1\n return freq",
"def set_freq(self):\n for site, tags in self.words_by_site.items():\n self.word_frequency[site] = defaultdict(int)\n words = tags.split(\" \")\n for word in words:\n # Save words containing no punctuation characters.\n match = [char in word for char in string.punctuation]\n if all(m is False for m in match) and len(word) > 3:\n self.word_frequency[site][word] += 1\n dump_as_json(self.word_frequency, self.freqs_file_path)\n return self.word_frequency",
"def popular_hashtags(terms):\n \n counter = Counter()\n\n terms_hash = [term for term in terms\n if term.startswith('#') and len(term) > 1]\n \n counter.update(terms_hash)\n \n # Print the first 5 most frequent words\n return counter.most_common(20)",
"def generate_hash_map(self):\n\n # clear the hash map\n self._hash_map.clear()\n\n for line in self._document_content:\n\n line = line.encode('utf-8')\n\n line = str(line).translate(PUNCTUATION_TRANS)\n words = line.split()\n\n for word in words:\n\n word = word.decode('utf-8-sig')\n word = PorterStemmer().stem(word)\n word = word.lower()\n\n if word.isalpha():\n if not self._is_stop_word(word):\n\n # if the word is not in hash\n if word not in self._hash_map:\n self._hash_map[word] = 1\n else:\n self._hash_map[word] += 1",
"def makeFreqMap(self, text: str) -> dict:\n freqMap = {}\n for char in text:\n if char not in freqMap.keys():\n freqMap[char] = 1\n else:\n freqMap[char] += 1\n\n return freqMap",
"def generate_freq_dict(input_text):\n result_dict = {}\n for word in input_text:\n word = word.lower()\n if word not in result_dict:\n result_dict[word] = 1\n else:\n result_dict[word] += 1\n return(result_dict)",
"def computeCountDict():\n countDict = {}\n # Run through each review's tf dictionary and increment countDict's (word, doc) pair\n for review in tfDict:\n # print(review)\n for word in review:\n if word in countDict:\n countDict[word] += 1\n else:\n countDict[word] = 1\n return countDict",
"def create_frequency_table(self, text) -> dict:\r\n words = self.word_tokenize_preprocessed(text)\r\n freqTable = dict()\r\n\r\n for word in words:\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable",
"def countTags(tweet_list):\n\n hashtags_count = {}\n \n for tweet in tweet_list:\n for tag in tweet[\"entities.hashtags\"]:\n # update hashtag_count\n hashtags_count[tag[\"text\"]] = hashtags_count.setdefault(tag[\"text\"], 0)\n hashtags_count[tag[\"text\"]] += 1\n return hashtags_count",
"def create_frequency_table(self):\n freq_table = {}\n for tok in self.tok_arr:\n entry = {}\n s_freq = self.spam_table.get(tok, 0)\n entry[\"spam_freq\"] = s_freq\n h_freq = self.ham_table.get(tok, 0)\n entry[\"ham_freq\"] = h_freq\n s_prob = (s_freq + 1 / float(self.uniq_s_toks)) / (self.total_s_toks + 1)\n entry[\"prob_spam\"] = s_prob\n h_prob = (h_freq + 1 / float(self.uniq_h_toks)) / (self.total_h_toks + 1)\n entry[\"prob_ham\"] = h_prob\n freq_table[tok] = entry\n return freq_table",
"def _count_frequencies(self, tokens: list) -> dict:\n frequencies = defaultdict(lambda: 0)\n\n for token in tokens:\n frequencies[token] += 1\n\n return frequencies",
"def get_freq(words):\r\n dic = {words[i]:0 for i in range(len(words))}\r\n for i in range(len(words)):\r\n dic[words[i]] = dic[words[i]] + 1\r\n return dic",
"def getUnigramFreq(unigrams): \n \n # init dict with tokens as the keys\n wordFreqDict = dict()\n for word in unigrams:\n if word in wordFreqDict.keys():\n wordFreqDict[word] = wordFreqDict[word] + 1 \n else:\n wordFreqDict[word] = 1\n\n return wordFreqDict",
"def compute_name_frequencies():\n # Count how often each name part (i.e. token) shows up across\n # the whole of the dataset or a sample.\n # This is very memory-intense and could be sent out to redis.\n # Doing it in redis is also icky because of the need to iterate\n # the data later, and because it would need to be fully reset\n # before each run of this. Maybe a hash would be a useful\n # structure here?\n pipe = kv.pipeline(transaction=False)\n pipe.delete(TOKEN_KEY)\n names_count = 0\n for idx, token in enumerate(iter_tokens()):\n pipe.hincrby(TOKEN_KEY, token, 1)\n names_count += 1\n if idx > 0 and idx % 10000 == 0:\n pipe.execute()\n pipe = kv.pipeline(transaction=False)\n pipe.execute()\n log.info(\"Names: %d, unique: %d\", names_count, kv.hlen(TOKEN_KEY))\n\n # Next, count how often each count occurs, i.e. make a histogram\n # of name frequency.\n counts = {}\n max_count = 0\n for _, count in kv.hscan_iter(TOKEN_KEY):\n count = int(count)\n # Leave out one-offs because they skew and aren't really\n # useful in any way.\n if count == 1:\n continue\n if count not in counts:\n counts[count] = 0\n counts[count] += 1\n # Find out what the maximum count is.\n max_count = max((count, max_count))\n\n log.info(\"Counts: %d, max: %d\", len(counts), max_count)\n total = 0\n pipe = kv.pipeline(transaction=False)\n pipe.delete(DIST_KEY)\n for idx in range(max_count, 1, -1):\n total += counts.get(idx, 0)\n pipe.hset(DIST_KEY, idx, total)\n if idx > 0 and idx % 10000 == 0:\n pipe.execute()\n pipe = kv.pipeline(transaction=False)\n log.info(\"Total: %d\", total)\n pipe.set(TOTAL_KEY, total)\n pipe.execute()",
"def countTags(file):\n\n with open(file, \"r\") as tweet_corpus:\n hashtags_count = {}\n \n for line in tweet_corpus.readlines():\n collected_tweet = json.loads(line)\n for tag in collected_tweet[\"entities.hashtags\"]:\n # update hashtag_count\n hashtags_count[tag[\"text\"]] = hashtags_count.setdefault(tag[\"text\"], 0)\n hashtags_count[tag[\"text\"]] += 1\n return hashtags_count",
"def make_freq_dict(s):\r\n s = normalize(s)\r\n words = s.split() \r\n d = {}\r\n for w in words:\r\n if w in d:\r\n d[w] +=1\r\n # print(w+' '+str(d[w]))\r\n else:\r\n d[w] =1 \r\n return d"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the frequency of each client, e.g. iPad, web. Then return a dict as output and store the result dict in a local data file.
|
def computeFreqOfClient(inputData):
import pickle
with open(inputData,"rb") as f:
data = pickle.load(f)
result = {}
for tweet in data:
client = tweet[4]
if client in result.keys():
result[client] = result[client] + 1
else:
result[client] = 1
# with open("data/"+ inputData + "_FreqOfClient.db", "wb") as f:
# pickle.dump(result, f)
    # print(inputData + "_FreqOfClient.db was stored!")
return result
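# A hedged usage sketch (not part of the original snippet): the pickle file is
# assumed to hold an iterable of tweet records whose fifth field (index 4) is
# the client name, e.g. [("id", "user", "text", "date", "iPad"), ...]. The path
# below is hypothetical.
# >>> computeFreqOfClient("data/tweets.db")
# {'iPad': 1, 'web': 2}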
|
[
"def Histogram(self):\n\n hist = {}\n\n hunt = aff4.FACTORY.Open(\"aff4:/hunts/%s\" % self.session_id,\n age=aff4.ALL_TIMES, token=self.token)\n\n log = hunt.GetValuesForAttribute(hunt.Schema.LOG)\n\n client_ids = [l.client_id for l in log]\n\n to_read = []\n\n while client_ids:\n clients = aff4.FACTORY.MultiOpen(\n [\"aff4:/%s\" % client_id for client_id in client_ids[:1000]])\n client_ids = client_ids[1000:]\n\n for client in clients:\n for user in client.Get(client.Schema.USER):\n to_read.append(\"aff4:/%s/analysis/RunKeys/%s/RunOnce\" %\n (client.client_id, user.username))\n to_read.append(\"aff4:/%s/analysis/RunKeys/%s/Run\" %\n (client.client_id, user.username))\n to_read.append(\"aff4:/%s/analysis/RunKeys/System/RunOnce\" %\n client.client_id)\n to_read.append(\"aff4:/%s/analysis/RunKeys/System/Run\" %\n client.client_id)\n\n print \"Processing %d collections.\" % len(to_read)\n collections_done = 0\n\n while to_read:\n # Only do 1000 at a time.\n collections_done += len(to_read[:1000])\n collections = aff4.FACTORY.MultiOpen(to_read[:1000], token=self.token)\n to_read = to_read[1000:]\n\n for collection in collections:\n try:\n for runkey in collection:\n key = runkey.filepath.replace(\"\\\"\", \"\")\n key = re.sub(r\"Users\\\\[^\\\\]+\\\\\", r\"Users\\\\USER\\\\\", key)\n hist.setdefault(key, set()).add(str(collection.urn)[6:6+18])\n except AttributeError:\n pass\n\n print \"%d collections done.\" % collections_done\n\n rk_list = sorted(hist.iteritems(), reverse=True, key=lambda (k, v): len(v))\n for rk, freq in rk_list:\n print \"%d %s\" % (len(freq), rk)\n\n return rk_list",
"def ip_frequencies(self):\n frequencies = {}\n for ip in self.data.keys():\n frequency = 0\n ip_info = self.data[ip] # Instance of IpInfo\n for line_type in ip_info.data.keys():\n if isinstance(ip_info.data[line_type], int):\n frequency += ip_info.data[line_type]\n else: # the None key\n frequency += len(ip_info.data[line_type])\n frequencies[ip] = frequency\n return frequencies",
"def estimate_number(self, client_users, healer):\n\t\tnow = settings.GET_NOW().date()\n\t\tappts = Appointment.objects.filter(client__user__in=client_users, healer=healer).before_date(now)\n\t\tappts_count = {}\n\t\tfor appt in appts:\n\t\t\tif not appt.client.user in appts_count:\n\t\t\t\tappts_count[appt.client.user] = 0\n\t\t\tif appt.is_single():\n\t\t\t\tappts_count[appt.client.user] += 1\n\t\t\telse:\n\t\t\t\tend_date = appt.end_date if appt.is_finite() and appt.end_date<now else now\n\t\t\t\tif appt.repeat_period == rrule.DAILY:\n\t\t\t\t\tappts_count[appt.client.user] += (end_date - appt.start_date).days\n\t\t\t\tif appt.repeat_period == rrule.WEEKLY:\n\t\t\t\t\tappts_count[appt.client.user] += (end_date - appt.start_date).days/7\n\t\treturn appts_count",
"def frequencies(seq): # real signature unknown; restored from __doc__\n return {}",
"def calculateFrequency(self):\n repeat = 0\n f =0.0\n with i2clib.I2CMaster() as b:\n results = b.transaction(\n reading(self.add, 5)\n )\n\n uF = results[0][0]&0x3F\n lF = results[0][1]\n # this is probably not the best way of doing this but I was having issues with the\n # frequency being off by as much as 1.5 MHz\n current_freq = round((float(round(int(((int(uF)<<8)+int(lF))*cof/4-22500)/100000)/10)-.2)*10)/10\n return current_freq",
"def GhoseCrippenFingerprint(mol, count=False):\n order, patts = _ReadPatts(\n os.path.dirname(os.path.abspath(__file__)) + \"/Crippen.txt\"\n )\n\n GCres = dict()\n for sma in patts:\n match = mol.GetSubstructMatches(patts[sma][0][1], False, False)\n temp = len([i[0] for i in match])\n GCres.update({sma: temp})\n\n res = {}\n if count == False:\n for i in GCres:\n if GCres[i] > 0:\n res.update({i: 1})\n else:\n res.update({i: 0})\n else:\n res = GCres\n\n return res",
"def calculate_query_idfi(self,query):\n \n query_dic = {q: {\n \"n_i\":query.count(q),\n \"idfi\": 0.0 if(self.archive['vocabulary'].get(q) and self.archive['vocabulary'][q]['n_i']>=len(self.archive['documents'])/2)\n else math.log((len(self.archive['documents'])- query.count(q) +0.5) /\n (query.count(q)+0.5),2)\n } for q in set(process_line(query,self.archive['stopwords']))}\n return query_dic",
"def generate_data_client(min=1000, max=1100):\n percentage = np.random.uniform()\n client_id = np.random.randint(min, max)\n return {\n \"clientid\": f\"{client_id}\".zfill(10),\n \"pageGender\": random.choices(['M', 'F'], [percentage, 1 - percentage])[0],\n #\"timestamp\": str(datetime.datetime.now())\n }",
"def generate_freq_dict(input_text):\n result_dict = {}\n for word in input_text:\n word = word.lower()\n if word not in result_dict:\n result_dict[word] = 1\n else:\n result_dict[word] += 1\n return(result_dict)",
"def freq_dict(self, text):\n freq = {}\n for char in text:\n if not char in freq:\n freq[char] = 0\n freq[char] += 1\n return freq",
"def get_freq(words):\r\n dic = {words[i]:0 for i in range(len(words))}\r\n for i in range(len(words)):\r\n dic[words[i]] = dic[words[i]] + 1\r\n return dic",
"def get_frequency(processed_text_list):\n # prop_dict - A dictionary of tokens and their respective proportions as a fraction of the total corpus\n # combined_dict - A dictionary whose values are both frequencies and proportions combined within a list\n # \"\"\"\n\n word_frequency = FreqDist(word for word in processed_text_list)\n\n# sorted_counts = sorted(word_frequency.items(), key = lambda x: x[1], reverse = True)\n# freq_dict = dict(sorted_counts)\n freq_dict = dict(word_frequency)\n# prop_dict = {key : freq_dict[key] * 1.0 / sum(freq_dict.values()) for key, value in freq_dict.items()}\n# combined_dict = {key : [freq_dict[key], freq_dict[key] * 1.0 / sum(freq_dict.values())] for key, value in freq_dict.items()}\n\n return freq_dict # , prop_dict, combined_dict",
"def getFlightDict():\n \n d1 = d2 = {}\n \n f = shelve.open(filename3) \n d1 = f\n for sk in d1.keys():\n k = int(sk) # convert the string key in the shelve\n d2[k] = d1[sk] # to an int for the dictionary\n\n return d2",
"def set_freq(self):\n for site, tags in self.words_by_site.items():\n self.word_frequency[site] = defaultdict(int)\n words = tags.split(\" \")\n for word in words:\n # Save words containing no punctuation characters.\n match = [char in word for char in string.punctuation]\n if all(m is False for m in match) and len(word) > 3:\n self.word_frequency[site][word] += 1\n dump_as_json(self.word_frequency, self.freqs_file_path)\n return self.word_frequency",
"def computeCountDict():\n countDict = {}\n # Run through each review's tf dictionary and increment countDict's (word, doc) pair\n for review in tfDict:\n # print(review)\n for word in review:\n if word in countDict:\n countDict[word] += 1\n else:\n countDict[word] = 1\n return countDict",
"def count_freq(self):\n freq = {}\n for n in self.ngrams:\n freq[n] = freq.get(n, 0) + 1\n return freq",
"def dist_by_genres(self):\n genres = {}\n for record in self.data:\n for genre in record[2]:\n genres[genre] = genres.setdefault(genre, 0) + 1\n tmp_genres = [[x, genres[x]] for x in genres]\n tmp_genres.sort(key=lambda x: -int(x[1]))\n return collections.OrderedDict(tmp_genres)",
"def make_freq_dict(s):\r\n s = normalize(s)\r\n words = s.split() \r\n d = {}\r\n for w in words:\r\n if w in d:\r\n d[w] +=1\r\n # print(w+' '+str(d[w]))\r\n else:\r\n d[w] =1 \r\n return d",
"def get_count_residu(liste):\n i = 0\n dico_count = {}\n for i in range(len(liste)) :\n if (str(liste[i][\"residu\"])) not in dico_count :\n dico_count[str(liste[i][\"residu\"])] = 1\n else :\n dico_count[str(liste[i][\"residu\"])] += 1\n i += 1\n return dico_count"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the lambda function alias data
|
def get_function_alias_data(function_name, env):
lambda_client = _get_lambda()
function_name = function_name.format(ENV=f'{env}')
function_alias_data = {
'FunctionName': f'{function_name}',
'Name': f'{env}'
}
function_alias_data = lambda_client.get_alias(**function_alias_data)
return function_alias_data
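# Hedged usage sketch with hypothetical names: given a templated function name
# such as "my-service-{ENV}" and env "dev", this returns the alias metadata that
# lambda_client.get_alias() provides (including the 'AliasArn' used elsewhere).
# >>> get_function_alias_data("my-service-{ENV}", "dev")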
|
[
"def get_function_aliases(trend_type):\n return {\n \"trend_percentage()\": Alias(\n lambda aggregate_filter: [\n \"trend_percentage\",\n CORRESPONDENCE_MAP[aggregate_filter.operator]\n if trend_type == IMPROVED\n else aggregate_filter.operator,\n 1 + (aggregate_filter.value.value * (-1 if trend_type == IMPROVED else 1)),\n ],\n [\"percentage\", \"transaction.duration\"],\n ),\n \"trend_difference()\": Alias(\n lambda aggregate_filter: [\n \"trend_difference\",\n CORRESPONDENCE_MAP[aggregate_filter.operator]\n if trend_type == IMPROVED\n else aggregate_filter.operator,\n -1 * aggregate_filter.value.value\n if trend_type == IMPROVED\n else aggregate_filter.value.value,\n ],\n [\"minus\", \"transaction.duration\"],\n ),\n \"t_test()\": Alias(\n lambda aggregate_filter: [\n \"t_test\",\n aggregate_filter.operator,\n aggregate_filter.value.value,\n ],\n None,\n ),\n \"count_percentage()\": Alias(\n lambda aggregate_filter: [\n \"count_percentage\",\n aggregate_filter.operator,\n aggregate_filter.value.value,\n ],\n [\"percentage\", \"count\"],\n ),\n }",
"def get_lambda(self, volume):\n return self._lambda_dict[volume]",
"def getrawdata(self):\n return lambda wildcards: self.samples[wildcards.sample]",
"def invoked_function_arn(self):\n return 'arn:aws:lambda:{0}:{1}:function:func-name'.format(\n 'us-west-2', Utility.aws_account_id())",
"def alias():\n\n self = object.__new__(RpiDns)\n self.__init__()\n self.cur.execute(\"SELECT alias FROM dns\")\n data = tuple((x[0] for x in self.cur.fetchall()))\n self.close()\n\n return data",
"def get_aliases(self):\n return self.aliases",
"def get_ast_data(self, orig_sig, token_value_key=None):\n dict_keys = self._get_dict_keys(orig_sig, token_value_key)\n return self.eval_fun_dict.get(dict_keys[0], {}).get(dict_keys[1], None)",
"def compiled_function(self):\n try:\n return self._compiled_function\n except AttributeError:\n from .general import NameLookUp\n arg_defs = ', '.join(NameLookUp.pythonize_name(name) for name in self.used_variable_list)\n f = self._compiled_function = eval('lambda ' + arg_defs + ': ' + self.compiled, self.eval_globals)\n return f",
"def _gen_alias(self):\n ss = '__Alias{0}'.format(self.anon_alias_ix)\n self.anon_alias_ix += 1\n return ss",
"def get_all_lambdas():\n return lc.list_functions()['Functions']",
"def get_function_name_at(self, _ea):\n\t\treturn GetFunctionName(_ea)",
"def func_info(self):\n return self._func_info",
"def alias(self):\n return self.sys_info['alias']",
"def get_numpy_function_call(self, spec, params, alias=None):\n function = spec['callee']['name']\n if alias is not None and alias != function:\n function = alias\n\n function = self.translate_functions[\n function] if function in self.translate_functions else function\n # Evaluates if column name is wrapped in a col() function call\n arguments = ', '.join(\n [self.parse(x, params) for x in spec['arguments']])\n # function_name = spec['callee']['name']\n result = \" np.{}({})\".format(function, arguments)\n return result",
"def rgetLambda(self):\n return _core.CGPSumCache_rgetLambda(self)",
"def extract_lambda_source(f):\n argspec = getfullargspec(f)\n arg_strings = []\n # In Python 2 you can have destructuring arguments to functions. This\n # results in an argspec with non-string values. I'm not very interested in\n # handling these properly, but it's important to not crash on them.\n bad_lambda = False\n for a in argspec.args:\n if isinstance(a, (tuple, list)): # pragma: no cover\n arg_strings.append(\"(%s)\" % (\", \".join(a),))\n bad_lambda = True\n else:\n assert isinstance(a, str)\n arg_strings.append(a)\n if argspec.varargs:\n arg_strings.append(\"*\" + argspec.varargs)\n elif argspec.kwonlyargs:\n arg_strings.append(\"*\")\n for a in argspec.kwonlyargs or []:\n default = (argspec.kwonlydefaults or {}).get(a)\n if default:\n arg_strings.append(\"{}={}\".format(a, default))\n else:\n arg_strings.append(a)\n\n if arg_strings:\n if_confused = \"lambda %s: <unknown>\" % (\", \".join(arg_strings),)\n else:\n if_confused = \"lambda: <unknown>\"\n if bad_lambda: # pragma: no cover\n return if_confused\n try:\n source = inspect.getsource(f)\n except IOError:\n return if_confused\n\n source = LINE_CONTINUATION.sub(\" \", source)\n source = WHITESPACE.sub(\" \", source)\n source = source.strip()\n assert \"lambda\" in source\n\n tree = None\n\n try:\n tree = ast.parse(source)\n except SyntaxError:\n for i in hrange(len(source) - 1, len(\"lambda\"), -1):\n prefix = source[:i]\n if \"lambda\" not in prefix:\n break\n try:\n tree = ast.parse(prefix)\n source = prefix\n break\n except SyntaxError:\n continue\n if tree is None:\n if source.startswith(\"@\"):\n # This will always eventually find a valid expression because\n # the decorator must be a valid Python function call, so will\n # eventually be syntactically valid and break out of the loop. Thus\n # this loop can never terminate normally, so a no branch pragma is\n # appropriate.\n for i in hrange(len(source) + 1): # pragma: no branch\n p = source[1:i]\n if \"lambda\" in p:\n try:\n tree = ast.parse(p)\n source = p\n break\n except SyntaxError:\n pass\n\n if tree is None:\n return if_confused\n\n all_lambdas = extract_all_lambdas(tree)\n aligned_lambdas = [l for l in all_lambdas if args_for_lambda_ast(l) == argspec.args]\n if len(aligned_lambdas) != 1:\n return if_confused\n lambda_ast = aligned_lambdas[0]\n assert lambda_ast.lineno == 1\n\n # If the source code contains Unicode characters, the bytes of the original\n # file don't line up with the string indexes, and `col_offset` doesn't match\n # the string we're using. We need to convert the source code into bytes\n # before slicing.\n #\n # Under the hood, the inspect module is using `tokenize.detect_encoding` to\n # detect the encoding of the original source file. We'll use the same\n # approach to get the source code as bytes.\n #\n # See https://github.com/HypothesisWorks/hypothesis/issues/1700 for an\n # example of what happens if you don't correct for this.\n #\n # Note: if the code doesn't come from a file (but, for example, a doctest),\n # `getsourcefile` will return `None` and the `open()` call will fail with\n # an OSError. Or if `f` is a built-in function, in which case we get a\n # TypeError. In both cases, fall back to splitting the Unicode string.\n # It's not perfect, but it's the best we can do.\n #\n # Note 2: You can only detect the encoding with `tokenize.detect_encoding`\n # in Python 3.2 or later. 
But that's okay, because the only version that\n # affects for us is Python 2.7, and 2.7 doesn't support non-ASCII identifiers:\n # https://www.python.org/dev/peps/pep-3131/. In this case we'll get an\n # TypeError again because we set detect_encoding to None above.\n #\n try:\n with open(inspect.getsourcefile(f), \"rb\") as src_f:\n encoding, _ = detect_encoding(src_f.readline)\n\n source_bytes = source.encode(encoding)\n source_bytes = source_bytes[lambda_ast.col_offset :].strip()\n source = source_bytes.decode(encoding)\n except (OSError, TypeError, IOError):\n source = source[lambda_ast.col_offset :].strip()\n\n # This ValueError can be thrown in Python 3 if:\n #\n # - There's a Unicode character in the line before the Lambda, and\n # - For some reason we can't detect the source encoding of the file\n #\n # because slicing on `lambda_ast.col_offset` will account for bytes, but\n # the slice will be on Unicode characters.\n #\n # In practice this seems relatively rare, so we just give up rather than\n # trying to recover.\n try:\n source = source[source.index(\"lambda\") :]\n except ValueError:\n return if_confused\n\n for i in hrange(len(source), len(\"lambda\"), -1): # pragma: no branch\n try:\n parsed = ast.parse(source[:i])\n assert len(parsed.body) == 1\n assert parsed.body\n if isinstance(parsed.body[0].value, ast.Lambda):\n source = source[:i]\n break\n except SyntaxError:\n pass\n lines = source.split(\"\\n\")\n lines = [PROBABLY_A_COMMENT.sub(\"\", l) for l in lines]\n source = \"\\n\".join(lines)\n\n source = WHITESPACE.sub(\" \", source)\n source = SPACE_FOLLOWS_OPEN_BRACKET.sub(\"(\", source)\n source = SPACE_PRECEDES_CLOSE_BRACKET.sub(\")\", source)\n source = source.strip()\n return source",
"def get(self, name):\n\n return self._aliases[name]",
"def getAliases(self):\n return self.__aliases;",
"def lambda_(self) -> float:\n return self._lambda"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the lambda function configuration and code package to clone, and save them in a pickle file
|
def pull(function_name: str, env: str):
try:
lambda_client = _get_lambda()
function_name = function_name.format(ENV=f'{env}')
function_alias_data = get_function_alias_data(function_name, f'{env}')
function_data = lambda_client.get_function(
FunctionName=f"{function_alias_data['AliasArn']}",
Qualifier=f'{env}'
)
function_data.pop("ResponseMetadata")
code_url = function_data.pop("Code")["Location"]
http = urllib3.PoolManager()
response = http.request("GET", code_url)
if not 200 <= response.status < 300:
raise Exception(f"Failed to download function code: {response}")
function_code = response.data
new_function_data = {
n: v
for n, v in function_data["Configuration"].items()
if n in (
"Runtime",
"Role",
"Handler",
"Description",
"Timeout",
"MemorySize",
"Publish",
"Environment",
)
}
# Provide function code zip data
new_function_data["Code"] = {"ZipFile": function_code}
with open(os.path.join(dir_path, "dict.pickle"), "wb+") as pickle_out:
pickle.dump(new_function_data, pickle_out)
except Exception as e:
print(e)
|
[
"def build_archive(mod, cache):\n\n mod_pathname = os.path.abspath(os.path.dirname(__file__) + \"/../lambdas/{}.py\".format(mod))\n awsflow_basedir = os.path.abspath(os.path.dirname(__file__) + \"/../../\")\n\n pkg_dir_suffix = \".lambda\"\n\n if cache:\n # Instead of generating a new temporary directory, reuse the existing one if existing,\n # so that we can avoid re-downloading all the dependencies again. this saves lots of time.\n # The cache is valid for any lamda function defined internally in the awsflow package.\n pkg_dir = \"/tmp/awsflow{}-{}\".format(pkg_dir_suffix, cache)\n\n # check if package directory is empty.\n pkg_dir_empty = not os.path.exists(pkg_dir)\n\n # make sure that the directory exists.\n local(\"mkdir -p {}\".format(pkg_dir))\n else:\n pkg_dir = mkdtemp(pkg_dir_suffix)\n\n logging.info(\"Assembling archive for lambda function ...\")\n\n local('cp {mod_pathname} {pkg_dir}'.format(mod_pathname=mod_pathname, pkg_dir=pkg_dir))\n\n if not cache or pkg_dir_empty:\n local('pip-3.6 install {awsflow_basedir} --find-links {awsflow_basedir} --target {pkg_dir} --upgrade'.format(\n awsflow_basedir=awsflow_basedir, pkg_dir=pkg_dir))\n else:\n logging.info(\"Using cached package directory\")\n\n local('cp -r {awsflow_basedir}/awsflow {pkg_dir}'.format(awsflow_basedir=awsflow_basedir,\n pkg_dir=pkg_dir))\n make_archive(base_name=pkg_dir, format='zip', root_dir=pkg_dir)\n\n logging.info(\"Archive ready.\")\n\n archive_contents = open('{}.zip'.format(pkg_dir), \"rb\").read()\n\n if not cache:\n local(\"rm -rf {pkg_dir}.zip {pkg_dir}\".format(pkg_dir=pkg_dir))\n\n return archive_contents",
"def _generate_lambda(appName, _lambda, roleARN, config, projPath):\n\n if( not os.path.exists(projPath+'/.tmp') ):\n os.mkdir(projPath+'/.tmp')\n\n if( not os.path.isfile(projPath+'/.tmp/dist.zip') ):\n AWSSetup._compress_app_package(\n projPath+'/.tmp/dist',\n projPath+'/.tmp/dist.zip',\n ['.git/']\n )\n\n funcName = appName+'-uxy-app-'+config['app:stage']\n zipFile = open(projPath+'/.tmp/dist.zip', 'rb')\n zipFileBin = zipFile.read()\n zipFile.close()\n\n statusCode = AWSSetup._function_exists(funcName, _lambda)\n if( statusCode == AWSSetup.FUNCTION_NOT_FOUND ):\n runtime = None\n if( config['app:runtime'] == 'go' ):\n runtime = 'go1.x'\n if( config['app:runtime'] == 'python' ):\n runtime = 'python3.9'\n\n AWSSetup._log(\"+ Creating lambda function...\")\n AWSSetup._log(\"+ Runtime: \"+runtime)\n response = _lambda.create_function(\n FunctionName = funcName,\n Runtime = runtime,\n Role = roleARN,\n Handler = config['aws:config']['lambda:handler'],\n Code = {\n 'ZipFile' : zipFileBin\n },\n Timeout = config['aws:config']['lambda:timeout']\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n elif ( statusCode == AWSSetup.FUNCTION_FOUND ):\n AWSSetup._log('+ Updating lambda function...')\n response = _lambda.update_function_code(\n FunctionName = funcName,\n ZipFile = zipFileBin\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n else:\n AWSSetup._log('=> ERROR: error getting lambda function')\n response = {}\n\n\n return response",
"def build_lambda():\n\n try:\n os.system(\"mkdir -p ./build\")\n os.system(\"cp -r ./lambda ./build\")\n os.system(\"pip3 install -r ./build/lambda/requirements.txt -t ./build/lambda\")\n shutil.make_archive(\"./build/lambda\", 'zip', \"./build/lambda\")\n os.system(\"rm -rf ./build/lambda\")\n\n print(\"Lambda deployment package built!\")\n\n except Exception as e:\n print(f\"Error building deployment package. Exception: {e}.\")",
"def build(function_name=None):\n if not function_name:\n abort('Must provide function_name')\n\n lambda_root = os.path.join(LAMBDA_DIR, function_name)\n module_dir = os.path.join(lambda_root, function_name)\n lambda_config_dir = os.path.join(lambda_root, LAMBDA_CONFIG_SUBDIR)\n staging_dir = os.path.join(lambda_root, STAGING_SUBDIR)\n builds_dir = os.path.join(lambda_root, BUILDS_SUBDIR)\n build_filename = '{0}-{1}.zip'.format(\n datetime.datetime.now().isoformat().replace(':', '.'), function_name)\n\n # Erase previous runs of the build task.\n local('rm -rf {0}'.format(staging_dir))\n\n # Set up staging and builds directories.\n local('mkdir -p {0}'.format(staging_dir))\n local('mkdir -p {0}'.format(builds_dir))\n\n # Install the lambda specific requirements.\n local('pip install -r {0}/requirements.txt -t {1}'.format(lambda_root, staging_dir))\n\n # Copy the top level *.py (e.g. index.py) and lambda_config dir into the staging_dir.\n local('cp -R {0}/*.py {1}'.format(lambda_root, staging_dir))\n local('cp -R {0} {1}'.format(lambda_config_dir, staging_dir))\n\n # Copy the module directory into the staging dir.\n local('cp -R {0} {1}'.format(module_dir, staging_dir))\n\n # Zip the whole thing up, and move it to the builds dir.\n local('cd {0}; zip -r {1} ./*; mv {1} {2}'.format(staging_dir, build_filename, builds_dir))",
"def store_lambda_constants(self):\n with open (\"lambda/constants.json\", \"w\") as constants_file:\n constants_file.write(json.dumps(self.constants, indent=4,\n sort_keys=True))",
"def zip_lambda():\n # Don't zip these files\n ignore_files = [\"controller.zip\", \"role_policy.json\"] \n \n # Zip the files and store them in a buffer\n zip_data = BytesIO()\n zipf = zipfile.ZipFile(zip_data, \"w\")\n for root, dirs, files in os.walk(\"lambda\"):\n for fl in files:\n if fl not in ignore_files:\n path_to_file = os.path.join(root, fl)\n file_key = path_to_file[7:]\n zipf.write(path_to_file, arcname=file_key)\n zipf.close()\n \n # Write the buffer to a variable and return it\n zip_data.seek(0)\n data = zip_data.read()\n zip_data.close()\n return data",
"def lambda_module(request):\n\n # Inject environment variables\n backup_environ = {}\n for key, value in request.param.get(\"environ\", {}).items():\n if key in os.environ:\n backup_environ[key] = os.environ[key]\n os.environ[key] = value\n\n # Add path for Lambda function\n sys.path.insert(\n 0,\n os.path.join(\n os.environ[\"LAMBDA_DIR\"],\n request.param[\"function_dir\"],\n ),\n )\n\n # Save the list of previously loaded modules\n prev_modules = list(sys.modules.keys())\n\n # Return the function module\n module = importlib.import_module(request.param[\"module_name\"])\n yield module\n\n # Delete newly loaded modules\n new_keys = list(sys.modules.keys())\n for key in new_keys:\n if key not in prev_modules:\n del sys.modules[key]\n\n # Delete function module\n del module\n\n # Remove the Lambda function from path\n sys.path.pop(0)\n\n # Restore environment variables\n for key in request.param.get(\"environ\", {}).keys():\n if key in backup_environ:\n os.environ[key] = backup_environ[key]\n else:\n del os.environ[key]",
"def upload_lambda():\n\n s3 = session.resource('s3')\n\n try:\n s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\").upload_file('./build/lambda.zip', 'lambda.zip')\n print(\"Lambda deployment package uploaded to S3!\")\n\n except Exception as e:\n print(f\"Error uploading deployment package. Exception: {e}.\")",
"def download_lambda_handler(event: Dict[str, Any], _) -> str:\n LOGGER.info('Invoked with event %s', event)\n\n binary = CARBON_BLACK.select(Binary, event['md5'])\n download_path = _download_from_carbon_black(binary)\n metadata = _build_metadata(binary)\n s3_object_key = _upload_to_s3(binary.md5, download_path, metadata)\n\n # Truncate and remove the downloaded file (os.remove does not work as expected in Lambda).\n with open(download_path, 'w') as file:\n file.truncate()\n os.remove(download_path)\n\n return s3_object_key",
"def task_update(mod, func, cache):\n\n cli = boto3.client('lambda')\n\n archive_contents = build_archive(mod, cache)\n\n try:\n res = cli.update_function_code(FunctionName=func,\n ZipFile=archive_contents)\n except Exception as e:\n fatal(\"Operation failed: {}\".format(e))\n\n logging.info(\"Operation completed: {}\".format(res[\"FunctionArn\"]))",
"def backup_config():\n global backup_filepath, kwargs_backup\n kwargs_backup = sg_kwargs.copy()\n if sg_kwargs.get(\"title\"):\n del sg_kwargs[\"title\"]\n backup_filepath = Package.config_filepath.with_name(\"config_backup.json\")\n try:\n Package.config_filepath.replace(backup_filepath)\n print(f\"\\n ⓘ config.json moved to {backup_filepath}\")\n except FileNotFoundError:\n print(\n f\"\\n ⓘ {Package.config_filepath} doesn't exist yet - no backup required.\"\n )",
"def update_lambda():\n\n client = session.client('lambda')\n\n try:\n client.update_function_code(\n FunctionName='process_csv',\n S3Key='lambda.csv',\n S3Bucket=f\"lambda-source-{os.environ['AWS_ACCOUNT']}\",\n Publish=True\n )\n print(\"Lambda function published!\")\n\n except Exception as e:\n print(f\"Error publishing lambda. Exception: {e}.\")",
"def create_lambda_zip(self, prefix='lambda_package', handler_file=None,\n minify=True, exclude=None, use_precompiled_packages=True, include=None, venv=None):\n import pip\n\n print(\"Packaging project as zip...\")\n\n if not venv:\n if 'VIRTUAL_ENV' in os.environ:\n venv = os.environ['VIRTUAL_ENV']\n elif os.path.exists('.python-version'): # pragma: no cover\n logger.debug(\"Pyenv's local virtualenv detected.\")\n try:\n subprocess.check_output('pyenv', stderr=subprocess.STDOUT)\n except OSError:\n print(\"This directory seems to have pyenv's local venv\"\n \"but pyenv executable was not found.\")\n with open('.python-version', 'r') as f:\n env_name = f.read()[:-1]\n logger.debug('env name = {}'.format(env_name))\n bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')\n venv = bin_path[:bin_path.rfind(env_name)] + env_name\n logger.debug('env path = {}'.format(venv))\n else: # pragma: no cover\n print(\"Zappa requires an active virtual environment.\")\n quit()\n\n cwd = os.getcwd()\n zip_fname = prefix + '-' + str(int(time.time())) + '.zip'\n zip_path = os.path.join(cwd, zip_fname)\n\n # Files that should be excluded from the zip\n if exclude is None:\n exclude = list()\n\n # Exclude the zip itself\n exclude.append(zip_path)\n\n def splitpath(path):\n parts = []\n (path, tail) = os.path.split(path)\n while path and tail:\n parts.append(tail)\n (path, tail) = os.path.split(path)\n parts.append(os.path.join(path, tail))\n return map(os.path.normpath, parts)[::-1]\n split_venv = splitpath(venv)\n split_cwd = splitpath(cwd)\n\n # Ideally this should be avoided automatically,\n # but this serves as an okay stop-gap measure.\n if split_venv[-1] == split_cwd[-1]: # pragma: no cover\n print(\n \"Warning! Your project and virtualenv have the same name! 
You may want \"\n \"to re-create your venv with a new name, or explicitly define a \"\n \"'project_name', as this may cause errors.\"\n )\n\n # First, do the project..\n temp_project_path = os.path.join(tempfile.gettempdir(), str(int(time.time())))\n\n if minify:\n excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]\n copytree(cwd, temp_project_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(cwd, temp_project_path, symlinks=False)\n\n # Then, do the site-packages..\n temp_package_path = os.path.join(tempfile.gettempdir(), str(int(time.time() + 1)))\n if os.sys.platform == 'win32':\n site_packages = os.path.join(venv, 'Lib', 'site-packages')\n else:\n site_packages = os.path.join(venv, 'lib', 'python2.7', 'site-packages')\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages, temp_package_path, symlinks=False)\n\n # We may have 64-bin specific packages too.\n site_packages_64 = os.path.join(venv, 'lib64', 'python2.7', 'site-packages')\n if os.path.exists(site_packages_64):\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages_64, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages_64, temp_package_path, symlinks=False)\n\n copy_tree(temp_package_path, temp_project_path, update=True)\n\n # Then the pre-compiled packages..\n if use_precompiled_packages:\n installed_packages_name_set = {package.project_name.lower() for package in\n pip.get_installed_distributions()}\n\n for name, details in lambda_packages.items():\n if name.lower() in installed_packages_name_set:\n tar = tarfile.open(details['path'], mode=\"r:gz\")\n for member in tar.getmembers():\n # If we can, trash the local version.\n if member.isdir():\n shutil.rmtree(os.path.join(temp_project_path, member.name), ignore_errors=True)\n continue\n\n tar.extract(member, temp_project_path)\n\n # If a handler_file is supplied, copy that to the root of the package,\n # because that's where AWS Lambda looks for it. 
It can't be inside a package.\n if handler_file:\n filename = handler_file.split(os.sep)[-1]\n shutil.copy(handler_file, os.path.join(temp_project_path, filename))\n\n # Then zip it all up..\n try:\n # import zlib\n compression_method = zipfile.ZIP_DEFLATED\n except ImportError: # pragma: no cover\n compression_method = zipfile.ZIP_STORED\n\n zipf = zipfile.ZipFile(zip_path, 'w', compression_method)\n for root, dirs, files in os.walk(temp_project_path):\n\n for filename in files:\n\n # If there is a .pyc file in this package,\n # we can skip the python source code as we'll just\n # use the compiled bytecode anyway..\n if filename[-3:] == '.py':\n abs_filname = os.path.join(root, filename)\n abs_pyc_filename = abs_filname + 'c'\n if os.path.isfile(abs_pyc_filename):\n\n # but only if the pyc is older than the py,\n # otherwise we'll deploy outdated code!\n py_time = os.stat(abs_filname).st_mtime\n pyc_time = os.stat(abs_pyc_filename).st_mtime\n\n if pyc_time > py_time:\n continue\n\n zipf.write(os.path.join(root, filename), os.path.join(root.replace(temp_project_path, ''), filename))\n\n if '__init__.py' not in files:\n tmp_init = os.path.join(temp_project_path, '__init__.py')\n open(tmp_init, 'a').close()\n zipf.write(tmp_init, os.path.join(root.replace(temp_project_path, ''), os.path.join(root.replace(temp_project_path, ''), '__init__.py')))\n\n # And, we're done!\n zipf.close()\n\n # Trash the temp directory\n shutil.rmtree(temp_project_path)\n shutil.rmtree(temp_package_path)\n\n # Warn if this is too large for Lambda.\n file_stats = os.stat(zip_path)\n if file_stats.st_size > 52428800: # pragma: no cover\n print(\"\\n\\nWarning: Application zip package is likely to be too large for AWS Lambda.\\n\\n\")\n\n return zip_fname",
"def restore_config():\n global sg_kwargs\n sg_kwargs = kwargs_backup.copy()\n backup_filepath.replace(Package.config_filepath)\n print(\"\\n ⓘ Original config.json restored.\")",
"def update_lambda(self):\n\n try:\n AWSSetup._update_lambda(self.appName, self._lambda, self.config)\n except Exception as e:\n AWSSetup._log(str(e))\n AWSSetup._log(\"Failed to update application code.\")",
"def test_observation_pickle(observation_function, model):\n obs = make_obs(observation_function, model)\n blob = pickle.dumps(obs)\n obs_copy = pickle.loads(blob)",
"def to_pickle(self): # pragma: no cover\n raise NotImplementedError(\n \"Pickling is not implemented for FunctionNode. \"\n \"Consider subclassing flowpipe.node.INode to pickle nodes.\"\n )",
"def get_function_configuration(self):\n try:\n logging.info(GET_LAMBDA_FUNCTION_CONFIG)\n response = self.lambda_client.get_function_configuration(\n FunctionName=self.function_name\n )\n logging.debug(json.dumps(response))\n del response[\"ResponseMetadata\"]\n return json.dumps(response, indent=4)\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n return False",
"def lambda_handler(event, context):\n \n try:\n repo_name = event['Records'][0]['eventSourceARN'].split(':')[-1]\n reference = event['Records'][0]['codecommit']['references'][0]\n commit_id = reference['commit']\n ref = os.path.split(reference[\"ref\"])\n root = os.path.basename(ref[0])\n created = reference.get(\"created\")\n deleted = reference.get(\"deleted\")\n if created and root == \"heads\" and ref[1] and ref[1] != \"master\":\n data = json.loads(event['Records'][0]['customData'])\n logger.info('Putting updates trigger for branch %s' % ref[1])\n put_trigger(repo_name, ref[1], data)\n pipeline_name = data[\"pipeline_name\"]\n bucket = data[\"bucket\"]\n logger.info('Getting and archiving codecommit repository content')\n codecommit = AWSCodeCommit(cc_client, repo_name, logger)\n commit_info = cc_client.get_commit(\n repositoryName=repo_name, \n commitId=commit_id\n )\n commit_info['commit']['branchName'] = ref[1]\n commit_info['commit']['RepositoryName'] = repo_name\n codecommit.archive(commit_id, {\"commit_info.json\": json.dumps(commit_info, indent=4)})\n s3_client.put_object(Bucket=bucket,\n Key=\"artifacts/%s\" % pipeline_name,\n Body=codecommit.content)\n logger.info('Starting pipeline execution')\n cp_client.start_pipeline_execution(name=pipeline_name)\n if deleted and root == \"heads\" and ref[1] and ref[1] != \"master\":\n logger.info('Poping updates trigger for branch %s' % ref[1])\n pop_trigger(repo_name, ref[1])\n except Exception as e:\n logger.exception(\"An error occured when processing codecommit trigger event : %s\" % str(e), exc_info=1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Breaks the text word-wise when a line goes beyond ``limit`` chars.
|
def text_wrap(*args, delimiter=' ', end='\n', limit=80):
output = delimiter.join(args)
lines = output.splitlines(keepends=True)
results = []
for line in lines:
curr_print = line
while len(curr_print.rstrip('\n')) > limit:
splitpos = curr_print[:limit].rfind(' ')
if splitpos < 0:
                # Word is longer than the limit; fall back to the first space after it
splitpos = curr_print.find(' ')
if splitpos < 0:
break # Break out and add the long thing in the next line
results.append(curr_print[:splitpos])
curr_print = curr_print[splitpos+1:]
results.append(curr_print)
return results
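# Minimal usage sketch (not from the original source), wrapping a sentence at a
# 20-character limit; lines are broken at word boundaries.
# >>> text_wrap("The quick brown fox jumps over the lazy dog", limit=20)
# ['The quick brown fox', 'jumps over the lazy', 'dog']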
|
[
"def max_num_words(original:str, limit:str):\r\n words = original.split()\r\n if len(words) <= int(limit):\r\n return True\r\n else:\r\n return False",
"def smart_split(text, limit=100):\n prefix = \"\"\n while text:\n chunk = text[:limit]\n text = text[limit:]\n\n if not any(chunk.endswith(x) for x in [\" \", \"\\t\", \"\\n\"]):\n try:\n chunk, prefix = chunk.rsplit(maxsplit=1)\n except ValueError:\n pass\n else:\n text = prefix + text\n if len(\" \".join([chunk, text])) <= limit:\n chunk = \" \".join([chunk, text])\n text = \"\"\n\n yield chunk.strip()",
"def BreakString( s, maxLen ):\n\n quot, rem = divmod( len( s ), maxLen )\n r = [ s[i*maxLen:(i+1)*maxLen] for i in range( quot ) ]\n if len( s ) % maxLen:\n r.append( s[-rem:] )\n\n return r",
"def _truncate(s, limit): \n\ts = force_unicode(s) \n\tif len(s) <= limit: \n\t\treturn s \n\treturn '%s...' % s[:max(1, limit - 3)] \n\ttruncate = allow_lazy(truncate, unicode)",
"def smart_truncate(text, limit=100, suffix='...'):\n if len(text) <= limit:\n return text\n\n return text[:limit].rsplit(' ', 1)[0]+suffix",
"def truncate(self, limit):\n if self.length() > limit:\n return self.normalize() * limit\n return self",
"def drop_long_words(string, num=12):\n new_word = []\n for word in string.split():\n if len(word) <= num:\n new_word.append(word)\n new_word = \" \".join(new_word)\n return new_word",
"def truncate(text, max_len=250, suffix=\"...\"):\n stripped = text.strip()\n if len(stripped) <= max_len:\n return stripped\n substr = stripped[0:max_len + 1]\n words = \" \".join(re.split(r\"\\s+\", substr)[0:-1])\n return words + suffix",
"def truncate(msg, limit):\n if len(msg) <= limit:\n return msg\n\n half = limit // 2\n return '\\n'.join([\n msg[:half],\n '...%d characters truncated...' % (len(msg) - limit), msg[-half:]\n ])",
"def LimitString(length=80, endchar='...'):\n def _Limit(string, length=length, endchar=endchar):\n if len(string) > length:\n return string[:length] + endchar\n return string\n return _Limit",
"def trim_words(s, max_chars, separator=\" \"):\n\n if max_chars and len(s) >= max_chars:\n head, sep, tail = s[:max_chars].rpartition(separator)\n return (head or tail) + \"...\"\n\n return s",
"def cut_words(lb, max_len=20):\n words = lb.split(', ')\n new_lb = ''\n for word in words:\n if len(new_lb + ', ' + word) > max_len:\n break\n new_lb += ', ' + word\n new_lb = new_lb[2 :]\n\n if len(new_lb) == 0:\n new_lb = words[0]\n \n return new_lb",
"def scale_to_word_width(img, word, limit):\n width = min(limit, len(word) * average_char_width)\n\n return scale_to_width(img, width)",
"def wrap(text, maxlen=76, wrapstr=\" \"):\n\n assert \"\\n\" not in text\n return wrapstr + wrapstr.join([text[0 + i:maxlen + i]\n for i in range(0, len(text), maxlen)])",
"def breakline(text,W=0):\n if W == 0: W,Y = console.getTerminalSize()\n final_text = ''\n current_sentence = ''\n for w in text.split():\n if len(current_sentence+w) >= W:\n final_text += current_sentence + '\\n'\n current_sentence = ''\n else: \n current_sentence += w + ' '\n return final_text",
"def give_truncator(lim):\n\n def truncator(val):\n \"\"\"this closure based function truncates a string (val)\n to a max limit of lim\"\"\"\n return val[:lim]\n return truncator",
"def wordWrap (text, lineWidth, gc):\r\n words = text.split()\r\n lines = []\r\n currentWidth = 0\r\n currentLine = ''\r\n \r\n for word in words:\r\n wordWidth = gc.GetTextExtent(word + ' ')[0]\r\n if currentWidth + wordWidth < lineWidth:\r\n currentLine += word + ' '\r\n currentWidth += wordWidth\r\n else:\r\n lines.append(currentLine)\r\n currentLine = word + ' '\r\n currentWidth = wordWidth\r\n \r\n lines.append(currentLine)\r\n return lines",
"def truncate_text(text, max_chars):\n\n if len(text) > max_chars: # If the power is more then 12 characters cut it to 10 and add '...'\n new_text = text[0:(max_chars - 2)] + '...'\n else:\n new_text = text\n return new_text",
"def ellipsize(str, words=14, chars=140):\n split = str.split()\n if len(split) <= words and len(str) <= chars:\n return str\n return ' '.join(split[:words])[:chars-3] + '...'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove columns which have a single label for the entire input dataset, i.e. categories that have only zeros or only ones.
|
def remove_non_informative_categories(categories_df):
columns_only_zeros = categories_df.columns[categories_df.sum(axis=0) == 0].values
columns_only_ones = categories_df.columns[categories_df.sum(axis=0) == len(categories_df)].values
categories_df = categories_df.drop(columns=np.concatenate((columns_only_ones, columns_only_zeros)), axis=1)
return categories_df
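# Hedged usage sketch (assumes pandas as pd and numpy as np are imported, as the
# function body itself relies on np):
# >>> import numpy as np, pandas as pd
# >>> df = pd.DataFrame({"always_zero": [0, 0, 0],
# ...                    "always_one": [1, 1, 1],
# ...                    "mixed": [0, 1, 0]})
# >>> remove_non_informative_categories(df).columns.tolist()
# ['mixed']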
|
[
"def drop_constant_columns(df):\n\n cols=df.columns\n counts=[[],[]]\n for c in cols:\n typ = df[c].dtypes\n uniq = len(df[c].unique())\n if uniq == 2 and typ == np.float64:\n counts[1].append(c)\n elif uniq == 1:\n counts[0].append(c)\n print('Constant Column Count: {} \\nBinary Column Count: {} \\n'.format(*[len(c) for c in counts]))\n print('Dropped Constant columns: ')\n print(*counts[0],sep = \", \")\n print('\\nDropped Binary columns: ') # Binary olmadigi icin silinebilir\n print(*counts[1],sep = \", \")\n\n df=df.drop(columns=counts[0])\n df=df.drop(columns=counts[1])\n print(\"\\nShape: \",df.shape)\n \n return(df)",
"def strip_labels(data: Union[pd.DataFrame, pd.Series], label_col: Optional[str] = None) -> np.ndarray:\n # TODO change to dataframe column\n if label_col is None:\n label_col = \"label\"\n if isinstance(data, pd.DataFrame):\n data = data.index.get_level_values(label_col)\n return np.array(data)",
"def trim_zero_columns(self):\n try:\n self.coulomb_column_array = self.coulomb_column_array[:, (self.coulomb_column_array == 0).sum(axis=0) != self.coulomb_column_array.shape[0]]\n except AttributeError:\n pass",
"def remove_labels(self, indices):\n _labels, mask = removeind(self, \"_labels\", indices)\n self._register_labels(_labels)\n return mask",
"def cont(X):\n if not hasattr(X, \"dtypes\"):\n raise AttributeError(\"Not a Pandas DataFrame with 'dtypes' as attribute!\")\n return X.dtypes != \"category\"",
"def testWithNoLabel(self):\n record_defaults = [\n constant_op.constant([], dtypes.int32),\n constant_op.constant([], dtypes.int64),\n constant_op.constant([], dtypes.float32),\n constant_op.constant([], dtypes.float64),\n constant_op.constant([], dtypes.string)\n ]\n\n column_names = [\"col%d\" % i for i in range(5)]\n inputs = [[\",\".join(x for x in column_names), \"0,1,2,3,4\", \"5,6,7,8,9\"], [\n \",\".join(x for x in column_names), \"10,11,12,13,14\", \"15,16,17,18,19\"\n ]]\n expected_output = [[0, 1, 2, 3, b\"4\"], [5, 6, 7, 8, b\"9\"],\n [10, 11, 12, 13, b\"14\"], [15, 16, 17, 18, b\"19\"]]\n\n self._test_dataset(\n inputs,\n expected_output=expected_output,\n expected_keys=column_names,\n column_names=column_names,\n batch_size=1,\n num_epochs=1,\n shuffle=False,\n header=True,\n column_defaults=record_defaults,\n )",
"def cut_labels(labels, min_hits=3):\n\n new_labels = labels.copy()\n\n unique, counts = numpy.unique(labels, return_counts=True)\n for lab in unique[counts < min_hits]:\n new_labels[new_labels == lab] = -1\n\n return new_labels",
"def test_drop_single_label(self):\n self.stack.drop(\"lsat7_2002_70@PERMANENT\", in_place=True)\n self.assertListEqual(self.stack.names, self.predictors[0:5])",
"def cut_labels(self, labels, min_hits=3):\n\n new_labels = labels.copy()\n\n unique, counts = numpy.unique(labels, return_counts=True)\n for lab in unique[counts < min_hits]:\n new_labels[new_labels == lab] = -1\n\n return new_labels",
"def clean_dataset(df, subset=None, drop_cols=False, thresh=0.7):\n \n df = df.dropna(how='all', subset=subset)\n df = df.dropDuplicates(subset)\n \n if drop_cols:\n total_row_count = df.count()\n col_nulls_df = df.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in df.columns]).toPandas()\n col_nulls_df = pd.melt(col_nulls_df, var_name='Column Name', value_name='Null Count')\n col_nulls_df['Ratio to total'] = (col_nulls_df['Null Count']/total_row_count).round(3)\n cols = col_nulls_df.loc[col_nulls_df['Ratio to total'] > 0.7].columns.tolist()\n df = df.drop(*cols)\n \n return df",
"def drop_constant_columns(dataframe):\n keep_columns = [col for col in dataframe.columns if len(dataframe[col].unique()) > 1]\n return dataframe[keep_columns].copy()",
"def _onehot_labels(self, labels):\n labels_full = np.zeros((self.num_classes, self.num_rois))\n for idx, l in enumerate(labels):\n labels_full[int(l), idx] = 1\n return labels_full",
"def prune_rare_cats(df):\n\n new_df = df.copy()\n categories = []\n [categories.append(item) for item in list(df.columns) if 'category_' in item] \n \n [new_df.drop(columns=category, inplace=True) for category in categories if df[category].sum() < 5]\n \n return new_df",
"def find_binary(self):\n binary=[]\n for col in self.categorical_variables:\n if len(self.data[col].value_counts())==2:\n binary.append(col)\n return binary",
"def get_labels(self):\n return np.unique(self.labeled_feature[self.labeled_feature != 0])",
"def dummization(self):\n #TODO: use sklearn ColumnTransformer instead\n\n return pd.get_dummies(\n self.simple_imputer(),\n prefix_sep='_',\n prefix=self.categorical_cols,\n columns=self.categorical_cols,\n drop_first=False\n )",
"def _preprocess_labels(labels):\n BAD_LABEL = 999\n # iterate through each label\n for i, label in enumerate(labels):\n for j, l in enumerate(label):\n if l == BAD_LABEL:\n labels[i,j] = 0\n return labels",
"def one_hot_encoder(dataframe, nan_as_category = True):\r\n df = dataframe\r\n original_columns = list(df.columns)\r\n df = pd.get_dummies(df, dummy_na= True,drop_first=True)\r\n debug(df.info(memory_usage='deep'))\r\n df = df.loc[:,~df.columns.duplicated()]\r\n debug(df.info(memory_usage='deep'))\r\n new_columns = [c for c in df.columns if c not in original_columns]\r\n const_columns = [c for c in new_columns if df[c].dtype!='object' \\\r\n and np.sum(df[c]) == 0 and np.std(df[c]) == 0]\r\n df.drop(const_columns, axis = 1, inplace = True)\r\n new_columns = list(set(new_columns).difference(set(const_columns)))\r\n return df, new_columns",
"def _remove_columns_zeros_threshold(self, threshold=None):\n try:\n threshold = threshold if threshold is not None else self.threshold\n dic_missing = self.percentage_zeros_missing()\n for item in dic_missing:\n if item[list(item.keys())[0]] >= threshold:\n self.df.drop(list(item.keys())[0], axis=1, inplace=True)\n except Exception as e:\n print(getErrorDesc(sys.exc_info()))\n return e.args"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clean our dataframe; this mainly means cleaning the categories column
|
def clean_data(df):
cleaned_categories = create_categories_columns(df["categories"])
# replace old categories with the cleaned one (which itself is a whole dataframe), then remove duplicates
df = df.drop(columns=["categories"], axis=1)
df = pd.concat([df, cleaned_categories], sort=False, axis=1)
df = remove_duplicates(df)
return df
|
[
"def clean_data(df):\n # Resolve categories and expand them to actual columns.\n categories_df = _resolve_categories(df['categories'])\n df = df.drop(columns=['categories'])\n df = pd.concat([df, categories_df], axis=1)\n\n # drop duplicates\n df = _drop_duplicates(df)\n return df",
"def clean_data(df):\n # split categories into columns\n categories = df['categories'].str.split(';', expand=True)\n \n # create list of category names\n row1 = categories.iloc[0]\n names = row1.transform(lambda x: x[:-2]).tolist()\n \n # use these names as new column names\n categories.columns = names\n \n # convert the -1 or -0 in each entry to 1 or 0 \n for i in categories:\n # final character is the number we want to keep\n categories[i] = categories[i].transform(lambda x: x[-1:])\n \n # conversion to numeric\n categories[i] = pd.to_numeric(categories[i])\n \n # the related column has some entries with value 2, which represent\n # incorreect data and should be removed\n categories = categories[categories['related'] != 2]\n \n # we also see that child_alone is always 0, so that column can\n # be dropped as it never applies\n categories.drop('child_alone', axis=1, inplace = True)\n \n # we can now combine this dataset with the original one \n # in the process, we can drop unneeded columns: original and categories\n\n df.drop('categories', axis=1, inplace = True)\n df.drop('original', axis=1, inplace=True)\n \n df_clean = pd.concat([df, categories], axis=1)\n \n # finally we can drop duplicated rows (there are 170)\n \n df_clean.drop_duplicates(inplace=True)\n \n df = df_clean\n \n return df",
"def add_clean_cats(df):\n df['cats'] = df['categories'].apply(clean_cats)",
"def clean_data(self, data: pd.DataFrame) -> pd.DataFrame:",
"def remove_unused_categories(df: pd.DataFrame, inplace=False) -> pd.DataFrame:\n if inplace is False:\n df = df.copy()\n\n for col in df.columns:\n try:\n df[col].cat.remove_unused_categories(inplace=True)\n except Exception:\n pass\n\n if inplace is False:\n return df",
"def clean(self, df):\n df = df.drop(self.__preprocessor.get_non_redundant_entity_attributes(), axis=1)\n df = df.drop(self.__preprocessor.get_redundant_entity_attributes(), axis=1)\n return df",
"def remove_non_informative_categories(categories_df):\n columns_only_zeros = categories_df.columns[categories_df.sum(axis=0) == 0].values\n columns_only_ones = categories_df.columns[categories_df.sum(axis=0) == len(categories_df)].values\n categories_df = categories_df.drop(columns=np.concatenate((columns_only_ones, columns_only_zeros)), axis=1)\n return categories_df",
"def clean_categories(self, table):\n # get list of categories in database\n cat = Category()\n cat = cat.create()\n cat_list = table.read(cat)\n # instantiate products table\n prod = Product()\n prod = prod.create()\n for i in cat_list:\n # check number of products for a category\n cid = i[\"cid\"]\n check = table.read(prod, cid=cid)\n # delete category if empty\n if not check:\n table.delete(cat, cid=cid)\n else:\n pass",
"def tidy_data(df):\n\n ##clean up column headings\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')",
"def clean(self):\n for column in self.columns:\n column.change_misc_values()\n column.drop_greater_than()",
"def prune_rare_cats(df):\n\n new_df = df.copy()\n categories = []\n [categories.append(item) for item in list(df.columns) if 'category_' in item] \n \n [new_df.drop(columns=category, inplace=True) for category in categories if df[category].sum() < 5]\n \n return new_df",
"def perform_data_clean(df):\n # Perform data cleanup\n for col, value in NAN_VALUE_MAP.items():\n df[col] = df[col].fillna(value)\n\n for col in BOOLEAN_VALUE_COLUMNS:\n df[col] = df[col].astype(\"bool\")",
"def additionalCleanup(df):\n\n df = removeCancellations(df)\n df = removeTurnbacks(df)\n df = removeDiversions(df)\n df = filterFlights(df)\n return df",
"def cleandata(df):\r\n df = clean_column_names(df)\r\n print(\"Columns headers cleaned\")\r\n df_dup = drop_duplicate(df, keep='first')\r\n print(\"Dropped duplicate rows\")\r\n df = remove_outlier_IQR(df_dup)\r\n print(\"Outliers removed\")\r\n df = impute_missing_value(df)\r\n print(\"Missing Values imputed\")\r\n return df",
"def cleaning(df):\n df['Weather'] = df['Weather'].str.replace('Moderate ', '')\n df['Weather'] = df['Weather'].str.replace(' Showers', '')\n df['Weather'] = df['Weather'].str.replace('Mainly ', '')\n df['Weather'] = df['Weather'].str.replace('Mostly ', '')\n df = df.groupby('Weather').filter(lambda x: len(x) >= 10)\n df['Weather'] = df['Weather'].str.replace('Drizzle', 'Rain')\n df = df[df['Weather'] != 'Fog']\n df = df[df['Weather'] != 'Rain,Fog']\n return df",
"def clean_data(dataframe):\n dataframe[dataframe.isnull()] = np.NaN\n dataframe = dataframe.dropna(subset=['connective_positions', 'sentences'])\n return dataframe",
"def clean_data(self):\r\n self.all_data.drop(len(self.all_data) - 1, inplace = True)",
"def clear_known_categories(x, cols=None, index=True, dtype_backend=None):\n if dtype_backend == \"pyarrow\":\n # Right now Categorical with PyArrow is implemented as dictionary and\n # categorical accessor is not yet available\n return x\n\n if isinstance(x, (pd.Series, pd.DataFrame)):\n x = x.copy()\n if isinstance(x, pd.DataFrame):\n mask = x.dtypes == \"category\"\n if cols is None:\n cols = mask[mask].index\n elif not mask.loc[cols].all():\n raise ValueError(\"Not all columns are categoricals\")\n for c in cols:\n x[c] = x[c].cat.set_categories([UNKNOWN_CATEGORIES])\n elif isinstance(x, pd.Series):\n if isinstance(x.dtype, pd.CategoricalDtype):\n x = x.cat.set_categories([UNKNOWN_CATEGORIES])\n if index and isinstance(x.index, pd.CategoricalIndex):\n x.index = x.index.set_categories([UNKNOWN_CATEGORIES])\n elif isinstance(x, pd.CategoricalIndex):\n x = x.set_categories([UNKNOWN_CATEGORIES])\n return x",
"def clean_titanic(df):\n \n \n df[\"is_female\"] = df.sex == \"Female\"\n embarked_dummies = pd.get_dummies(df.embarked, prefix='Embarked', drop_first=True)\n class_dummies = pd.get_dummies(df.pclass, prefix='class', drop_first=True)\n\n dropcols = ['deck', 'age', 'embark_town', 'passenger_id', 'embarked', 'sex', 'pclass', 'class']\n df.drop(columns= dropcols, inplace=True)\n\n return pd.concat([df, embarked_dummies, class_dummies], axis =1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run classification test with ExponentiatedGradient.
|
def run_expgrad_classification(estimator, moment):
X, Y, A = fetch_adult()
expgrad = ExponentiatedGradient(
estimator,
constraints=moment)
expgrad.fit(X, Y, sensitive_features=A)
assert expgrad.n_oracle_calls_ > 1
assert len(expgrad.predictors_) > 1
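A minimal invocation sketch, not part of the test above: it assumes scikit-learn's LogisticRegression as the inner estimator and fairlearn's DemographicParity as the constraint, which are illustrative choices rather than the only valid ones.

from sklearn.linear_model import LogisticRegression
from fairlearn.reductions import DemographicParity

# Any sklearn-style classifier and any fairlearn Moment should be accepted here;
# these particular choices are assumptions for the sketch.
run_expgrad_classification(LogisticRegression(max_iter=1000), DemographicParity())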
|
[
"def experiment(self, x_train, x_test, y_train, y_test, **kwargs):\n\n print('\\n--------------------------')\n self.plot_model_complexity(x_train, y_train, **kwargs)\n self.plot_learning_curve(x_train, y_train, **kwargs)\n self.fit(x_train, y_train)\n self.evaluate(x_test, y_test)",
"def test_train(self):\n trace.train(10)",
"def main(num_hidden, epochs, lr, act_type): \r\n # load MNIST dataset \r\n digits = load_digits()\r\n \r\n X_train, X_test, Y_train, Y_test = train_test_split(digits.data, digits.target, test_size=0.3, random_state=0)\r\n \r\n # normalize data \r\n scaler = preprocessing.StandardScaler().fit(X_train)\r\n X_train = scaler.transform(X_train)\r\n X_test = scaler.transform(X_test)\r\n \r\n # initialize weights of the neural network \r\n params = initialize_model(size_input = X_train.shape[1], size_hidden=num_hidden, size_output=10)\r\n \r\n \r\n # training \r\n train_err_log = []\r\n test_err_log = []\r\n \r\n for epoch in range(epochs):\r\n # train model on training set \r\n params, err_train = train(params, X_train.T, Y_train, lr, act_type=act_type)\r\n train_err_log.append(err_train*100)\r\n \r\n # test model on test set \r\n err_test = test(params, X_test.T, Y_test, act_type=act_type)\r\n test_err_log.append(err_test*100)\r\n \r\n if epoch % 1000 == 0: \r\n print(\"EPOCH [%d] train_err %.3f, test_err %.3f\"%(epoch, err_train, err_test))\r\n \r\n # plot training curve \r\n plt.figure(1, figsize=(12, 8))\r\n plt.plot(range(epochs), train_err_log, '-', color='orange',linewidth=2, label='training error (Learning rate='+str(lr)+')')\r\n plt.plot(range(epochs), test_err_log, '-b', linewidth=2, label='test error (Learning rate='+str(lr)+')')\r\n \r\n plt.title('%s activation (Learning rate=%s)' % (act_type, str(lr)))\r\n plt.xlabel('epoch')\r\n plt.ylabel('classification error (%)')\r\n plt.legend(loc='best')\r\n plt.show()",
"def train_and_evaluate(self, x_train, y_train, x_test, y_test):\n try:\n classifier = self.classifier_class(random_state=self.classifier_state, **self.classifier_parameters)\n except TypeError:\n classifier = self.classifier_class(**self.classifier_parameters)\n if self.data_balancer_class is not None:\n self.data_balancer = self.data_balancer_class(random_state=self.data_balancer_state)\n self.train_and_evaluate_fold(x_train, y_train, x_test, y_test, classifier, 0, data_balancer=self.data_balancer)\n\n # Error rates\n avg_metric_dict = self.ml_stats.calculate_average_results()\n\n return avg_metric_dict",
"def train(self):\n mse = train_and_score(self.network)\n self.accuracy = 1/ mse\n self.mse = mse",
"def evaluate_features(experiment_seed_tuple):\n\n experiment = experiment_seed_tuple[0]\n seed = experiment_seed_tuple[1]\n\n X_train = experiment[\"train\"]\n X_valid = experiment[\"valid\"]\n X_test = experiment[\"test\"]\n\n Y_train = Y_target[\"train\"]\n Y_valid = Y_target[\"valid\"]\n Y_test = Y_target[\"test\"]\n\n def df_to_dmatrix(features, target):\n x = features.drop(columns=[\"Date\"], errors=\"ignore\") # Ignore if not exist\n y = target\n dmatrix = xgb.DMatrix(x, label=y)\n return dmatrix\n\n dm_train = df_to_dmatrix(X_train, Y_train)\n dm_valid = df_to_dmatrix(X_valid, Y_valid)\n dm_test = df_to_dmatrix(X_test, Y_test)\n\n # set seed in XGBoost params\n CMP_XGB_PARAMS[\"seed\"] = seed\n\n # Determine optimal model size\n evals = [(dm_train, \"train\"), (dm_valid, \"valid\")]\n model_bst = xgb.train(\n params=CMP_XGB_PARAMS,\n dtrain=dm_train,\n evals=evals,\n num_boost_round=CMP_NUM_BOOST_ROUND,\n early_stopping_rounds=CMP_EARLY_STOPPING_ROUNDS,\n )\n best_ntree_limit = model_bst.best_ntree_limit\n\n # OPTIONAL: Append train and valid set and train on both sets\n\n # Retrain on all training data\n evals2 = [(dm_train, \"train\"), (dm_test, \"test\")]\n model_final = xgb.train(\n params=CMP_XGB_PARAMS,\n dtrain=dm_train,\n evals=evals2,\n num_boost_round=best_ntree_limit,\n )\n\n # Feature importance (Information Gain)\n feature_information_gain = model_final.get_score(importance_type=\"gain\")\n feature_importance = pd.DataFrame(\n list(feature_information_gain.items()), columns=[\"feature\", \"information_gain\"]\n )\n feature_importance[\"algorithm\"] = experiment[\"name\"]\n feature_importance[\"seed\"] = seed\n # Reorder columns\n feature_importance = feature_importance[FEATURE_IMPORTANCE_COLUMNS]\n\n # Predict values of test set\n y_pred = model_final.predict(dm_test)\n y_true = dm_test.get_label()\n preds = pd.DataFrame()\n preds[\"y_true\"] = y_true\n preds[\"y_pred\"] = y_pred\n preds[\"algorithm\"] = experiment[\"name\"]\n preds[\"seed\"] = seed\n # Reorder columns\n preds = preds[PREDS_COLUMNS]\n\n # Calculate and save error metrics\n r2 = r2_score(y_true=y_true, y_pred=y_pred)\n mse = mean_squared_error(y_true=y_true, y_pred=y_pred)\n rmse = sqrt(mse)\n metrics = pd.DataFrame(\n [[experiment[\"name\"], seed, mse, rmse, r2]], columns=EVAL_COLUMNS\n )\n\n eval_dict = {\n \"name\": experiment[\"name\"],\n \"metrics\": metrics,\n \"preds\": preds,\n \"feature_importance\": feature_importance,\n }\n\n return eval_dict",
"def runElasticNet():\n X,y=preprocess()\n ElasticNet(X,y)",
"def evaluate(self, test_data, training_data=None):\n if training_data is not None:\n self.train(training_data)\n pred = self.predict(test_data)\n y_true = test_data.Y.reshape(-1)\n if len(pred.shape) == 3:\n y_pred = pred.reshape(-1, pred.shape[2])\n num_labels = y_pred.shape[-1]\n labels = np.arange(num_labels)\n loss = sklearn.metrics.log_loss(y_true=y_true, y_pred=y_pred,\n labels=labels)\n\n y_pred_best = np.argmax(y_pred, -1)\n acc = sklearn.metrics.accuracy_score(y_true, y_pred_best)\n print loss, acc\n return loss, acc",
"def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):\n \n if test_data: \n n_test = len(test_data)\n \n n = len(training_data)\n \n for j in xrange(epochs):\n random.shuffle(training_data)\n \n# mini_batches = [\n # training_data[k:k+mini_batch_size] for k in xrange(0, n, mini_batch_size)]\n \n mini_batches = training_data[:mini_batch_size]\n \n self.backPropagate(mini_batches, eta)\n \n if test_data:\n# print \"Epoch {0}: {1} / {2}\".format(j, self.evaluate(test_data), n_test)\n print \"Epoch {0}: cost is {1}\".format(j, self.evaluate(test_data))\n \n else:\n print \"Epoch {0} complete\".format(j)",
"def testTrainFnMulticlassFullHessian(self):\n with self.cached_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 1\n # Use full hessian multiclass strategy.\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.FULL_HESSIAN)\n learner_config.num_classes = 5\n learner_config.regularization.l1 = 0\n # To make matrix inversible.\n learner_config.regularization.l2 = 1e-5\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n batch_size = 3\n features[\"dense_float\"] = array_ops.constant(\n [0.3, 1.5, 1.1], dtype=dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=5,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.2]],\n dtype=dtypes.float32)\n\n labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)\n weights = array_ops.ones([batch_size, 1], dtypes.float32)\n\n partition_ids = array_ops.zeros([batch_size], dtypes.int32)\n ensemble_stamp = variables.VariableV1(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 0,\n }\n\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n losses.per_example_maxent_loss(\n labels,\n weights,\n predictions,\n num_classes=learner_config.num_classes)[0]),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 1)\n # We got 3 nodes: one parent and 2 leafs.\n self.assertEqual(len(output.trees[0].nodes), 3)\n self.assertAllClose(output.tree_weights, [1])\n self.assertEquals(stamp_token.eval(), 2)\n\n # Leafs should have a dense vector of size 5.\n expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]\n expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]\n self.assertArrayNear(expected_leaf_1,\n output.trees[0].nodes[1].leaf.vector.value, 7e-3)\n self.assertArrayNear(expected_leaf_2,\n 
output.trees[0].nodes[2].leaf.vector.value, 7e-3)",
"def ensemble_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_train, y_val, y_test):\n train_eq_ensemble_pred = equally_ensemble_results(train_probas)\n val_eq_ensemble_pred = equally_ensemble_results(val_probas)\n test_eq_ensemble_pred = equally_ensemble_results(test_probas)\n\n print(\"Equally weighted ensemble:\")\n print(\"--------------------------\")\n print(\"Train accuracy: \", accuracy_score(y_train, train_eq_ensemble_pred))\n print(\"Validation accuracy: \", accuracy_score(y_val, val_eq_ensemble_pred))\n print(\"Test accuracy: \", accuracy_score(y_test, test_eq_ensemble_pred))\n\n np.save(os.path.join('model', 'train_eq_ensemble_pred'), train_eq_ensemble_pred)\n np.save(os.path.join('model', 'val_eq_ensemble_pred'), val_eq_ensemble_pred)\n np.save(os.path.join('model', 'test_eq_ensemble_pred'), test_eq_ensemble_pred)\n\n confidence_train = calculate_confidence_val(train_probas, y_train)\n confidence_val = calculate_confidence_val(val_probas, y_val)\n confidence_test = calculate_confidence_val(test_probas, y_test)\n\n train_w_ensemble_pred = weighted_ensemble_results(train_probas, confidence_train)\n val_w_ensemble_pred = weighted_ensemble_results(val_probas, confidence_val)\n test_w_ensemble_pred = weighted_ensemble_results(test_probas, confidence_test)\n\n print(\"Weighted ensemble:\")\n print(\"--------------------------\")\n print(\"Train accuracy: \", accuracy_score(y_train, train_w_ensemble_pred))\n print(\"Validation accuracy: \", accuracy_score(y_val, val_w_ensemble_pred))\n print(\"Test accuracy: \", accuracy_score(y_test, test_w_ensemble_pred))\n\n np.save(os.path.join('model', 'train_w_ensemble_pred.npy'), train_w_ensemble_pred)\n np.save(os.path.join('model', 'val_w_ensemble_pred.npy'), val_w_ensemble_pred)\n np.save(os.path.join('model', 'test_w_ensemble_pred.npy'), test_w_ensemble_pred)",
"def _test_gradient_numerical(self, clf, x, extra_classes=None,\n th=1e-3, epsilon=eps, **grad_kwargs):\n if 'y' in grad_kwargs:\n raise ValueError(\"`y` cannot be passed to this unittest.\")\n\n if extra_classes is not None:\n classes = clf.classes.append(extra_classes)\n else:\n classes = clf.classes\n\n grads = []\n for c in classes:\n grad_kwargs['y'] = c # Appending class to test_f_x\n\n # Analytical gradient\n gradient = clf.grad_f_x(x, **grad_kwargs)\n grads.append(gradient)\n\n self.assertTrue(gradient.is_vector_like)\n self.assertEqual(x.size, gradient.size)\n self.assertEqual(x.issparse, gradient.issparse)\n\n # Numerical gradient\n num_gradient = CFunction(\n clf.decision_function).approx_fprime(x.todense(), epsilon, y=c)\n\n # Compute the norm of the difference\n error = (gradient - num_gradient).norm()\n\n self.logger.info(\n \"Analytic grad wrt. class {:}:\\n{:}\".format(c, gradient))\n self.logger.info(\n \"Numeric gradient wrt. class {:}:\\n{:}\".format(\n c, num_gradient))\n\n self.logger.info(\"norm(grad - num_grad): {:}\".format(error))\n self.assertLess(error, th)\n\n self.assertIsSubDtype(gradient.dtype, float)\n\n return grads",
"def test_machine_learning():\n df = build_dataset(wav_number=120, random_sate=42)\n df = df.drop(['fs', 'duration'], axis=1)\n\n train_set, test_set = train_test_split(df, test_size=0.2, random_state=42)\n X_train, y_train = preprocessing(train_set)\n X_test, y_test = preprocessing(test_set)\n\n print('Decision tree')\n tree = DecisionTreeClassifier(random_state=42)\n evaluation(tree, X_train, y_train, X_test, y_test)\n\n print('Random forest')\n rforest = RandomForestClassifier(random_state=42)\n evaluation(rforest, X_train, y_train, X_test, y_test)",
"def do_training():\n train_cls = Train()\n train_cls.run()",
"def evaluate(self, train_data, test_data):\n tot_time = time.time()\n\n LGMSimVars.per_metric_optValues = config.MLConf.opt_values[self.encoding.lower()]\n assert (os.path.isfile(os.path.join(config.default_data_path, train_data))), \\\n f'{train_data} dataset does not exist'\n assert (os.path.isfile(os.path.join(config.default_data_path, test_data))), \\\n f'{test_data} dataset does not exist'\n\n f = Features()\n pt = hyperparam_tuning.ParamTuning()\n\n start_time = time.time()\n f.load_data(os.path.join(config.default_data_path, train_data), self.encoding)\n fX_train, y_train = f.build()\n print(\"Loaded train dataset and build features for {} setup; {} sec.\".format(\n config.MLConf.classification_method, time.time() - start_time))\n\n start_time = time.time()\n f.load_data(os.path.join(config.default_data_path, test_data), self.encoding)\n fX_test, y_test = f.build()\n print(\"Loaded test dataset and build features; {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, y_train, estimator)\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n metrics = pt.testClassifier(fX_test, y_test, estimator)\n self._print_stats({'classifier': clf, **metrics, 'time': start_time})\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))",
"def train(self):\n theta = self.theta\n J_pre = float('inf')\n i = 0\n while(True):\n (J, grad) = self.cost_function_reg(self.X, self.y, theta)\n if abs(J - J_pre) < 0.001*J:\n break\n else:\n theta -= self.step * grad\n i += 1\n J_pre = J\n self.theta = theta",
"def testTrainFnMulticlassDiagonalHessian(self):\n with self.cached_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 1\n # Use full hessian multiclass strategy.\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)\n learner_config.num_classes = 5\n learner_config.regularization.l1 = 0\n # To make matrix inversible.\n learner_config.regularization.l2 = 1e-5\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n batch_size = 3\n features = {}\n features[\"dense_float\"] = array_ops.constant(\n [0.3, 1.5, 1.1], dtype=dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=5,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.2]],\n dtype=dtypes.float32)\n\n labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)\n weights = array_ops.ones([batch_size, 1], dtypes.float32)\n\n partition_ids = array_ops.zeros([batch_size], dtypes.int32)\n ensemble_stamp = variables.VariableV1(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 0,\n }\n\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n losses.per_example_maxent_loss(\n labels,\n weights,\n predictions,\n num_classes=learner_config.num_classes)[0]),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 0)\n self.assertEqual(len(output.tree_weights), 0)\n self.assertEqual(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 1)\n # We got 3 nodes: one parent and 2 leafs.\n self.assertEqual(len(output.trees[0].nodes), 3)\n self.assertAllClose(output.tree_weights, [1])\n self.assertEqual(stamp_token.eval(), 2)\n\n # Leafs should have a dense vector of size 5.\n expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]\n expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]\n self.assertArrayNear(expected_leaf_1,\n output.trees[0].nodes[1].leaf.vector.value, 1e-3)\n self.assertArrayNear(expected_leaf_2,\n 
output.trees[0].nodes[2].leaf.vector.value, 1e-3)",
"def experiment(args, logger, out_dir, seed):\n\n # get model and data\n clf = model_util.get_classifier(args.tree_type,\n n_estimators=args.n_estimators,\n max_depth=args.max_depth,\n random_state=seed)\n\n data = data_util.get_data(args.dataset,\n random_state=seed,\n data_dir=args.data_dir)\n X_train, X_test, y_train, y_test, label = data\n\n logger.info('train instances: {:,}'.format(len(X_train)))\n logger.info('test instances: {:,}'.format(len(X_test)))\n logger.info('no. features: {:,}'.format(X_train.shape[1]))\n\n # train a tree ensemble\n model = clone(clf).fit(X_train, y_train)\n model_util.performance(model, X_train, y_train,\n X_test=X_test, y_test=y_test,\n logger=logger)\n\n # randomly pick test instances to explain\n np.random.seed(seed)\n test_ndx = np.random.choice(len(y_test), size=1, replace=False)\n\n # train on predicted labels\n train_label = y_train if args.true_label else model.predict(X_train)\n\n # TREX\n if args.trex:\n logger.info('\\nTREX...')\n fine_tune, test_time = _trex_method(args, model, test_ndx, X_test, X_train, y_train,\n seed=seed, logger=logger)\n\n logger.info('fine tune: {:.3f}s'.format(fine_tune))\n logger.info('computation time: {:.3f}s'.format(test_time))\n r = {'fine_tune': fine_tune, 'test_time': test_time}\n np.save(os.path.join(out_dir, 'method.npy'), r)\n\n # Leaf Influence\n if args.tree_type == 'cb' and args.inf_k is not None:\n logger.info('\\nleafinfluence...')\n fine_tune, test_time = _influence_method(model, test_ndx, X_train,\n y_train, X_test, y_test, args.inf_k)\n\n if test_time is not None:\n logger.info('fine tune: {:.3f}s'.format(fine_tune))\n logger.info('computation time: {:.3f}s'.format(test_time))\n r = {'fine_tune': fine_tune, 'test_time': test_time}\n np.save(os.path.join(out_dir, 'method.npy'), r)\n else:\n logger.info('time limit reached!')\n\n if args.maple:\n logger.info('\\nMAPLE...')\n fine_tune, test_time = _maple_method(model, test_ndx, X_train, train_label, X_test, y_test,\n dstump=args.dstump, logger=logger)\n\n if fine_tune is not None and test_time is not None:\n logger.info('fine tune: {:.3f}s'.format(fine_tune))\n logger.info('computation time: {:.3f}s'.format(test_time))\n r = {'fine_tune': fine_tune, 'test_time': test_time}\n np.save(os.path.join(out_dir, 'method.npy'), r)\n else:\n logger.info('time limit reached!')\n\n if args.teknn:\n logger.info('\\nTEKNN...')\n fine_tune, test_time = _teknn_method(args, model, test_ndx, X_train, train_label,\n X_test, seed, logger=logger)\n if fine_tune is not None and test_time is not None:\n logger.info('fine tune: {:.3f}s'.format(fine_tune))\n logger.info('computation time: {:.3f}s'.format(test_time))\n r = {'fine_tune': fine_tune, 'test_time': test_time}\n np.save(os.path.join(out_dir, 'method.npy'), r)\n else:\n logger.info('time limit reached!')",
"def evaluate_on_testset(model, test_edges, test_edges_false, data):\n\n with torch.no_grad():\n model.eval()\n adj_rec = model(data['features'], data['adj_norm'])\n accuracy, roc_score, ap_score, tn, fp, fn, tp = eval_gae(test_edges, test_edges_false, adj_rec)\n model.train()\n\n return accuracy, roc_score, ap_score, tn, fp, fn, tp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run classification test with ThresholdOptimizer.
|
def run_thresholdoptimizer_classification(estimator):
X, Y, A = fetch_adult()
to = ThresholdOptimizer(estimator=estimator, prefit=False)
to.fit(X, Y, sensitive_features=A)
results = to.predict(X, sensitive_features=A)
assert results is not None
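A possible call, again only a sketch: it assumes a scikit-learn LogisticRegression as the wrapped estimator and leaves ThresholdOptimizer's default objective and constraint untouched.

from sklearn.linear_model import LogisticRegression

# Illustrative estimator; anything exposing fit/predict should work.
run_thresholdoptimizer_classification(LogisticRegression(max_iter=1000))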
|
[
"def test_override_threshold():\n vals = np.zeros((2, 2))\n vals[0, :] = 5\n vals[1, :] = 10\n # create classifier and assert threshold\n C = classifier.BinaryClassifier(vals, 7)\n assert C.threshold == 7\n assert np.all(C.data[0, :] == 5)\n assert np.all(C.data[1, :] == 10)\n # classify with different value then check values\n C.classify(1)\n assert C.threshold == 1\n assert np.all(C.data[0, :] == 5)\n assert np.all(C.data[1, :] == 10)\n assert np.all(C.classified[0, :] == 1)\n assert np.all(C.classified[1, :] == 1)",
"def test_classification(init_env, config_file):\n run_all_steps(init_env, config_file)",
"def test_binary_neg_threshold():\n vals = np.zeros((2, 2))\n vals[0, :] = -5\n vals[1, :] = -10\n # create classifier\n C = classifier.BinaryClassifier(vals, -7.5)\n assert np.all(C.data[0, :] == -5)\n assert np.all(C.data[1, :] == -10)\n assert np.all(C.classified[0, :] == 1)\n assert np.all(C.classified[1, :] == 0)\n # check threshold value\n assert C.threshold == -7.5\n # re-classify then check values\n C.classify(0)\n assert np.all(C.data[0, :] == -5)\n assert np.all(C.data[1, :] == -10)\n assert np.all(C.classified[0, :] == 0)\n assert np.all(C.classified[1, :] == 0)",
"def evaluate(self, train_data, test_data):\n tot_time = time.time()\n\n LGMSimVars.per_metric_optValues = config.MLConf.opt_values[self.encoding.lower()]\n assert (os.path.isfile(os.path.join(config.default_data_path, train_data))), \\\n f'{train_data} dataset does not exist'\n assert (os.path.isfile(os.path.join(config.default_data_path, test_data))), \\\n f'{test_data} dataset does not exist'\n\n f = Features()\n pt = hyperparam_tuning.ParamTuning()\n\n start_time = time.time()\n f.load_data(os.path.join(config.default_data_path, train_data), self.encoding)\n fX_train, y_train = f.build()\n print(\"Loaded train dataset and build features for {} setup; {} sec.\".format(\n config.MLConf.classification_method, time.time() - start_time))\n\n start_time = time.time()\n f.load_data(os.path.join(config.default_data_path, test_data), self.encoding)\n fX_test, y_test = f.build()\n print(\"Loaded test dataset and build features; {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, y_train, estimator)\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n metrics = pt.testClassifier(fX_test, y_test, estimator)\n self._print_stats({'classifier': clf, **metrics, 'time': start_time})\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))",
"def test_total_images(test_data_path, nb_images, classifier,\n\t\t\t\t\t minibatch_size = 25, decision_rule = 'majority_vote',\n\t\t\t\t\t only_green = False):\n\tvalid_decision_rule = ['majority_vote', 'weighted_vote']\n\tif decision_rule not in valid_decision_rule:\n\t\traise NameError(decision_rule + ' is not a valid decision rule.')\n\n\tprint('\t Testing for the database : ' + test_data_path)\n\n\tdata_test = il.Test_loader(test_data_path, subimage_size = 100, only_green = only_green)\n\n\n\tpool = Pool()\n\ttp = 0\n\tfp = 0\n\tnb_CGG = 0\n\taccuracy = 0\n\tfor i in range(nb_images):\n\t\tbatch, label, width, height, original, image_file = data_test.get_next_image()\n\t\tbatch_size = batch.shape[0]\n\t\tj = 0\n\t\tprediction = 0\n\t\tlabels = []\n\t\tdiff = []\n\t\tnb_im = 0\n\t\twhile j < batch_size:\n\n\t\t\tdat = []\n\t\t\tfor k in range(j, min(j+minibatch_size, batch_size)): \n\t\t\t\tdat.append([batch[k], label])\n\n\t\t\tto_compute = [i for i in range(minibatch_size)]\n\t\t\tresult = pool.starmap(partial(compute_features, \n\t\t\t\t\t\t\t\t\t\tbatch_size = 1, \n\t\t\t\t\t\t\t\t\t\tnb_batch = minibatch_size, \n\t\t\t\t\t\t\t\t\t\tmode = 'lbp'),\n\t\t\t\t\t\t\t\t\t\tzip(dat, to_compute)) \n\t\t\tres = []\n\t\t\tfor k in range(len(result)):\n\t\t\t\tres.append(result[k][0][0])\n\t\t\tres = normalize(np.array(res), axis = 1)\n\t\t\tpred = np.log(classifier.predict_proba(res) + 0.00000001)\n\t\t\t# print(classifier.predict(np.array(res)))\n\t\t\t\t\t\n\t\t\tnb_im += pred.shape[0]\n\t\t\tlabel_image = np.argmax(pred, 1)\n\t\t\td =\tnp.max(pred, 1) - np.min(pred, 1)\n\t\t\tfor k in range(d.shape[0]):\n\t\t\t\tdiff.append(np.round(d[k], 1))\n\n\t\t\tif decision_rule == 'majority_vote':\n\t\t\t\tprediction += np.sum(label_image)\n\t\t\tif decision_rule == 'weighted_vote':\n\t\t\t\tprediction += np.sum(-2*d*(label_image - 0.5))\n\n\t\t\tfor l in label_image:\n\t\t\t\tlabels.append(data_test.image_class[l])\n\t\t\tj+=minibatch_size\n\n\t\t\t\t \n\t\tdiff = np.array(diff)\n\t\tif decision_rule == 'majority_vote':\n\t\t\tprediction = data_test.image_class[int(np.round(prediction/batch_size))]\n\t\tif decision_rule == 'weighted_vote':\n\t\t\tprediction = data_test.image_class[int(max(prediction,0)/abs(prediction))]\n\t\t\t\t\n\n\t\tif label == 'CGG':\n\t\t\tnb_CGG += 1\n\t\tif(label == prediction):\n\t\t\taccuracy+= 1\n\t\t\tif(prediction == 'CGG'):\n\t\t\t\ttp += 1\n\t\telse:\n\t\t\tif prediction == 'CGG':\n\t\t\t\tfp += 1\n\t\tprint(prediction, label)\n\n\t\tif ((i+1)%10 == 0):\n\t\t\tprint('\\n_______________________________________________________')\n\t\t\tprint(str(i+1) + '/' + str(nb_images) + ' images treated.')\n\t\t\tprint('Accuracy : ' + str(round(100*accuracy/(i+1), 2)) + '%')\n\t\t\tif tp + fp != 0:\n\t\t\t\tprint('Precision : ' + str(round(100*tp/(tp + fp), 2)) + '%')\n\t\t\tif nb_CGG != 0:\n\t\t\t\t\tprint('Recall : ' + str(round(100*tp/nb_CGG,2)) + '%')\n\t\t\tprint('_______________________________________________________\\n')\n\n\n\tprint('\\n_______________________________________________________')\n\tprint('Final Accuracy : ' + str(round(100*accuracy/(nb_images), 3)) + '%')\n\tprint('Final Precision : ' + str(round(100*tp/(tp + fp), 3)) + '%')\n\tprint('Final Recall : ' + str(round(100*tp/nb_CGG, 3)) + '%')\n\tprint('_______________________________________________________\\n')",
"def test_quick_method(self):\n\n X, y = make_classification(\n n_samples=400,\n n_features=20,\n n_informative=8,\n n_redundant=8,\n n_classes=2,\n n_clusters_per_class=4,\n random_state=2721,\n )\n\n _, ax = plt.subplots()\n\n discrimination_threshold(BernoulliNB(), X, y, ax=ax, random_state=5, show=False)\n self.assert_images_similar(ax=ax, tol=10)",
"def exec_classifiers(self, dataset):\n f = Features()\n pt = param_tuning.ParamTuning()\n\n start_time = time.time()\n Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)\n print(\"Loaded train/test datasets in {} sec.\".format(time.time() - start_time))\n\n fX_train = f.build(Xtrain)\n fX_test = f.build(Xtest)\n print(\"Build features from train/test data in {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n tot_time = time.time(); start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n # estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.clf_names[clf][0](random_state=config.seed_no)\n estimator.set_params(**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, ytrain, estimator)\n\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n res = pt.testClassifier(fX_test, ytest, estimator)\n self._print_stats(clf, res['metrics'], res['feature_imp'], start_time)\n # if not os.path.exists('output'):\n # os.makedirs('output')\n # np.savetxt(f'output/{clf}_default_stats.csv', res['metrics']['stats'], fmt=\"%u\")\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))",
"def test_train(self):\n trace.train(10)",
"def cnn_predict():\n\n x_test, y_test, file_name_test_list = load_test_set()\n\n model = cnn()\n\n weight_path = Path(config[\"weight_file\"])\n if weight_path.exists() is False:\n log.error(\"Not found weight file %s. Aborting.\" % (weight_path))\n sys.exit(1)\n\n model.load_weights(weight_path)\n\n y_predicted = model.predict(x_test)\n correct_count = 0\n total_count = x_test.shape[0]\n for i in range(total_count):\n # Ground truth\n # Convert the file name to a string that contains only the ground trugh classes\n name = file_name_test_list[i]\n underscore_pos = name.find(\"_\")\n if underscore_pos < 0:\n log.warning(\"Invalid image file name. Missing classification marker for file %s\" % (name))\n continue\n\n classes = name[0:underscore_pos]\n actual = \"\"\n if DogClassMarker.AIMEE in classes:\n actual = actual + DogClassMarker.AIMEE\n if DogClassMarker.MADDIE in classes:\n actual = actual + DogClassMarker.MADDIE\n if DogClassMarker.OLIVIA in classes:\n actual = actual + DogClassMarker.OLIVIA\n if DogClassMarker.PINK in classes:\n actual = actual + DogClassMarker.PINK\n if len(actual) == 0:\n actual = \"_\"\n\n # Prediction\n # Convert the predicted classes contained in the vector to a string.\n # Before conversion, round down or round up values to 0 or 1 except for the mid-range number.\n # A mid-range number is counted as a \"mismatch\".\n v = y_predicted[i]\n\n low_threshold_flag = v < 0.3\n v[low_threshold_flag] = 0\n\n high_threshold_flag = v > 0.7\n v[high_threshold_flag] = 1\n\n predicted = \"\"\n if v[DogClassIndex.AIMEE] == 1:\n predicted = predicted + DogClassMarker.AIMEE\n if v[DogClassIndex.MADDIE] == 1:\n predicted = predicted + DogClassMarker.MADDIE\n if v[DogClassIndex.OLIVIA] == 1:\n predicted = predicted + DogClassMarker.OLIVIA\n if v[DogClassIndex.PINK] == 1:\n predicted = predicted + DogClassMarker.PINK\n if len(predicted) == 0:\n predicted = \"_\"\n\n # Compare the ground-truth classification string and the predicted classification string\n # Count only the complete match as the match. Do not count the partial match.\n if actual == predicted:\n correct_count = correct_count + 1\n\n print(\"Total count: %d\" % (total_count))\n print(\"Correct count (complete match only): %d\" % (correct_count))\n print(\"Accuracy: %f percent\" % (correct_count * 100 / total_count))",
"def train_and_evaluate(self, x_train, y_train, x_test, y_test):\n try:\n classifier = self.classifier_class(random_state=self.classifier_state, **self.classifier_parameters)\n except TypeError:\n classifier = self.classifier_class(**self.classifier_parameters)\n if self.data_balancer_class is not None:\n self.data_balancer = self.data_balancer_class(random_state=self.data_balancer_state)\n self.train_and_evaluate_fold(x_train, y_train, x_test, y_test, classifier, 0, data_balancer=self.data_balancer)\n\n # Error rates\n avg_metric_dict = self.ml_stats.calculate_average_results()\n\n return avg_metric_dict",
"def setDecisionThreshold(self, thresh) -> None:\n ...",
"def test(self,test_set,test_label): \n\n\t\t# YOUR CODE HERE\n\t\tpred_label = np.zeros((len(test_set)))\n\n\t\ttest_set_biased = np.c_[test_set, np.ones(test_set.shape[0])]\n\t\tyhat = np.matmul(test_set_biased,self.w)\n\t\t\n\t\tpred_label = np.argmax(yhat, axis=1)\n\n\t\taccuracy = np.sum(np.equal(test_label,pred_label)) / len(test_set)\n\n\t\t# EVALUATION\n # get image with highest and lowest perceptron weight from each class\n\t\tself.highestPosteriorImages = np.zeros((self.feature_dim, self.num_class))\n\t\tself.lowestPosteriorImages = np.zeros((self.feature_dim, self.num_class))\n\n\t\tsummed = yhat\n\n\t\tlabelArgs = [np.nonzero(test_label == l)[0] for l in range(self.num_class)]\n\n\t\tfor classIdx, argsInClass in enumerate(labelArgs):\n\t\t\tmaxArg = np.argmax(summed[argsInClass, classIdx], axis=0)\n\t\t\tminArg = np.argmin(summed[argsInClass, classIdx], axis=0)\n\t\t\tself.highestPosteriorImages[:,classIdx] = (test_set[argsInClass])[maxArg]\n\t\t\tself.lowestPosteriorImages[:,classIdx] = (test_set[argsInClass])[minArg]\n\n\t\tprint (\"Perceptron Accuracy:\", accuracy)\n\t\t\n\t\treturn accuracy, pred_label",
"def test_strict_thresholding():\n\n # Generate test dataset\n test_dset_size = (100, 100)\n test_hdim_1_pt = 50.0\n test_hdim_2_pt = 50.0\n test_hdim_1_sz = 10\n test_hdim_2_sz = 10\n test_amp = 10\n test_data = np.zeros(test_dset_size)\n test_data = tbtest.make_feature_blob(\n test_data,\n test_hdim_1_pt,\n test_hdim_2_pt,\n h1_size=test_hdim_1_sz,\n h2_size=test_hdim_2_sz,\n amplitude=test_amp,\n )\n test_data_iris = tbtest.make_dataset_from_arr(test_data, data_type=\"iris\")\n\n # All of these thresholds will be met\n thresholds = [1, 5, 7.5]\n\n # The second n_min threshold can never be met\n n_min_thresholds = [0, test_data.size + 1, 0]\n\n # This will detect 2 features (first and last threshold value)\n features = feat_detect.feature_detection_multithreshold_timestep(\n test_data_iris,\n 0,\n dxy=1,\n threshold=thresholds,\n n_min_threshold=n_min_thresholds,\n strict_thresholding=False,\n )\n assert len(features) == 1\n assert features[\"threshold_value\"].item() == thresholds[-1]\n\n # Since the second n_min_thresholds value is not met this will only detect 1 feature\n features = feat_detect.feature_detection_multithreshold_timestep(\n test_data_iris,\n 0,\n dxy=1,\n threshold=thresholds,\n n_min_threshold=n_min_thresholds,\n strict_thresholding=True,\n )\n assert len(features) == 1\n assert features[\"threshold_value\"].item() == thresholds[0]",
"def test(self, file_dir=\"training_data\"):\n print(\"loading testing data\")\n test_data = MNIST(file_dir)\n img, lbl = test_data.load_testing()\n\n correct = 0\n for i in range(0, len(img)):\n self.classify(img[i])\n b = np.where(self.activations[-1] == max(self.activations[-1]))[0][0]\n c = lbl[i]\n if (np.where(self.activations[-1] == max(self.activations[-1]))[0][0]) == lbl[i]:\n correct += 1\n\n print(str((correct / len(img)) * 100) + \" % accuracy\")",
"def evaluate_classifier(self):\n \n test_generator = self.create_test_generators()\n output = self.model.evaluate(test_generator, steps=33//32 +1, workers = 0)\n \n return output",
"def __init__(self, model, number_tests=None, confidence_threshold=None,\n account_test_filter=None, target_ratio_test=0.20):\n if not model.model_trained:\n raise Exception(\"The model is not trained.\")\n self.model = model\n self.full_model = model\n \n self.show_tips = self.model.show_tips\n print(\"Analysing {} model\".format(self.model.model_name))\n \n # Check if evaluation is needed\n self.reevaluate = False\n # Check if recalculation is needed\n self.recalculate = False\n\n # Get parameters from the model or take the input values\n # number_tests\n if self.model.evaluation_metrics:\n if len(self.model.test_splits) != 0 and isinstance(self.model.test_splits, dict):\n self.number_tests = len(self.model.test_splits)\n print(\"Model's current number tests: {}\".format(self.number_tests))\n if not number_tests is None and number_tests != self.number_tests:\n self.number_tests = number_tests\n print(\"New number tests: {}\".format(self.number_tests))\n self.reevaluate = True\n else:\n self.number_tests = 10 if number_tests is None else number_tests\n # confidence_threshold\n if self.model.evaluation_metrics:\n if not self.model.confidence_threshold is None:\n self.confidence_threshold = self.model.confidence_threshold\n print(\"Model's current confidence threshold: {}\".format(self.confidence_threshold))\n if not confidence_threshold is None and confidence_threshold != self.confidence_threshold:\n self.confidence_threshold = confidence_threshold\n print(\"New confidence threshold: {}\".format(self.confidence_threshold))\n self.recalculate = True\n else:\n self.confidence_threshold = 300 if confidence_threshold is None else confidence_threshold\n # account_test_filter\n if self.model.evaluation_metrics:\n if isinstance(self.model.account_test_filter, list):\n self.account_test_filter = self.model.account_test_filter\n print(\"Model's current account test filter: {}\".format(self.account_test_filter))\n if not account_test_filter is None and account_test_filter != self.account_test_filter:\n self.account_test_filter = account_test_filter\n print(\"New account test filter: {}\".format(self.account_test_filter))\n self.reevaluate = True\n else:\n self.account_test_filter = self.model.data['account_banner'].unique() if account_test_filter is None \\\n else account_test_filter\n # target_ratio_test\n if self.model.evaluation_metrics:\n self.target_ratio_test = self.model.target_ratio_test\n print(\"Model's current target ratio test: {}\".format(self.target_ratio_test))\n if not target_ratio_test is None and target_ratio_test != self.target_ratio_test:\n self.target_ratio_test = target_ratio_test\n print(\"New target ratio test: {}\".format(self.target_ratio_test))\n self.reevaluate = True\n else:\n self.target_ratio_test = target_ratio_test\n \n if not self.model.evaluation_metrics:\n print(\"The model is not evaluated.\")\n self.reevaluate = True\n elif self.reevaluate:\n print(\"The model was evaluated with different parameters.\")\n self.model.evaluation_metrics = None\n self.model.confidence_metrics = None\n self.model.test_splits = None\n else:\n print(\"The model is already evaluated.\")\n self.reevaluate = False\n \n if self.reevaluate:\n print(\"Evaluation is needed, thus, recalculation is needed too\")\n self.recalculate = True\n elif self.recalculate:\n print(\"The model is evaluated, the new threshold is defined\")\n else:\n print(\"Recalculation is not needed\")\n self.recalculate = False\n \n self.test_splitting = Splitting(splits=self.model.test_splits, data=self.model.data,\n 
number_tests=self.number_tests, target_ratio=self.target_ratio_test,\n cat_feature = self.model.cat_feature)\n self.__get_unique_index_splits()\n self.__evaluate_model()\n self.__initialize_confidence_filter()\n# self.__initialize_confidence_future_filter()\n self.__calculate_model()\n \n self.evaluation_overalls = self.__overall_metrics('evaluation')\n self.evaluation_accounts = self.__account_metrics('evaluation')\n self.confidence_overalls = self.__overall_metrics('confidence')\n self.confidence_accounts = self.__account_metrics('confidence')\n \n self.__update_model()\n self.__tips_useful_methods()",
"def train_and_evaluate_fold(self, x_train, y_train, x_test, y_test, classifier, index, data_balancer=None):\n if data_balancer is not None:\n x_train, y_train = data_balancer.fit_sample(x_train, y_train)\n\n # Training fold specific statistics\n verbose_print(\"\\n== Training Stats Fold {0} ==\".format(index + 1))\n verbose_print(\"Number of rows for training fold {0}: {1}\".format(index + 1, x_train.shape[0]))\n verbose_print(\"Number of defaulters for training fold {0}: {1}\".format(index + 1, y_train[y_train == 1].shape[0]))\n\n start_time = timer()\n classifier.fit(x_train, y_train)\n end_time = timer()\n fit_time = end_time - start_time\n\n # Testing fold specific statistics\n verbose_print(\"== Testing Stats Fold {0} ==\".format(index + 1))\n verbose_print(\"Number of rows for training fold {0}: {1}\".format(index + 1, len(y_test)))\n verbose_print(\"Number of defaulters for training fold {0}: {1}\".format(index + 1, np.count_nonzero(y_test == 1)))\n\n # Test accuracy\n test_classification = classifier.predict(x_test)\n test_classification = np.array(test_classification)\n test_classification = test_classification.flatten()\n\n try:\n test_probabilities = classifier.predict_proba(x_test)\n if len(test_probabilities[0]) < 2:\n raise RuntimeError(\"test probabilities is not correct length\")\n except Exception:\n test_probabilities = [[-1, -1]] * len(test_classification)\n\n outcome_decision_values = None\n try:\n predictions = classifier.predict_proba(x_test)\n outcome_decision_values = predictions[:, 1]\n except Exception as e:\n outcome_decision_values = None\n verbose_print(\"WARNING: unable to calculate classification accuracy - {0} - {1}\".format(classifier.__class__.__name__, e))\n\n fpr, tpr = None, None\n if outcome_decision_values is not None:\n try:\n fpr, tpr, _ = roc_curve(y_test, outcome_decision_values)\n fpr = fpr.tolist()\n tpr = tpr.tolist()\n except Exception as e:\n print(e)\n\n self.ml_stats.calculate_and_append_fold_accuracy(test_classification, y_test, tpr, fpr, fit_time, test_probabilities=test_probabilities)",
"def __main__():\n train_dataset, test_dataset, hot_dog_count = generate_dataset()\n\n scalar = StandardScaler()\n scaled_data_train = scalar.fit_transform(train_dataset)\n pca = PCA(n_components=3)\n pca.fit_transform(scaled_data_train)\n\n scaled_data_test = scalar.transform(test_dataset)\n pca.transform(scaled_data_test)\n y_pca = pca.transform(scaled_data_test)\n\n training_data = []\n print(hot_dog_count)\n for elements in train_dataset:\n if hot_dog_count > 0:\n training_data.append([elements, \"H\"])\n hot_dog_count -= 1\n else:\n training_data.append([elements, \"N\"])\n\n t = [1 for i in range(12)]\n for i in range(12):\n t.append(0)\n\n random.shuffle(training_data)\n T = y_pca\n print(\"train\", training_data[0])\n print(\"t\", T)\n obj = KNearestNeighbour(training_data, T, 3)\n obj.kNN()",
"def performClassification(X_train, y_train, X_test, y_test, method, parameters, fout, savemodel):\n\n if method == 'RF':\n return performRFClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)\n\n elif method == 'KNN':\n return performKNNClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)\n\n elif method == 'SVM':\n return performSVMClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)\n\n elif method == 'ADA':\n return performAdaBoostClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)\n\n elif method == 'GTB':\n return performGTBClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)\n\n elif method == 'QDA':\n return performQDAClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function creates the table header based on the number of columns.
|
def _Header(numCols):
return "\\begin{center}\n\\begin{tabular}{" + "|c" * numCols + "|}\n"
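For a concrete sense of the output, the header for a three-column table prints as follows (the commented lines show the returned string):

print(_Header(3))
# \begin{center}
# \begin{tabular}{|c|c|c|}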
|
[
"def tbl_header():\n header = ['REGION', 'DEL/DUP', 'CNV LENGTH', 'ZSCORE', 'MEAN DEPTH', 'NUMBER OF PROBES', 'TOTAL ALLELES',\n 'POP DEL COUNT', 'POP DEL AF', 'POP DUP COUNT', 'POP DUP AF', 'GENES']\n return header",
"def format_medical_table_headers(self):\n med_cols = ['B', 'C', 'D', 'E']\n for col in med_cols:\n cell = f'{col}{self.title_final_row + 1}'\n self.format_cell_as_header(cell)",
"def table_header(request):\n html = ''\n for field_name in settings.TABLE_COLUMNS:\n col_name = COLUMN_NAMES.get(field_name, None)\n if col_name is None:\n continue\n col_style = settings.COLUMN_STYLES.get(field_name, DEFAULT_CULUMN_STYLE)\n html += '<th data-width=\"{width}\" data-ds=\"{defaultstate}\" id=\"id-col-{col_name}\">{link}</th>'.format(\n width=col_style['width'],\n defaultstate=col_style['default_state'],\n col_name=col_name,\n link=sort_link(request, col_name, field_name))\n return html",
"def _generate_header(self):\n margin_str = ' ' * self.column_margin\n top = '┌'\n headings = '│'\n heading_sep = '╞'\n row_sep = '├'\n self._bottom = '└'\n for i, col in enumerate(self.columns, start=1):\n top += ('─' * (col.width + 2 * self.column_margin)\n + ('┐' if i == len(self.columns) else '┬'))\n headings += margin_str + col.get_header_cell() + margin_str + '│'\n heading_sep += ('═' * (col.width + 2 * self.column_margin)\n + ('╡' if i == len(self.columns) else '╪'))\n row_sep += ('─' * (col.width + 2 * self.column_margin)\n + ('┤' if i == len(self.columns) else '┼'))\n self._bottom += ('─' * (col.width + 2 * self.column_margin)\n + ('┘' if i == len(self.columns) else '┴'))\n if self.title:\n self._text_lines.append(self.title)\n self._text_lines.append(top)\n if self.include_headings:\n self._text_lines.append(headings)\n self._text_lines.append(heading_sep)\n self._row_separator = row_sep if self.use_row_separators else None",
"def createHeading(self):\n\t\tfieldNames = ['Year','Month','State','District']\n\t\tfor i in range(1,43):\n\t\t\tfieldNames.append('col '+str(i))\n\t\twith open(self.filepath, 'w') as PMGSYFile:\n\t\t\tcsvWriter = csv.writer(PMGSYFile)\n\t\t\tcsvWriter.writerow(fieldNames)\n\t\tPMGSYFile.close()",
"def make_source_table_header(self, current_results):\n table_header = ' <tr>\\n'\n table_header+= ' <th>Source_id</th>\\n'\n table_header+= ' <th>Source_name</th>\\n'\n table_header+= ' <th>Author</th>\\n'\n if jedli_global.include_len_in_table:\n table_header+= ' <th>Number of characters</th>\\n'\n table_header+= ' <th>Date AH</th>\\n'\n for word in sorted(current_results):\n table_header += ' <th>{}</th>\\n'.format(word)\n table_header += ' </tr>\\n'\n return table_header",
"def _create_columns(self):\n\n class_names = const.CO_TABLE_COLUMNS\n\n self._t_output.config(columns=class_names)\n\n for index in range(len(class_names)):\n col = '#' + str(index + 1)\n\n self._t_output.column(column=col,\n anchor='center',\n minwidth=150,\n stretch=True)\n\n self._t_output.heading(column=col,\n text=class_names[index])",
"def generate_headers(headers, periodic_table):\n\n\twith open(periodic_table) as file:\n\t\trows = csv.reader(file)\n\t\twhitespace = re.compile(r'\\s*')\n\t\tfor row in rows:\n\t\t\tif (rows.line_num == 1):\n\t\t\t\tcontinue\n\t\t\theaders.append(re.sub(whitespace, '', row[2]))",
"def buildDiffTableHeader(txn, table):\n heading = table.thead.tr\n heading.th('Begin Probe')\n heading.th('End Probe')\n heading.th('Wall Time')\n\n if txn.endpoint.pmcNames:\n for pmcName in txn.endpoint.pmcNames:\n heading.th(pmcName)\n\n if txn.endpoint.topdownValues:\n for value in txn.endpoint.topdownValues:\n heading.th(value.name)",
"def report_header(col_names, col_widths):\n s = \"\\nDonor Report\"\n s = \"{}\\n{}\".format(s, report_row_separator(\"=\", col_widths))\n s = \"{}\\n{}\".format(s, format_str(col_names[0], col_widths[0]))\n for n in range(1, len(col_names)):\n s = \"{} | {}\".format(s, format_str(col_names[n], col_widths[n]))\n s = \"{}\\n{}\".format(s, report_row_separator(\"-\", col_widths))\n return s",
"def html_table_header_row(data):\n html = '\\n\\t<tr>'\n\n for th in data:\n title = th.replace('_', ' ').title()\n html += '<th>{}</th>'.format(title)\n\n return html + '</tr>'",
"def print_header():\n header = \"| {:<18} | {:<18} | {:<21} | {:<21} |\".format(\"ROLL_NUMBER\",\n \"NAME\",\n \"DATE-OF-BIRTH\",\n \"REGISTRATION_DATE\")\n print(header, '\\n', \"_\"*(len(header)), \"\\n\")",
"def create_header(freqs):\n header = ''\n for i in range (len(freqs)):\n if freqs[i] != 0:\n header = header + str(i) + ' ' + str(freqs[i]) + ' '\n return header[:len(header) - 1]",
"def insert_column_a_financials_headers(self):\n header_values = ['Settlement Amount',\n 'Amount Stated to Providers',\n '1/3 of Settlement for Meds',\n 'Attorney Fees',\n '% to Business',\n 'OJ',\n 'Expenses',\n 'Total to Business', \n 'Net to Client']\n header_rows = range(self.title_final_row + 1, self.title_final_row + 1 + len(header_values) * 2, 2)\n\n for row, val in zip(header_rows, header_values):\n cell = f'A{row}'\n self.sheet[cell] = val\n self.format_cell_as_header(cell)",
"def synth_header(self):\n\n header = \"n,imbalanced,num_c,internoiselvl,intranoiselvl,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,kvs_sze,kvs_fsze,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_sze_GT,l2_fsze_GT,l1_sze_GT,l1_fsze_GT,l2_usze_G, th_usze_G,l2_ufsze_G, th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)",
"def header_format_xlsx(self, headers, header_format, total_row):\n header = [\n {\"header\": col, \"header_format\": header_format}\n for col in headers\n ]\n [header[i].update(total_row[i]) for i in range(len(total_row))]\n return header",
"def real_header(self):\n\n header = \"n,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_usze_G,th_usze_G,l2_ufsze_G,th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)",
"def print_headers():\n print(\"symbol\\t count\\t price\\t\\t total\")\n print(\"-\" * 71)",
"def createHeader(self, monthName):\n header = createText(self.marginl, self.calHeight, self.width, self.rowSize)\n self.applyTextToFrame(monthName, header)\n colCnt = 0\n for i in self.dayOrder:\n cel = createText(self.marginl + colCnt * self.colSize,\n self.calHeight + self.rowSize,\n self.colSize, self.rowSize)\n self.applyTextToFrame(i, cel)\n colCnt += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function creates the column header based on the list of strings that are passed in via the input strIn.
|
def _colHeader(strIn):
return " & ".join(strIn) + "\\\\\n"
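A quick example of the returned string, using illustrative column labels:

print(_colHeader(["$x$", "$f(x)$", "$f'(x)$"]))
# $x$ & $f(x)$ & $f'(x)$\\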
|
[
"def format_medical_table_headers(self):\n med_cols = ['B', 'C', 'D', 'E']\n for col in med_cols:\n cell = f'{col}{self.title_final_row + 1}'\n self.format_cell_as_header(cell)",
"def _Header(numCols):\n return \"\\\\begin{center}\\n\\\\begin{tabular}{\" + \"|c\" * numCols + \"|}\\n\"",
"def generate_headers(headers, periodic_table):\n\n\twith open(periodic_table) as file:\n\t\trows = csv.reader(file)\n\t\twhitespace = re.compile(r'\\s*')\n\t\tfor row in rows:\n\t\t\tif (rows.line_num == 1):\n\t\t\t\tcontinue\n\t\t\theaders.append(re.sub(whitespace, '', row[2]))",
"def tbl_header():\n header = ['REGION', 'DEL/DUP', 'CNV LENGTH', 'ZSCORE', 'MEAN DEPTH', 'NUMBER OF PROBES', 'TOTAL ALLELES',\n 'POP DEL COUNT', 'POP DEL AF', 'POP DUP COUNT', 'POP DUP AF', 'GENES']\n return header",
"def create_header(freqs):\n header = ''\n for i in range (len(freqs)):\n if freqs[i] != 0:\n header = header + str(i) + ' ' + str(freqs[i]) + ' '\n return header[:len(header) - 1]",
"def parse_header(self, headlist):\n mapping = []\n found = []\n headlist = [cleancol.sub(\"_\", col) for col in headlist]\n logstr = \"\"\n for i, heading in enumerate(headlist):\n for key in (\n (heading, heading.lower()) if heading != heading.lower() else (heading,)\n ):\n if key in self.fieldmap:\n found.append(key.lower())\n field = self.fieldmap[key]\n key = self.check_fkey(key, field)\n mapping.append(\"column%s=%s\" % (i + 1, key))\n for key in headlist:\n if key.lower() not in found:\n logstr += \", %s\" % key\n if mapping:\n mappingstr = \",\".join(mapping)\n if logstr:\n logmsg = \"CSV header unmatched ignored cols = %s\" % logstr[2:]\n else:\n logmsg = \"CSV header matched all cols\"\n self.loglist.append(logmsg)\n return mappingstr\n return \"\"",
"def table_header(request):\n html = ''\n for field_name in settings.TABLE_COLUMNS:\n col_name = COLUMN_NAMES.get(field_name, None)\n if col_name is None:\n continue\n col_style = settings.COLUMN_STYLES.get(field_name, DEFAULT_CULUMN_STYLE)\n html += '<th data-width=\"{width}\" data-ds=\"{defaultstate}\" id=\"id-col-{col_name}\">{link}</th>'.format(\n width=col_style['width'],\n defaultstate=col_style['default_state'],\n col_name=col_name,\n link=sort_link(request, col_name, field_name))\n return html",
"def make_source_table_header(self, current_results):\n table_header = ' <tr>\\n'\n table_header+= ' <th>Source_id</th>\\n'\n table_header+= ' <th>Source_name</th>\\n'\n table_header+= ' <th>Author</th>\\n'\n if jedli_global.include_len_in_table:\n table_header+= ' <th>Number of characters</th>\\n'\n table_header+= ' <th>Date AH</th>\\n'\n for word in sorted(current_results):\n table_header += ' <th>{}</th>\\n'.format(word)\n table_header += ' </tr>\\n'\n return table_header",
"def get_header(header_lines_in): \n \n header = \"\"\n \n for line in header_lines_in[:4]:\n header += line\n \n return header",
"def add_column_names(self, *columns):\n\n next_column = len(self.header) + 1\n\n for column in columns:\n self.wsc.cell(row=1, column=next_column).value = column\n self.header[column] = next_column - 1\n next_column += 1",
"def insert_column_a_financials_headers(self):\n header_values = ['Settlement Amount',\n 'Amount Stated to Providers',\n '1/3 of Settlement for Meds',\n 'Attorney Fees',\n '% to Business',\n 'OJ',\n 'Expenses',\n 'Total to Business', \n 'Net to Client']\n header_rows = range(self.title_final_row + 1, self.title_final_row + 1 + len(header_values) * 2, 2)\n\n for row, val in zip(header_rows, header_values):\n cell = f'A{row}'\n self.sheet[cell] = val\n self.format_cell_as_header(cell)",
"def report_header(col_names, col_widths):\n s = \"\\nDonor Report\"\n s = \"{}\\n{}\".format(s, report_row_separator(\"=\", col_widths))\n s = \"{}\\n{}\".format(s, format_str(col_names[0], col_widths[0]))\n for n in range(1, len(col_names)):\n s = \"{} | {}\".format(s, format_str(col_names[n], col_widths[n]))\n s = \"{}\\n{}\".format(s, report_row_separator(\"-\", col_widths))\n return s",
"def _write_column_titles(self, column_names, extra_parameters):\n\n # Concatenate the lists\n columns = column_names + extra_parameters\n\n # Write the column titles\n self._csv_writer.writerow(columns)",
"def _generate_header(self):\n margin_str = ' ' * self.column_margin\n top = '┌'\n headings = '│'\n heading_sep = '╞'\n row_sep = '├'\n self._bottom = '└'\n for i, col in enumerate(self.columns, start=1):\n top += ('─' * (col.width + 2 * self.column_margin)\n + ('┐' if i == len(self.columns) else '┬'))\n headings += margin_str + col.get_header_cell() + margin_str + '│'\n heading_sep += ('═' * (col.width + 2 * self.column_margin)\n + ('╡' if i == len(self.columns) else '╪'))\n row_sep += ('─' * (col.width + 2 * self.column_margin)\n + ('┤' if i == len(self.columns) else '┼'))\n self._bottom += ('─' * (col.width + 2 * self.column_margin)\n + ('┘' if i == len(self.columns) else '┴'))\n if self.title:\n self._text_lines.append(self.title)\n self._text_lines.append(top)\n if self.include_headings:\n self._text_lines.append(headings)\n self._text_lines.append(heading_sep)\n self._row_separator = row_sep if self.use_row_separators else None",
"def create_csv_header():\n return 'cid,code,code_gold'",
"def build_column_index(headers, required_cols, optional_cols):\n index_d = {}\n\n def canon(s):\n \"\"\"\n Remove spaces, underscores, etc.\n \"\"\"\n return s.lower().replace(\" \", \"\").replace(\"_\",\"\")\n\n # Canoncize headers, including removing any Unicode BOM bytes.\n hd = [ canon(s.replace(u'\\ufeff','')) for s in headers ]\n \n for n in required_cols:\n cn = canon(n)\n assert cn in hd, \"Expected to find column name %s in CSV file, but only had %s\"%(n,headers)\n index_d[n] = hd.index(cn)\n\n for n in optional_cols:\n cn = canon(n)\n if cn in hd:\n index_d[n] = hd.index(cn)\n\n return index_d",
"def _create_columns(self):\n\n class_names = const.CO_TABLE_COLUMNS\n\n self._t_output.config(columns=class_names)\n\n for index in range(len(class_names)):\n col = '#' + str(index + 1)\n\n self._t_output.column(column=col,\n anchor='center',\n minwidth=150,\n stretch=True)\n\n self._t_output.heading(column=col,\n text=class_names[index])",
"def header_format_xlsx(self, headers, header_format, total_row):\n header = [\n {\"header\": col, \"header_format\": header_format}\n for col in headers\n ]\n [header[i].update(total_row[i]) for i in range(len(total_row))]\n return header",
"def getColumns(inFile, delim=tab, header=True):\r\n cols = {}\r\n indexToName = {}\r\n for lineNum, line in enumerate(inFile):\r\n if lineNum == 0:\r\n headings = line.strip().split(delim)\r\n k = 0\r\n for heading in headings:\r\n heading = heading.strip()\r\n if header:\r\n cols[heading] = []\r\n indexToName[k] = heading\r\n else:\r\n # in this case the heading is actually just a cell\r\n cols[k] = [heading]\r\n indexToName[k] = k\r\n k += 1\r\n else:\r\n cells = line.strip().split(delim)\r\n k = 0\r\n for cell in cells:\r\n cell = cell.strip()\r\n cols[indexToName[k]] += [cell]\r\n k += 1\r\n \r\n return cols, indexToName"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function transforms the 2D numpy array (arrIn) into latex tabular format. The "form" argument specifies the number format to be used in the tabular environment. The "rowHeader" argument is a list of strings that are used in the first column of each row in the tabular environment. The latex tabular environment is returned as a string.
|
def _Arr2Tab(arrIn, form="%.4E", rowHeader=None):
    out = str()
    if rowHeader is None:
        if np.size(arrIn.shape) == 2:
            numRows = arrIn.shape[0]
            for k in range(numRows):
                out += np.array2string(
                    arrIn[k, :], separator=" & ", formatter={"float_kind": lambda x: form % x}
                ).strip("[]")
                out += "\\\\\n\\hline\n"
        else:
            # 1D input: format the whole array as a single tabular row
            out += np.array2string(
                arrIn, separator=" & ", formatter={"float_kind": lambda x: form % x}
            ).strip("[]")
            out += "\\\\\n"
    else:
        if np.size(arrIn.shape) == 2:
            numRows = arrIn.shape[0]
            for k in range(numRows):
                out += rowHeader[k] + " & "
                out += np.array2string(
                    arrIn[k, :], separator=" & ", formatter={"float_kind": lambda x: form % x}
                ).strip("[]")
                out += "\\\\\n\\hline\n"
        else:
            # 1D input: single row, prefixed by the first row header
            out += rowHeader[0] + " & "
            out += np.array2string(
                arrIn, separator=" & ", formatter={"float_kind": lambda x: form % x}
            ).strip("[]")
            out += "\\\\\n"
    return out.rstrip()
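
A minimal usage sketch, assuming numpy is imported as np, _Arr2Tab is in scope (e.g. inside its own module), and the array values are invented for illustration:

import numpy as np

arr = np.array([[1.0, 2.0], [3.0, 4.0]])
print(_Arr2Tab(arr, form="%.2E", rowHeader=["alpha", "beta"]))
# Each row becomes roughly "alpha & 1.00E+00 & 2.00E+00\\" followed by
# "\hline"; the exact spacing depends on numpy's array2string.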
|
[
"def SimpleTable(arrIn, form=\"%.4E\", colHeader=None, rowHeader=None):\n\n if colHeader is None and rowHeader is None:\n return (\n table._Header(arrIn.shape[1])\n + \"\\\\hline\\n\"\n + table._Arr2Tab(arrIn, form=form)\n + \"\\n\"\n + table._Footer()\n )\n elif rowHeader is None:\n return (\n table._Header(arrIn.shape[1])\n + \"\\\\hline\\n\"\n + table._colHeader(colHeader)\n + \"\\\\hline\\n\"\n + table._Arr2Tab(arrIn, form=form)\n + \"\\n\"\n + table._Footer()\n )\n elif colHeader is None:\n return (\n table._Header(arrIn.shape[1] + 1)\n + \"\\\\hline\\n\"\n + table._Arr2Tab(arrIn, form=form, rowHeader=rowHeader)\n + \"\\n\"\n + table._Footer()\n )\n else:\n return (\n table._Header(arrIn.shape[1] + 1)\n + \"\\\\hline\\n\"\n + table._colHeader(colHeader)\n + \"\\\\hline\\n\"\n + table._Arr2Tab(arrIn, form=form, rowHeader=rowHeader)\n + \"\\n\"\n + table._Footer()\n )",
"def numpy2tex(array,\n filename = '',\n caption = '',\n label = '',\n columns = '',\n columnTitles = [],\n rowTitles = [],\n upperLeft = '',\n dataType = '|S100',\n option = '[h!]'):\n\n # convert any 2D iterable to np.array. Array content needs to be convertable to string\n array = np.array(array, dtype=dataType)\n if len(array.shape) != 2:\n print \"Works only for 2D Arrays. If you want to print an array of iterables (such as array of tuples)\" + \\\n \" to table, please convert it to an array of strings first.\"\n raise Exception()\n m, n = array.shape\n # todo: if array is 3D (e.g. when input array is 2D array of tuples), convert third dimension to\n # string and reshape to 2D\n # add titles for Columns, Rows and upper left cell, if given/neccessary\n cTitles = False\n if columnTitles:\n array = np.append(np.array(columnTitles)[np.newaxis, :], array, axis = 0)\n cTitles = True\n if rowTitles:\n if cTitles:\n rowTitles = np.append([upperLeft], rowTitles)[:, np.newaxis]\n columns = '|c' + columns\n array = np.append(rowTitles, array, axis = 1)\n\n # convert to tex string\n texString = \\\n \"\\\\begin{table}\" + option + \"\\n\" + \\\n \"\\\\centering\\n\" + \\\n \"\\\\begin{tabular}{\" + columns + \"}\\n\\\\hline\\n\"\n for line in array:\n for el in line[:-1]:\n texString += str(el) + \" & \"\n texString += str(line[-1]) + \" \\\\\\\\\\\\hline\\n\"\n texString += \\\n \"\\\\end{tabular}\\n\" + \\\n \"\\\\caption{\" + caption + \"}\\n\" + \\\n \"\\\\label{tab:\" + label + \"}\\n\" + \\\n \"\\\\end{table}\\n\"\n # write to file if specified and return string\n if filename:\n with open(filename, 'w') as f:\n f.write(texString)\n return texString",
"def html_table(matrix_or_array_like,\n float_fmt=None,\n raw=False,\n first_row_headers=False,\n caption=None,\n style=None,\n formatter=None):\n\n raw_table = matrix_or_array_like\n if not float_fmt:\n float_fmt = '%.2f'\n\n if not formatter:\n formatter = formatter_factory(default_fmt=float_fmt,\n outlier_fmt=float_fmt)\n\n if 'sympy.matrices' in str(type(matrix_or_array_like)):\n raw_table = array(raw_table)\n if style:\n html_table = ['<table style=\"%s\">' % style]\n else:\n html_table = ['<table>']\n if caption:\n html_table.append('<caption>%s</caption>' % caption)\n row_count = 0\n for row in raw_table:\n html_table.append('<tr>')\n for col in row:\n to_append = formatter(col)\n\n if first_row_headers and row_count == 0:\n html_table.append('<th>{0}</th>'.format(to_append))\n else:\n html_table.append('<td>{0}</td>'.format(to_append))\n\n html_table.append('</tr>')\n row_count += 1\n html_table.append('</table>')\n if raw:\n return ''.join(html_table)\n else:\n return HTML(''.join(html_table))",
"def to_latex(a):\n if len(a.shape) > 2:\n raise ValueError(\"bmatrix can at most display two dimensions\")\n lines = str(a).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\n rv = [r\"\\begin{bmatrix}\"]\n rv += [\n \" \" + \" & \".join([f\"{float(n):.3f}\" for n in l.split()]) + r\" \\\\\"\n for l in lines\n ]\n rv += [r\"\\end{bmatrix}\"]\n return \"\\n\".join(rv)",
"def tablify(moduleList):\n header = \"| Module | Year | Term | Prerequisites\\n|----|----|----|----\\n\"\n rows = \"\"\n for code in moduleList:\n rows += tableRow(modules[code]) # this is a fold...do it with functools\n return header + rows",
"def to_latex(df: pd.DataFrame, label=\"\", caption=\"\", **kwargs):\n kwargs[\"escape\"] = kwargs[\"escape\"] if \"escape\" in kwargs.keys() else False \n tabular_str = df.to_latex(**kwargs)\n\n str_map = {r\"\\toprule\": r\"\\hline\\hline\",\n r\"\\midrule\": r\"\\hline\",\n r\"\\bottomrule\": r\"\\hline\\hline\"}\n for old, new in str_map.items():\n tabular_str = tabular_str.replace(old, new)\n\n label = \"{\" + \"tab:\" + label + \"}\"\n caption = \"{\" + caption + \"}\"\n table_str = \"\\\\begin{table}[htb]\\n\" + \\\n f\"\\\\caption{caption}\\n\" + \\\n f\"\\\\label{label}\\n\" +\\\n tabular_str +\\\n \"\\\\end{table}\"\n\n print(table_str)\n return",
"def grid_to_latex(sudoku, param=0):\n s = ''\n for r in 'ABCDEFGHI':\n for c in '12345678':\n cell = r+c\n value = sudoku[cell]\n if len(value) > 1:\n s = s + '~' + ' & '\n else:\n s = s + value + ' & '\n value = sudoku[r + '9']\n if len(value) >1:\n s = s + '~' + '\\\\\\ \\n'\n else:\n s = s + value + '\\\\\\ \\n'\n if param == 1:\n s = s + '\\\\hline \\n'\n if r in 'CF':\n s = s + '\\\\hline \\n'\n return s",
"def to_latex(self,fn='tableone.tex'):\n tablefmt = 'latex'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))",
"def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '⟨'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'⟩'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht",
"def writeTab(header_rows,matrix,matrixFile,precision=4):\n \n nrows=len(header_rows)\n \n header_tabs=headers2tabs(header_rows)\n \n # interaction matrix output\n out_fh=gzip.open(matrixFile,\"wb\")\n \n format_func=(\"{:0.\"+str(precision)+\"f}\").format\n \n k=0\n \n for i in xrange(nrows):\n print(\"\\t\".join(header_tabs[i])+\"\\t\"+\"\\t\".join(map(format_func,matrix[i,:])),file=out_fh)\n \n out_fh.close()",
"def array_dict_table_printer(array, order=None, header=None, vertical=False):\n if array is None or array == []:\n return None\n \n # header\n if header is None:\n header = list(array[0].keys())\n\n if order is None:\n order = header\n\n if header is None:\n if vertical:\n \n x = PrettyTable()\n x.add_column(\"Item\", order)\n else:\n x = PrettyTable(order)\n else:\n if vertical:\n x = PrettyTable()\n x.add_column(\"Item\", header)\n else:\n x = PrettyTable(header)\n\n for element in array:\n values = []\n for key in order:\n try:\n tmp = str(element[key])\n except:\n tmp = ' '\n values.append(tmp)\n if vertical:\n x.add_column(\" \", values)\n else:\n x.add_row(values)\n x.align = \"l\"\n return x",
"def grading_to_html_table(gr):\n\n layers = [a for a in gr]\n layerstrs = [str(a) for a in gr]\n if len(gr.magma().gens()) == 1:\n layerstrs = [a.replace(\"(\", \"\").replace(\")\", \"\") for a in layerstrs]\n elif len(gr.magma().gens()) == 0:\n layerstrs = [a.replace(\"()\", \"0\") for a in layerstrs]\n htmlstr = '<table class=\"gradingarray\"><tr>'\n for a in layerstrs:\n htmlstr += '<td> %s </td>' % a\n htmlstr += '</tr><tr>'\n for a in layers:\n lstr = \", \".join([str(X) for X in gr[a]])\n htmlstr += '<td> <%s> </td>' % lstr\n htmlstr += '</tr></table>'\n\n return htmlstr",
"def to_latex_table(self, experiment, **kwargs):\n\n if 'caption' not in kwargs or kwargs['caption'] is None:\n caption_text = \"\\\\caption{Parameters for Axelrod Simulations for Experiment Name: \"\n caption_text += experiment\n caption_text += '}\\n'\n else:\n caption_text = '\\\\caption{'\n caption_text += kwargs['caption']\n caption_text += '}\\n'\n\n\n t = []\n t.append('\\\\begin{table}[h]\\n')\n t.append('\\\\begin{tabular}{|p{0.6\\\\textwidth}|p{0.4\\\\textwidth}|}\\n')\n t.append('\\\\hline\\n')\n t.append('\\\\textbf{Simulation Parameter} & \\\\textbf{Value or Values} \\\\\\\\ \\n')\n t.append('\\\\hline\\n')\n\n for var in self._get_public_variables():\n s = self.parameter_labels[var[0]]\n s += ' & '\n\n\n # need to know if var[1] is a single integer, or a list\n if hasattr(var[1], '__iter__'):\n s += ', '.join(map(str, var[1]))\n else:\n s += str(var[1])\n\n s += '\\\\\\\\ \\n'\n t.append(s)\n\n\n t.append('\\\\hline\\n')\n t.append('\\\\end{tabular}\\n')\n t.append(caption_text)\n t.append('\\\\label{tab:ctpy-sim-parameters}\\n')\n t.append('\\\\end{table}\\n')\n\n return ''.join(t)",
"def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscissa' , 'Value' ) ] \n for i in range ( t.size() ) :\n x = t.x ( i )\n y = t.y ( i )\n row = xfmt % x, yfmt % y\n rows.append ( row )\n \n if not title : title = 'Interpolation Table' \n import ostap.logger.table as T\n return T.table ( rows , title = title , prefix = prefix , alignment = alignment )",
"def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot",
"def __latex__(self):\n a = self.MomentMatrix\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{bmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{bmatrix}']\n return '\\n'.join(rv)",
"def format_table(filename, savefilename, model=False, bracket=[\"(\", \")\"], LaTeX=True):\n\tif type(filename) == str:\n\t\tm = dataobj(filename)\n\telif type(filename).__name__ != \"dataobj\":\n\t\tprint(\"File {0} is not being supported.\".format(filename))\n\tN,nvar = m.data.shape\n\tnmodel = N/2 \n\tcoef = m.data[0:nmodel]\n\tstd = m.data[nmodel:N] \n\tcoefs = [[\"{0:.3f}\".format(i) for i in l] for l in coef.transpose()]\n\tstds = [[\"{0:.3f}\".format(i) for i in l] for l in std.transpose()]\n\ts = \"\"\n\tif LaTeX==False:\n\t\tif model != False:\n\t\t\ts = \"&\" + \"&\".join(model)+\" \\n\"\n\t\tfor i in range(0,nvar):\n\t\t\ts = s+m.date[i]+\"&\"+\"&\".join([j for j in coefs[i]]) + \"\\n\" + \"&\"+\"&\".join([bracket[0]+k+bracket[1] for k in stds[i]])+\"\\n\"\n\telse: \n\t\tfor i in range(0,nvar):\n\t\t\ts = s+m.date[i]+\"&\"+\"&\".join([j for j in coefs[i]]) + \"\\\\\\\\\\n\" + \"&\"+\"&\".join([bracket[0]+k+bracket[1] for k in stds[i]])+\"\\\\\\\\\\n\"\n\t\theader = \"\\\\begin{tabular}{c|\"\n\t\tfor i in range(0,nmodel):\n\t\t\theader = header + \"c\"\n\t\theader= header + \"|} \\n \\\\hline\"\n\t\tif model != False:\n\t\t\theader = header + \"&\" + \"&\".join(model)+\"\\\\\\\\ \\\\hline \\n\"\n\t\tend = \"\\hline\\\\end{tabular}\"\n\t\ts = header + s + end\n\tf = open(savefilename, \"w\")\n\tf.write(s)\n\tf.close()",
"def build_table(headings, data, caption='',label=''):\n preamble = []\n preamble.append(r'\\begin{table}[!t]')\n preamble.append(r'\\caption{' + caption + r'}')\n preamble.append(r'\\label{' + label + r'}')\n preamble.append(r'\\centering')\n preamble.append(r'\\begin{tabular}{' + ' '.join('r' *\n len(headings)) +\n '}')\n\n heading = []\n heading.append(r'\\hline')\n heading.append(r' & '.join(headings) + r'\\\\')\n heading.append(r'\\hline')\n\n body = []\n for d in data:\n body.append(r' & '.join(d) + r'\\\\')\n\n postamble = []\n postamble.append(r'\\hline')\n postamble.append(r'\\end{tabular}')\n postamble.append(r'\\end{table}')\n\n table = ''\n table += '\\n'.join(preamble) + '\\n'\n table += '\\n'.join(heading) + '\\n'\n table += '\\n'.join(body) + '\\n'\n table += '\\n'.join(postamble)\n\n return table",
"def get_table_from_dict(dict_):\n dept, n_cols = get_dept(dict_)\n if dept is 0:\n return \"Not a neasted dictionary.\"\n\n dict_ = {'dict': dict_}\n\n dept, n_cols = get_dept(dict_)\n\n latex_code = '\\\\begin{tabular}{|'\n for i in range(n_cols):\n latex_code += 'c|'\n latex_code += '}\\n \\\\hline \\n'\n\n\n sub_dict = dict_\n lines = [''] * dept\n for level in range(dept-1):\n dict_tmp = dict_\n mult_k_cols = n_cols\n\n key_levels = [dict_tmp.keys()]\n # n_k_cols = len(dict_tmp.keys())\n\n\n for i in range(level):\n dict_tmp = dict_tmp[dict_tmp.keys()[0]]\n key_levels += [dict_tmp.keys()]\n\n\n key_pool = itertools.product(*key_levels)\n\n\n\n for i,comb in enumerate(key_pool):\n dict_tmp = dict_\n for key_i in comb:\n dict_tmp = dict_tmp[key_i]\n n_k_cols = len(dict_tmp.keys())\n mult_k_cols /= n_k_cols\n\n lines[level] += get_dict_line(dict_tmp, mult_k_cols)\n mult_k_cols = n_cols\n\n for l in lines:\n li = l.rsplit('&', 1)\n l = ' '.join(li)\n latex_code += l + '\\\\\\\\ \\n \\\\hline \\n'\n\n latex_code += '\\\\end{tabular}'\n return latex_code"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function creates the footer for the latex table.
|
def _Footer():
return "\\end{tabular}\n\\end{center}"
|
[
"def print_table_footer():\n\n print('''\\\\bottomrule\n\\\\end{tabular}\n}\n\\\\end{center}\n\\\\end{table}\\n\\n''')",
"def print_latex_footer():\n print(\n \"\"\"\\\\bottomrule\n\\\\end{tabular}\n\\\\end{center}\n\\\\end{Large}\n\\\\end{document}\"\"\"\n )",
"def generate_footer_html(self):\n footer = '<td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n\n if not self.show_footer:\n return footer\n footer += self.generate_observations_html()\n footer += self.generate_r2_html()\n footer += self.generate_r2_adj_html()\n if self.show_residual_std_err:\n footer += self.generate_resid_std_err_html()\n if self.show_f_statistic:\n footer += self.generate_f_statistic_html()\n footer += '<tr><td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n footer += self.generate_notes_html()\n footer += '</table>'\n\n return footer",
"def WriteFooter(self):\n pass",
"def add_footer(table: LongTable, len_data: int):\n\n table_style = TableStyle([('FONTNAME', (0, len_data - 2), (-1, len_data - 1), 'Courier-Bold'),\n ('BACKGROUND', (0, len_data - 2), (-1, len_data - 1), colors.sandybrown)\n ])\n table.setStyle(table_style)",
"def footer(self):\n output = \"\"\"\n</body> \n</html> \n\"\"\"\n return output",
"def create_footer_from_template(self):\r\n self.footer_lines = []\r\n for line in _Templates().footer.split(\"\\n\"):\r\n line = line.replace(\" \", \"\\t\")\r\n self.footer_lines.append(line + \"\\n\")",
"def draw_footer(canvas,pdf):\n oBank=get_object_or_404(SYSTEM_BANK, pk=1)\n oCompany=get_object_or_404(SYSTEM_MAIN, pk=1)\n oCompany=get_object_or_404(SYSTEM_MAIN, pk=1)\n note = (\n u'Bank Details: '+oBank.BankName+'',\n u'Sort Code: '+oBank.BankSortCode+' Account No: '+oBank.BankAccountNo+' (Quote invoice number).',\n u\"Please pay via bank transfer or cheque. All payments should be made in \"+oBank.Curency.Name+\"'s.\",\n u'Make cheques payable to '+oCompany.CompanyName+'.',\n )\n textobject = canvas.beginText(1 * cm, -27 * cm)\n for line in note:\n textobject.textLine(line)\n canvas.drawText(textobject)",
"def produce_header_footer():\n header = pl.PageStyle(\"header\", header_thickness=0.1)\n\n image_filename = get_image()\n with header.create(pl.Head(\"L\")) as logo:\n logo.append(pl.StandAloneGraphic(image_options=\"width=110px\", filename=image_filename))\n\n # Date\n with header.create(pl.Head(\"R\")):\n header.append(\"Date Report Issued: \" + datetime.today().strftime('%Y-%m-%d'))\n\n # Footer\n with header.create(pl.Foot(\"C\")):\n with header.create(pl.Tabular('lcr')) as table:\n table.add_row('', bold('Data interpretation guidelines can be found in RDIMS document ID: 10401305'), '')\n table.add_row('', bold('This report was generated with OLC AutoROGA v0.0.1'), '')\n return header",
"def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot",
"def _add_footer(self, line, align, text, *extra):\n k = self._text_width() - 1 - len(text)\n pos = max(0, (0 if align == \"left\" else (k if align == \"right\" else k //2 )))\n self._footer.addnstr(line, pos, text, self._text_width() - 1 - pos, *extra)",
"def build_footer():\n return html.Div(\n children=[\n html.P(\"-MH- 2020\"),\n html.P(\"This app is dedicated to my precious baby girl\"),\n ],\n className=\"footer\",\n )",
"def createFileFooter(self):\n import_file_desc_h = open('xml_footer.txt', 'r')\n readlines = import_file_desc_h.read()\n self.fileDesXmlData.write(readlines)\n import_file_desc_h.close()",
"def close_show():\n\n return \"</tbody></table></br>\"",
"def footer_section():\n LOGGER.info(\"Generating post footer section...\")\n section = (\n \"\"\n \"<br><br>## First Time Contributing in [Utopian.io](https://join.utopian.io/)?\"\n \"<br><br><a href="https://join.utopian.io/guidelines">Learn how to contribute on our website</a>\"\n \"<br><br><center><iframe width="560" height="315" src="https://www.youtube.com/embed/8S1AtrzYY1Q" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe></center>\"\n \"<br><br><center><a href="https://discord.gg/h52nFrV"><img src="https://cdn.discordapp.com/attachments/396653220702978049/452918421235957763/footer_558.png" /></a></center>\"\n \"<br><br><center><h4><a href="https://steemconnect.com/sign/account-witness-vote?witness=utopian-io&approve=1">Vote for the Utopian Witness</a></h4></center>\"\n )\n return section",
"def add_padded_header_footer_columns(table: LongTable, len_data: int) -> None:\n table_style = TableStyle(\n [('BACKGROUND', (0, 0), (-1, 0), colors.wheat),\n ('TEXTCOLOR', (0, 0), (-1, 0), colors.black),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTNAME', (0, 0), (-1, 0), 'Courier-Bold'),\n ('FONTSIZE', (0, 0), (-1, 0), 10),\n ('BOTTOMPADDING', (0, 0), (-1, 0), 10),\n ('BACKGROUND', (0, 1), (-1, 0), colors.beige)\n ]\n )\n\n table.setStyle(table_style)",
"def add_footer_plantuml(input: str) -> str:\n\n output = input + \"\"\"\n}\n@enduml\n\"\"\"\n return output",
"def print_footer():\n sys.stdout.write(linesep + linesep)\n sys.stdout.write('longest file: %s' % globals.longest_file + linesep)\n sys.stdout.write('deepest path: %s' % globals.deepest_path + linesep)",
"def _get_estimation_table_body_and_footer(\n models,\n column_names,\n column_groups,\n custom_param_names,\n custom_index_names,\n significance_levels,\n stats_options,\n show_col_names,\n show_col_groups,\n show_stars,\n show_inference,\n confidence_intervals,\n number_format,\n add_trailing_zeros,\n):\n body, max_trail = _build_estimation_table_body(\n models,\n column_names,\n column_groups,\n custom_param_names,\n custom_index_names,\n show_col_names,\n show_col_groups,\n show_inference,\n show_stars,\n confidence_intervals,\n significance_levels,\n number_format,\n add_trailing_zeros,\n )\n footer = _build_estimation_table_footer(\n models,\n stats_options,\n significance_levels,\n show_stars,\n number_format,\n add_trailing_zeros,\n max_trail,\n )\n footer.columns = body.columns\n return body, footer"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function creates a simple latex table for the 2D numpy array arrIn. The "form" argument specifies the number format to be used in the tabular environment. The "colHeader" argument is a list of strings that are used as the first row in the tabular environment. The "rowHeader" argument is a list of strings that are used in the first column of each row in the tabular environment. The latex tabular environment is returned as a string.
|
def SimpleTable(arrIn, form="%.4E", colHeader=None, rowHeader=None):
if colHeader is None and rowHeader is None:
return (
table._Header(arrIn.shape[1])
+ "\\hline\n"
+ table._Arr2Tab(arrIn, form=form)
+ "\n"
+ table._Footer()
)
elif rowHeader is None:
return (
table._Header(arrIn.shape[1])
+ "\\hline\n"
+ table._colHeader(colHeader)
+ "\\hline\n"
+ table._Arr2Tab(arrIn, form=form)
+ "\n"
+ table._Footer()
)
elif colHeader is None:
return (
table._Header(arrIn.shape[1] + 1)
+ "\\hline\n"
+ table._Arr2Tab(arrIn, form=form, rowHeader=rowHeader)
+ "\n"
+ table._Footer()
)
else:
return (
table._Header(arrIn.shape[1] + 1)
+ "\\hline\n"
+ table._colHeader(colHeader)
+ "\\hline\n"
+ table._Arr2Tab(arrIn, form=form, rowHeader=rowHeader)
+ "\n"
+ table._Footer()
)
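
A minimal usage sketch, assuming SimpleTable and its helpers (_Header, _colHeader, _Arr2Tab, _Footer) live in a module imported here as table, that numpy is imported as np, and that the module path and data values are invented:

import numpy as np
# the package name below is an assumption; SimpleTable accesses its helpers via `table.`
from mypackage import table

data = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
tex = table.SimpleTable(
    data,
    form="%.2E",
    colHeader=["a", "b", "c"],
    rowHeader=["first", "second"],
)
with open("table.tex", "w") as fh:
    fh.write(tex)  # \input{table.tex} in a LaTeX document renders the table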
|
[
"def _Arr2Tab(arrIn, form=\"%.4E\", rowHeader=None):\n out = str()\n if rowHeader is None:\n if np.size(arrIn.shape) == 2:\n numRows = arrIn.shape[0]\n for k in range(numRows):\n out += np.array2string(\n arrIn[k, :], separator=\" & \", formatter={\"float_kind\": lambda x: form % x}\n ).strip(\"[]\")\n out += \"\\\\\\\\\\n\\\\hline\\n\"\n else:\n out += np.array2string(\n arrIn[k, :], separator=\" & \", formatter={\"float_kind\": lambda x: form % x}\n ).strip(\"[]\")\n out += \"\\\\\\\\\\n\"\n else:\n if np.size(arrIn.shape) == 2:\n numRows = arrIn.shape[0]\n for k in range(numRows):\n out += rowHeader[k] + \" & \"\n out += np.array2string(\n arrIn[k, :], separator=\" & \", formatter={\"float_kind\": lambda x: form % x}\n ).strip(\"[]\")\n out += \"\\\\\\\\\\n\\\\hline\\n\"\n else:\n out += np.array2string(\n arrIn[k, :], separator=\" & \", formatter={\"float_kind\": lambda x: form % x}\n ).strip(\"[]\")\n out += \"\\\\\\\\\\n\"\n return out.rstrip()",
"def numpy2tex(array,\n filename = '',\n caption = '',\n label = '',\n columns = '',\n columnTitles = [],\n rowTitles = [],\n upperLeft = '',\n dataType = '|S100',\n option = '[h!]'):\n\n # convert any 2D iterable to np.array. Array content needs to be convertable to string\n array = np.array(array, dtype=dataType)\n if len(array.shape) != 2:\n print \"Works only for 2D Arrays. If you want to print an array of iterables (such as array of tuples)\" + \\\n \" to table, please convert it to an array of strings first.\"\n raise Exception()\n m, n = array.shape\n # todo: if array is 3D (e.g. when input array is 2D array of tuples), convert third dimension to\n # string and reshape to 2D\n # add titles for Columns, Rows and upper left cell, if given/neccessary\n cTitles = False\n if columnTitles:\n array = np.append(np.array(columnTitles)[np.newaxis, :], array, axis = 0)\n cTitles = True\n if rowTitles:\n if cTitles:\n rowTitles = np.append([upperLeft], rowTitles)[:, np.newaxis]\n columns = '|c' + columns\n array = np.append(rowTitles, array, axis = 1)\n\n # convert to tex string\n texString = \\\n \"\\\\begin{table}\" + option + \"\\n\" + \\\n \"\\\\centering\\n\" + \\\n \"\\\\begin{tabular}{\" + columns + \"}\\n\\\\hline\\n\"\n for line in array:\n for el in line[:-1]:\n texString += str(el) + \" & \"\n texString += str(line[-1]) + \" \\\\\\\\\\\\hline\\n\"\n texString += \\\n \"\\\\end{tabular}\\n\" + \\\n \"\\\\caption{\" + caption + \"}\\n\" + \\\n \"\\\\label{tab:\" + label + \"}\\n\" + \\\n \"\\\\end{table}\\n\"\n # write to file if specified and return string\n if filename:\n with open(filename, 'w') as f:\n f.write(texString)\n return texString",
"def html_table(matrix_or_array_like,\n float_fmt=None,\n raw=False,\n first_row_headers=False,\n caption=None,\n style=None,\n formatter=None):\n\n raw_table = matrix_or_array_like\n if not float_fmt:\n float_fmt = '%.2f'\n\n if not formatter:\n formatter = formatter_factory(default_fmt=float_fmt,\n outlier_fmt=float_fmt)\n\n if 'sympy.matrices' in str(type(matrix_or_array_like)):\n raw_table = array(raw_table)\n if style:\n html_table = ['<table style=\"%s\">' % style]\n else:\n html_table = ['<table>']\n if caption:\n html_table.append('<caption>%s</caption>' % caption)\n row_count = 0\n for row in raw_table:\n html_table.append('<tr>')\n for col in row:\n to_append = formatter(col)\n\n if first_row_headers and row_count == 0:\n html_table.append('<th>{0}</th>'.format(to_append))\n else:\n html_table.append('<td>{0}</td>'.format(to_append))\n\n html_table.append('</tr>')\n row_count += 1\n html_table.append('</table>')\n if raw:\n return ''.join(html_table)\n else:\n return HTML(''.join(html_table))",
"def to_latex(a):\n if len(a.shape) > 2:\n raise ValueError(\"bmatrix can at most display two dimensions\")\n lines = str(a).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\n rv = [r\"\\begin{bmatrix}\"]\n rv += [\n \" \" + \" & \".join([f\"{float(n):.3f}\" for n in l.split()]) + r\" \\\\\"\n for l in lines\n ]\n rv += [r\"\\end{bmatrix}\"]\n return \"\\n\".join(rv)",
"def array_dict_table_printer(array, order=None, header=None, vertical=False):\n if array is None or array == []:\n return None\n \n # header\n if header is None:\n header = list(array[0].keys())\n\n if order is None:\n order = header\n\n if header is None:\n if vertical:\n \n x = PrettyTable()\n x.add_column(\"Item\", order)\n else:\n x = PrettyTable(order)\n else:\n if vertical:\n x = PrettyTable()\n x.add_column(\"Item\", header)\n else:\n x = PrettyTable(header)\n\n for element in array:\n values = []\n for key in order:\n try:\n tmp = str(element[key])\n except:\n tmp = ' '\n values.append(tmp)\n if vertical:\n x.add_column(\" \", values)\n else:\n x.add_row(values)\n x.align = \"l\"\n return x",
"def build_table(headings, data, caption='',label=''):\n preamble = []\n preamble.append(r'\\begin{table}[!t]')\n preamble.append(r'\\caption{' + caption + r'}')\n preamble.append(r'\\label{' + label + r'}')\n preamble.append(r'\\centering')\n preamble.append(r'\\begin{tabular}{' + ' '.join('r' *\n len(headings)) +\n '}')\n\n heading = []\n heading.append(r'\\hline')\n heading.append(r' & '.join(headings) + r'\\\\')\n heading.append(r'\\hline')\n\n body = []\n for d in data:\n body.append(r' & '.join(d) + r'\\\\')\n\n postamble = []\n postamble.append(r'\\hline')\n postamble.append(r'\\end{tabular}')\n postamble.append(r'\\end{table}')\n\n table = ''\n table += '\\n'.join(preamble) + '\\n'\n table += '\\n'.join(heading) + '\\n'\n table += '\\n'.join(body) + '\\n'\n table += '\\n'.join(postamble)\n\n return table",
"def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot",
"def tablify(moduleList):\n header = \"| Module | Year | Term | Prerequisites\\n|----|----|----|----\\n\"\n rows = \"\"\n for code in moduleList:\n rows += tableRow(modules[code]) # this is a fold...do it with functools\n return header + rows",
"def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '⟨'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'⟩'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht",
"def _Header(numCols):\n return \"\\\\begin{center}\\n\\\\begin{tabular}{\" + \"|c\" * numCols + \"|}\\n\"",
"def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscissa' , 'Value' ) ] \n for i in range ( t.size() ) :\n x = t.x ( i )\n y = t.y ( i )\n row = xfmt % x, yfmt % y\n rows.append ( row )\n \n if not title : title = 'Interpolation Table' \n import ostap.logger.table as T\n return T.table ( rows , title = title , prefix = prefix , alignment = alignment )",
"def create_table(small_dict):\r\n keys, values = tuple(zip(*small_dict.items()))\r\n table = tabulate(\r\n [values],\r\n headers=keys,\r\n tablefmt=\"pipe\",\r\n floatfmt=\".3f\",\r\n stralign=\"center\",\r\n numalign=\"center\",\r\n )\r\n return table",
"def format_table(filename, savefilename, model=False, bracket=[\"(\", \")\"], LaTeX=True):\n\tif type(filename) == str:\n\t\tm = dataobj(filename)\n\telif type(filename).__name__ != \"dataobj\":\n\t\tprint(\"File {0} is not being supported.\".format(filename))\n\tN,nvar = m.data.shape\n\tnmodel = N/2 \n\tcoef = m.data[0:nmodel]\n\tstd = m.data[nmodel:N] \n\tcoefs = [[\"{0:.3f}\".format(i) for i in l] for l in coef.transpose()]\n\tstds = [[\"{0:.3f}\".format(i) for i in l] for l in std.transpose()]\n\ts = \"\"\n\tif LaTeX==False:\n\t\tif model != False:\n\t\t\ts = \"&\" + \"&\".join(model)+\" \\n\"\n\t\tfor i in range(0,nvar):\n\t\t\ts = s+m.date[i]+\"&\"+\"&\".join([j for j in coefs[i]]) + \"\\n\" + \"&\"+\"&\".join([bracket[0]+k+bracket[1] for k in stds[i]])+\"\\n\"\n\telse: \n\t\tfor i in range(0,nvar):\n\t\t\ts = s+m.date[i]+\"&\"+\"&\".join([j for j in coefs[i]]) + \"\\\\\\\\\\n\" + \"&\"+\"&\".join([bracket[0]+k+bracket[1] for k in stds[i]])+\"\\\\\\\\\\n\"\n\t\theader = \"\\\\begin{tabular}{c|\"\n\t\tfor i in range(0,nmodel):\n\t\t\theader = header + \"c\"\n\t\theader= header + \"|} \\n \\\\hline\"\n\t\tif model != False:\n\t\t\theader = header + \"&\" + \"&\".join(model)+\"\\\\\\\\ \\\\hline \\n\"\n\t\tend = \"\\hline\\\\end{tabular}\"\n\t\ts = header + s + end\n\tf = open(savefilename, \"w\")\n\tf.write(s)\n\tf.close()",
"def grid_to_latex(sudoku, param=0):\n s = ''\n for r in 'ABCDEFGHI':\n for c in '12345678':\n cell = r+c\n value = sudoku[cell]\n if len(value) > 1:\n s = s + '~' + ' & '\n else:\n s = s + value + ' & '\n value = sudoku[r + '9']\n if len(value) >1:\n s = s + '~' + '\\\\\\ \\n'\n else:\n s = s + value + '\\\\\\ \\n'\n if param == 1:\n s = s + '\\\\hline \\n'\n if r in 'CF':\n s = s + '\\\\hline \\n'\n return s",
"def to_latex(self,fn='tableone.tex'):\n tablefmt = 'latex'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))",
"def generate_table_report(self):\n # create header row\n html_content = \"<table cellspacing='{0}' border='0'>\".format(self.cellspacing)\n html_content += \"<tr style='font-size:{0}pt; font-family:{1}; color:{2};'>\".format(\n self.font_size_heading_2,\n self.font_family,\n pyani.core.ui.CYAN\n )\n\n if not self.headings:\n self.headings = [\"Could not build headings\"]\n self.col_widths = [\"100\"]\n self.data = [\"Heading build error, could not construct data portion of table.\"]\n\n for index, heading in enumerate(self.headings):\n html_content += \"<td width='{0}%'>\".format(self.col_widths[index])\n html_content += heading\n html_content += \"</td>\"\n html_content += \"</tr>\"\n\n # add spacer row\n html_content += \"<tr>\"\n for _ in self.headings:\n html_content += \"</td> </td>\"\n html_content += \"</tr>\"\n\n if self.data:\n for data in self.data:\n html_content += \"<tr style='font-size:{0}pt; font-family:{1}; color: #ffffff;'>\".format(\n self.font_size_body,\n self.font_family\n )\n for item in data:\n html_content += \"<td>\"\n html_content += item\n html_content += \"</td>\"\n html_content += \"</tr>\"\n\n html_content += \"</table>\"\n self.show_content(html_content)",
"def table_printer(the_dict, header_info=None):\n # header_info [\"attribute\", \"value\"]\n if (header_info is not None) or (header_info == \"\"):\n result = '<tr><th>{0}</th><th>{1}</th></tr>'\\\n .format(header_info[0], header_info[1])\n else:\n result = ''\n if isinstance(the_dict, dict):\n for name, value in the_dict.items():\n result = result + \\\n '<tr><td>{0}</td><td>{1}</td></tr>'\\\n .format(name.title(), str(table_printer(value)))\n result = '<table>' + result + '</table>'\n return result\n elif isinstance(the_dict, list):\n for element in the_dict:\n try:\n for name, value in element.items():\n result = result + \\\n '<tr><td>{0}</td><td>{1}</td></tr>'\\\n .format(name.title(), str(table_printer(value)))\n except:\n # If the element is not dict\n return str(element)\n result = '<table>' + result + '</table>'\n return result\n else:\n return the_dict",
"def get_table_from_dict(dict_):\n dept, n_cols = get_dept(dict_)\n if dept is 0:\n return \"Not a neasted dictionary.\"\n\n dict_ = {'dict': dict_}\n\n dept, n_cols = get_dept(dict_)\n\n latex_code = '\\\\begin{tabular}{|'\n for i in range(n_cols):\n latex_code += 'c|'\n latex_code += '}\\n \\\\hline \\n'\n\n\n sub_dict = dict_\n lines = [''] * dept\n for level in range(dept-1):\n dict_tmp = dict_\n mult_k_cols = n_cols\n\n key_levels = [dict_tmp.keys()]\n # n_k_cols = len(dict_tmp.keys())\n\n\n for i in range(level):\n dict_tmp = dict_tmp[dict_tmp.keys()[0]]\n key_levels += [dict_tmp.keys()]\n\n\n key_pool = itertools.product(*key_levels)\n\n\n\n for i,comb in enumerate(key_pool):\n dict_tmp = dict_\n for key_i in comb:\n dict_tmp = dict_tmp[key_i]\n n_k_cols = len(dict_tmp.keys())\n mult_k_cols /= n_k_cols\n\n lines[level] += get_dict_line(dict_tmp, mult_k_cols)\n mult_k_cols = n_cols\n\n for l in lines:\n li = l.rsplit('&', 1)\n l = ' '.join(li)\n latex_code += l + '\\\\\\\\ \\n \\\\hline \\n'\n\n latex_code += '\\\\end{tabular}'\n return latex_code",
"def grading_to_html_table(gr):\n\n layers = [a for a in gr]\n layerstrs = [str(a) for a in gr]\n if len(gr.magma().gens()) == 1:\n layerstrs = [a.replace(\"(\", \"\").replace(\")\", \"\") for a in layerstrs]\n elif len(gr.magma().gens()) == 0:\n layerstrs = [a.replace(\"()\", \"0\") for a in layerstrs]\n htmlstr = '<table class=\"gradingarray\"><tr>'\n for a in layerstrs:\n htmlstr += '<td> %s </td>' % a\n htmlstr += '</tr><tr>'\n for a in layers:\n lstr = \", \".join([str(X) for X in gr[a]])\n htmlstr += '<td> <%s> </td>' % lstr\n htmlstr += '</tr></table>'\n\n return htmlstr"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Take a source hdf5 file and a set of datasets and produce a dest hdf5 file that contains only those datasets and that has been repacked.
|
def convert_and_copy( src, dest, datasets, srsly=False ):
    if not os.path.isfile(src):
        return -1
    temp = tempfile.NamedTemporaryFile()
    for dset in datasets:
        ### copy only the relevant datasets into a temporary file
        cmd_args = ["h5copy", "-i", src, "-o", temp.name, "-s", dset, "-d", dset, "-p"]
        if args.dryrun:  # `args` is a module-level argparse namespace
            print(' '.join(cmd_args))
            ret = 0
        else:
            ret = subprocess.call( cmd_args )
    ### repack (and gzip-compress) the temporary file into the destination
    cmd_args = ["h5repack", "-L", "-v", "-f", "GZIP=1", temp.name, dest]
    if args.dryrun:
        print(' '.join(cmd_args))
        ret = 0
    else:
        ret += subprocess.call( cmd_args )
    temp.close()
    return ret
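
A hedged usage sketch: it assumes os, tempfile, and subprocess are imported alongside the function, that the h5copy and h5repack command-line tools are on PATH, and that args is a module-level argparse namespace with a dryrun flag; the file names and dataset paths below are invented:

import argparse

# `args` must exist at module level because convert_and_copy reads args.dryrun
parser = argparse.ArgumentParser()
parser.add_argument("--dryrun", action="store_true")
args = parser.parse_args(["--dryrun"])  # dry run: only print the h5copy/h5repack commands

status = convert_and_copy(
    "run_0042.h5",                        # source file (invented name)
    "run_0042_trimmed.h5",                # destination file (invented name)
    ["/entry/detector_1", "/entry/log"],  # datasets to keep (invented paths)
)
print("combined return code:", status)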
|
[
"def pack(name, f_name, img_size=(227,227),\n\t\tgreyscale=False, flatten=False, istest=False):\n\t \n\tdtype = \"Float64\" # Should be Float64\n\tdata_folder = \"DATA\"\n\thdfname = \"%s.hdf5\" % name\n\n\tf = h5py.File(\"%s/%s\" % (data_folder, hdfname), \"w\")\n\tif istest:\n\t\tX, paths = _load_testset(f_name, img_size=img_size,\n\t\t\tgreyscale=greyscale, flatten=flatten)\n\t\txfile = f.create_dataset(\"/data\", data=X, dtype=dtype)\n\telse:\n\t\tX, y = _load_dataset(f_name, img_size=img_size,\n\t\t\tgreyscale=greyscale, flatten=flatten)\n\t\tlookup, rev_lookup = _gen_lookup_table(y)\n\t\ty_n = np.array([rev_lookup[label] for label in y], dtype='uint8')\n\t\txfile = f.create_dataset(\"data\", data=X, dtype=dtype)\n\t\tyfile = f.create_dataset(\"label\", data=y_n, dtype=dtype)\n\t\tfor keys in lookup:\n\t\t\tyfile.attrs[str(keys)] = lookup[keys]\n\n\twith open(\"%s/%s.txt\" % (data_folder, name), \"w\") as ref:\n\t\tref.write(\"%s/%s\" % (data_folder, hdfname))\n\tprint(\"Created Datasets:\")\n\tfor name in f:\n\t\tprint(\" - %s\" % name)\n\tprint(\"Dimensions:\")\n\tprint(\" - %s\" % \", \".join(str(i) for i in X.shape))\n\tif not istest:\n\t\tprint(\" - %s\" % \", \".join(str(i) for i in y_n.shape))",
"def create_hdf5_from_dataset(dataset_dir, result_hdf5): \n print(f\"create_hdf5 ({dataset_dir, result_hdf5})\")\n hf = h5py.File(result_hdf5, 'w') # open the file in write mode\n logistics = hf.create_group('LOGISTICS')\n files = hf.create_group('FILES')\n\n print('creating FILES for {}'.format(dataset_dir))\n create_hdf5_recursive_files(files, dataset_dir)\n print('creating LOGISTICS for {}'.format(dataset_dir))\n create_hdf5_recursive_logistics(logistics, dataset_dir)\n hf.close()",
"def copyRegionRefs(h5_source, h5_target):\n '''\n Check both h5_source and h5_target to ensure that are Main\n '''\n if not all([checkIfMain(h5_source), checkIfMain(h5_target)]):\n raise TypeError('Inputs to copyRegionRefs must be HDF5 Datasets.')\n\n h5_source_inds = h5_source.file[h5_source.attrs['Spectroscopic_Indices']]\n\n h5_spec_inds = h5_target.file[h5_target.attrs['Spectroscopic_Indices']]\n h5_spec_vals = h5_target.file[h5_target.attrs['Spectroscopic_Values']]\n\n for key in h5_source.attrs.keys():\n if '_Plot_Group' not in key:\n continue\n\n if h5_source_inds.shape[0] == h5_spec_inds.shape[0]:\n '''\n Spectroscopic dimensions are identical.\n Do direct copy.\n '''\n ref_inds = simpleRefCopy(h5_source, h5_target, key)\n\n else:\n '''\n Spectroscopic dimensions are different.\n Do the dimenion reducing copy.\n '''\n ref_inds = reducingRefCopy(h5_source, h5_target, h5_source_inds, h5_spec_inds, key)\n '''\n Create references for Spectroscopic Indices and Values\n Set the end-point of each hyperslab in the position dimension to the number of\n rows in the index array\n '''\n ref_inds[:, 1, 0][ref_inds[:, 1, 0] > h5_spec_inds.shape[0]] = h5_spec_inds.shape[0] - 1\n spec_inds_ref = createRefFromIndices(h5_spec_inds, ref_inds)\n h5_spec_inds.attrs[key] = spec_inds_ref\n spec_vals_ref = createRefFromIndices(h5_spec_vals, ref_inds)\n h5_spec_vals.attrs[key] = spec_vals_ref",
"def merged_simulated_dl1_file(simulated_dl1_file, temp_dir_simulated_files):\n shutil.copy(simulated_dl1_file, temp_dir_simulated_files / \"dl1_copy.h5\")\n merged_dl1_file = temp_dir_simulated_files / \"script_merged_dl1.h5\"\n run_program(\n \"lstchain_merge_hdf5_files\",\n \"-d\",\n temp_dir_simulated_files,\n \"-o\",\n merged_dl1_file,\n \"--no-image\",\n \"--pattern=dl1_*.h5\"\n )\n return merged_dl1_file",
"def test_hdf5_file_input():\n catfile = os.path.join(TEST_DATA_DIR, 'point_sources.cat')\n output_hdf5 = os.path.join(TEST_DATA_DIR, 'all_spectra.hdf5')\n sed_file = os.path.join(TEST_DATA_DIR, 'sed_file_with_normalized_dataset.hdf5')\n sed_catalog = spec.make_all_spectra(catfile, input_spectra_file=sed_file,\n normalizing_mag_column='nircam_f444w_magnitude',\n output_filename=output_hdf5)\n\n comparison = hdf5.open(os.path.join(TEST_DATA_DIR, 'output_spec_from_hdf5_input_including_normalized.hdf5'))\n constructed = hdf5.open(sed_catalog)\n for key in comparison:\n assert key in constructed.keys()\n assert all(comparison[key][\"wavelengths\"].value == constructed[key][\"wavelengths\"].value)\n assert all(comparison[key][\"fluxes\"].value == constructed[key][\"fluxes\"].value)\n assert comparison[key][\"wavelengths\"].unit == constructed[key][\"wavelengths\"].unit\n assert comparison[key][\"fluxes\"].unit == constructed[key][\"fluxes\"].unit\n\n cat_base = catfile.split('.')[0]\n outbase = cat_base + '_with_flambda.cat'\n flambda_output_catalog = os.path.join(TEST_DATA_DIR, outbase)\n os.remove(flambda_output_catalog)\n os.remove(sed_catalog)",
"def rewrite_to_ess_format(source_filename, target_filename, compress_type='gzip', compress_opts=1):\n with h5py.File(source_filename, 'r') as source_file:\n with h5py.File(target_filename, 'w') as target_file:\n entry_group = target_file.create_group('entry')\n entry_group.attrs['NX_class'] = 'NXentry'\n\n instr_group = entry_group.create_group('instrument')\n instr_group.attrs['NX_class'] = 'NXinstrument'\n\n target_file.copy(source_file['/raw_data_1/instrument/source'], '/raw_data_1/instrument/source')\n\n det_group = instr_group.create_group('detector_1')\n det_group.attrs['NX_class'] = 'NXdetector'\n\n event_group = det_group.create_group('detector_1_events')\n event_group.attrs['NX_class'] = 'NXlog'\n\n data_value = source_file.get('/raw_data_1/detector_1_events/event_id')\n event_value = event_group.create_dataset('value', data_value[...].shape, dtype=data_value.dtype,\n compression=compress_type, compression_opts=compress_opts)\n event_value[...] = data_value[...]\n\n data_time = source_file.get('/raw_data_1/detector_1_events/event_time_offset')\n event_time = event_group.create_dataset('time', data_time[...].shape, dtype=np.dtype('u4'),\n compression=compress_type, compression_opts=compress_opts)\n event_time[...] = data_time[...] * 1000\n event_time.attrs['relative_to'] = 'step'\n event_time.attrs['tick_length'] = 1 # nanoseconds\n\n pulse_time = source_file.get('/raw_data_1/detector_1_events/event_time_zero')\n step_time = event_group.create_dataset('step_time', pulse_time[...].shape, dtype=np.dtype('u4'),\n compression=compress_type, compression_opts=compress_opts)\n step_time[...] = pulse_time[...] * 100\n step_time.attrs['tick_length'] = 10000000 # nanoseconds (10 milliseconds)\n\n event_index = source_file.get('/raw_data_1/detector_1_events/event_index')\n step_index = event_group.create_dataset('step_index', event_index[...].shape, dtype=event_index.dtype,\n compression=compress_type, compression_opts=compress_opts)\n step_index[...] = event_index[...]",
"def touch_result_hdf5_file(target_dir, poe, ds_names, n_realizations,\n n_periods):\n file_name = _HDF5_FILE_NAME_FMT % poe\n full_path = os.path.join(target_dir, file_name)\n\n ds_shape = (n_realizations, n_periods)\n\n with h5py.File(full_path, 'w') as h5_file:\n for name in ds_names:\n h5_file.create_dataset(name, dtype=numpy.float64, shape=ds_shape)\n\n return full_path",
"def main(combined_h5, source_h5, axis, overwrite, process_size, log_file,\n verbose):\n if verbose:\n log_level = 'DEBUG'\n else:\n log_level = 'INFO'\n\n if log_file is not None:\n log_dir = os.path.dirname(log_file)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n init_logger('rex', log_file=log_file, log_level=log_level)\n\n dst_dir = os.path.dirname(combined_h5)\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n\n CombineH5.run(combined_h5, *source_h5, axis=axis, overwrite=overwrite,\n process_size=process_size)",
"def write_H5scanData(self,dir,H5file,H5name,averaged='False'):\n g = H5file.create_group(H5name) #H5 subgroup with the name of the sample\n H5_ela = g.create_group('elastic') #H5 subgroup for elastics\n H5_xrs = g.create_group('XRS') #H5 subgroup for NIXS\n all_scans = self.elastic_scans+self.nixs_scans\n for file in all_scans:\n scan_info = self.scan_info(file)\n if scan_info[2] == 'elastic':\n h5group = H5_ela.create_group(scan_info[1])\n h5group.create_dataset(\"energy\",data=self.scans[scan_info[1]].energy)\n h5group.create_dataset(\"signals\",data=self.scans[scan_info[1]].signals)\n h5group.create_dataset(\"errors\",data=self.scans[scan_info[1]].errors)\n h5group.create_dataset(\"cenoms\",data=self.scans[scan_info[1]].cenom)\n elif scan_info[2]=='nixs':\n h5group = H5_xrs.create_group(scan_info[1])\n h5group.create_dataset(\"energy\",data=self.scans[scan_info[1]].energy)\n h5group.create_dataset(\"signals\",data=self.scans[scan_info[1]].signals)\n h5group.create_dataset(\"eloss\",data=self.scans[scan_info[1]].eloss)\n h5group.create_dataset(\"errors\",data=self.scans[scan_info[1]].errors)\n h5group.create_dataset(\"tth\",data=self.scans[scan_info[1]].tth)\n\n g.create_dataset(\"energy\",data=self.energy)\n g.create_dataset(\"signals\",data=self.signals)\n g.create_dataset(\"eloss\",data=self.eloss)\n g.create_dataset(\"errors\",data=self.errors)\n g.create_dataset(\"tth\",data=self.tth)\n g.create_dataset(\"Mean Resolutions\", data=np.array(self.resolution.items()))\n\n #Never forget to close an open H5 file!!!\n H5file.close()",
"def write_func(in_files, out_file, groups):\r\n data_file = h5py.File(out_file, 'a')\r\n image_extensions = ['jpg', 'jpeg', 'png', 'bmp', 'tiff']\r\n count = 0\r\n try:\r\n for in_file in in_files:\r\n if in_file.split('.')[-1] not in image_extensions:\r\n try:\r\n with open(in_file) as ocf:\r\n data = ocf.read()\r\n str_type = h5py.special_dtype(vlen=str)\r\n dset = data_file.create_dataset(\r\n groups[count] + in_file.split('/')[-1],\r\n data=data, shape=(1,),\r\n dtype=str_type\r\n )\r\n attributes = generate_attributes_to_add(\r\n groups[count] + in_file.split('/')[-1])\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v\r\n except FileNotFoundError:\r\n print(in_file, \"not found\")\r\n else:\r\n dset = image_to_hdf5(in_file, data_file, groups[count])\r\n attributes = generate_attributes_to_add(\r\n groups[count] + in_file.split('/')[-1])\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v\r\n if len(groups) == 1:\r\n count = 0\r\n else:\r\n count += 1\r\n except RuntimeError:\r\n pass",
"def temp_emsoft_h5ebsd_file(tmpdir, request):\n f = File(tmpdir.join(\"emsoft_h5ebsd_file.h5\"), mode=\"w\")\n\n # Unpack parameters\n map_shape, (dy, dx), example_rotations, n_top_matches, refined = request.param\n ny, nx = map_shape\n map_size = ny * nx\n\n # Create groups used in reader\n ebsd_group = f.create_group(\"Scan 1/EBSD\")\n data_group = ebsd_group.create_group(\"Data\")\n header_group = ebsd_group.create_group(\"Header\")\n phase_group = header_group.create_group(\"Phase/1\") # Always single phase\n\n # Create `header_group` datasets used in reader\n for name, data, dtype in zip(\n [\"nRows\", \"nColumns\", \"Step Y\", \"Step X\"],\n [ny, nx, dy, dx],\n [np.int32, np.int32, np.float32, np.float32],\n ):\n header_group.create_dataset(name, data=np.array([data], dtype=dtype))\n\n # Create `data_group` datasets, mostly quality metrics\n data_group.create_dataset(\"X Position\", data=np.tile(np.arange(nx) * dx, ny))\n # Note that \"Y Position\" is wrongly written to their h5ebsd file by EMsoft\n data_group.create_dataset(\n \"Y Position\",\n data=np.tile(np.arange(nx) * dx, ny), # Wrong\n # data=np.sort(np.tile(np.arange(ny) * dy, nx)), # Correct\n )\n for name, shape, dtype in [\n (\"AvDotProductMap\", map_shape, np.int32),\n (\"CI\", map_size, np.float32),\n (\"CIMap\", map_shape, np.int32),\n (\"IQ\", map_size, np.float32),\n (\"IQMap\", map_shape, np.int32),\n (\"ISM\", map_size, np.float32),\n (\"ISMap\", map_shape, np.int32),\n (\"KAM\", map_shape, np.float32),\n (\"OSM\", map_shape, np.float32),\n (\"Phase\", map_size, np.uint8),\n ]:\n data_group.create_dataset(name, data=np.zeros(shape, dtype=dtype))\n\n # `data_group` with rotations\n # Sample as many rotations from `rotations` as `map_size`\n rot_idx = np.random.choice(np.arange(len(example_rotations)), map_size)\n rot = example_rotations[rot_idx]\n n_sampled_oris = 333227 # Cubic space group with Ncubochoric = 100\n data_group.create_dataset(\"FZcnt\", data=np.array([n_sampled_oris], dtype=np.int32))\n data_group.create_dataset(\n \"TopMatchIndices\",\n data=np.vstack(\n (np.random.choice(np.arange(n_sampled_oris), n_top_matches),) * map_size\n ),\n dtype=np.int32,\n )\n data_group.create_dataset(\n \"TopDotProductList\",\n data=np.vstack((np.random.random(size=n_top_matches),) * map_size),\n dtype=np.float32,\n )\n data_group.create_dataset(\n \"DictionaryEulerAngles\",\n data=np.column_stack(\n (np.random.uniform(low=0, high=2 * np.pi, size=n_sampled_oris),) * 3\n ),\n dtype=np.float32,\n )\n\n if refined:\n data_group.create_dataset(\"RefinedEulerAngles\", data=rot.astype(np.float32))\n data_group.create_dataset(\n \"RefinedDotProducts\", data=np.zeros(map_size, dtype=np.float32)\n )\n\n # Number of top matches kept\n f.create_dataset(\n \"NMLparameters/EBSDIndexingNameListType/nnk\",\n data=np.array([n_top_matches], dtype=np.int32),\n )\n\n # `phase_group`\n for name, data in [\n (\"Point Group\", \"Cubic (Oh) [m3m]\"),\n (\"MaterialName\", \"austenite/austenite\"),\n (\"Lattice Constant a\", \"3.595\"),\n (\"Lattice Constant b\", \"3.595\"),\n (\"Lattice Constant c\", \"3.595\"),\n (\"Lattice Constant alpha\", \"90.000\"),\n (\"Lattice Constant beta\", \"90.000\"),\n (\"Lattice Constant gamma\", \"90.000\"),\n ]:\n phase_group.create_dataset(name, data=np.array([data], dtype=np.dtype(\"S\")))\n\n yield f\n gc.collect()",
"def to_hdf(data_dict, tgt):\n if not isinstance(data_dict, dict):\n errmsg = 'to_hdf: `data_dict` only accepts top-level dict.'\n logging.error(errmsg)\n raise TypeError(errmsg)\n\n # Define a function for iteratively doing the work\n def store_recursively(fhandle, node, path=None, node_hashes=None):\n if path is None:\n path = []\n if node_hashes is None:\n node_hashes = {}\n full_path = '/' + '/'.join(path)\n if isinstance(node, dict):\n logging.trace(\" creating Group `%s`\" % full_path)\n try:\n fhandle.create_group(full_path)\n except ValueError:\n pass\n for key in sorted(node.iterkeys()):\n key_str = str(key)\n if not isinstance(key, str):\n logging.warn('Stringifying key `' + key_str +\n '`for use as name in HDF5 file')\n val = node[key]\n new_path = path + [key_str]\n store_recursively(fhandle=fhandle, node=val, path=new_path,\n node_hashes=node_hashes)\n else:\n # Check for existing node\n node_hash = utils.hash_obj(node)\n if node_hash in node_hashes:\n logging.trace(\" creating hardlink for Dataset: `%s` -> `%s`\" %\n (full_path, node_hashes[node_hash]))\n # Hardlink the matching existing dataset\n fhandle[full_path] = fhandle[node_hashes[node_hash]]\n return\n # For now, convert None to np.nan since h5py appears to not handle None\n if node is None:\n node = np.nan\n logging.warn(\" encountered `None` at node `%s`; converting to\"\n \" np.nan\" % full_path)\n # \"Scalar datasets don't support chunk/filter options\". Shuffling\n # is a good idea otherwise since subsequent compression will\n # generally benefit; shuffling requires chunking. Compression is\n # not done here since it is slow.\n if np.isscalar(node):\n shuffle = False\n chunks = None\n else:\n shuffle = True\n chunks = True\n # Store the node_hash for linking to later if this is more than\n # a scalar datatype. Assumed that \"None\" has \n node_hashes[node_hash] = full_path\n # TODO: Treat strings as follows? Would this break compatibility\n # with pytables/Pandas? What are benefits? Leaving out for now.\n # if isinstance(node, basestr):\n # dtype = h5py.special_dtype(vlen=str)\n # fh.create_dataset(k,data=v,dtype=dtype)\n logging.trace(\" creating dataset at node `%s`\" % full_path)\n try:\n fhandle.create_dataset(name=full_path, data=node,\n chunks=chunks, compression=None,\n shuffle=shuffle, fletcher32=False)\n except TypeError:\n try:\n shuffle = False\n chunks = None\n fhandle.create_dataset(name=full_path, data=node,\n chunks=chunks, compression=None,\n shuffle=shuffle, fletcher32=False)\n except:\n logging.error(' full_path: ' + full_path)\n logging.error(' chunks : ' + str(chunks))\n logging.error(' shuffle : ' + str(shuffle))\n logging.error(' node : ' + str(node))\n raise\n\n # Perform the actual operation using the dict passed in by user\n if isinstance(tgt, basestring):\n try:\n h5file = h5py.File(os.path.expandvars(tgt), 'w')\n store_recursively(fhandle=h5file, node=data_dict)\n except IOError, e:\n logging.error(e)\n logging.error(\"to_hdf: Unable to open `%s` for writing\" % tgt)\n raise\n finally:\n h5file.close()\n elif isinstance(tgt, h5py.Group):\n store_recursively(fhandle=tgt, node=data_dict)\n else:\n errmsg = \"to_hdf: Invalid `tgt` type: `\"+ type(target_entity)+\"`\"\n logging.error(errmsg)\n raise TypeError(errmsg)",
"def prepare_data(path, output):\n\n with h5.File(NORM, 'r') as fh:\n M = fh['M'][()]\n\n with h5.File(path, 'r') as fh:\n # load extracted snvs\n snv = fh[\"SNVR\"][()].T.reshape(3, 3, 16, 4, 2, 2, 96, -1)\n\n # compute the normalization constant\n N0 = (snv.sum(axis=(4, 5, 6, 7)) / snv.sum()).reshape(3, 3, 16, 4, 1)\n N1 = np.concatenate(\n [N0, N0[[1, 0, 2], :, :][:, [1, 0, 2], :, :]], axis=4)\n N2 = N1.reshape(3, 3, 16, 4, 1, 2, 1, 1)\n N = (N2 * M) / 2\n\n # collapse data\n N = collapse_data(np.concatenate([N] * 2, axis=-4))\n snv = collapse_data(snv)\n\n # to be changed soon\n sv = np.zeros([81, snv.shape[-1]])\n sv[:] = np.nan\n other = np.concatenate(\n [fh['MNV'][()].T, fh['INDELS'][()].T, sv], axis=0)\n\n with h5.File(output, 'w') as fh:\n fh.create_dataset('SNV', data=snv)\n fh.create_dataset('OTHER', data=other)\n fh.create_dataset('N', data=N)\n\n return 0",
"def mat2h5(config):\n dataset_name = config.dataset_name\n base_path = config.data_path\n mat_dir = os.path.join(base_path, 'data_mat')\n h5_dir = os.path.join(base_path, 'data_h5')\n if dataset_name == 'Salinas':\n dataset_mat_dir = os.path.join(mat_dir, '{name}/{name}_corrected.mat'.format(name=dataset_name))\n dataset_gt_dir = os.path.join(mat_dir, '{name}/{name}_gt.mat'.format(name=dataset_name))\n dataset_h5_save_dir = os.path.join(h5_dir, '{}.h5'.format(dataset_name))\n elif dataset_name == 'Indian':\n dataset_mat_dir = os.path.join(mat_dir, '{name}/{name}_pines_corrected.mat'.format(name=dataset_name))\n dataset_gt_dir = os.path.join(mat_dir, '{name}/{name}_pines_gt.mat'.format(name=dataset_name))\n dataset_h5_save_dir = os.path.join(h5_dir, '{}.h5'.format(dataset_name))\n elif dataset_name == 'WHU_Hi_HongHu':\n dataset_mat_dir = os.path.join(mat_dir, '{name}/{name}.mat'.format(name=dataset_name))\n dataset_gt_dir = os.path.join(mat_dir, '{name}/{name}_gt.mat'.format(name=dataset_name))\n dataset_h5_save_dir = os.path.join(h5_dir, '{}.h5'.format(dataset_name))\n hsi_data = sio.loadmat(dataset_mat_dir)[config.dataset_HSI]\n hsi_gt = sio.loadmat(dataset_gt_dir)[config.dataset_gt]\n with h5py.File(dataset_h5_save_dir, 'w') as f:\n f['data'] = hsi_data\n f['label'] = hsi_gt",
"def generate_single_files_dataset():\n\toriginal_imgs_picked = None\n\tedgemaps_picked = None\n\tcount = 0\n\t\n\tfor batch_name in os.listdir(settings.PICKED_ORIGINALS_PATH):\n\n\t\toriginal_imgs = np.load(settings.PICKED_ORIGINALS_PATH+batch_name)\n\t\tedgemaps = np.load(settings.PICKED_EDGEMAPS_PATH+batch_name)\n\n\t\tif original_imgs_picked is None and edgemaps is None:\n\t\t\toriginal_imgs_picked = original_imgs\n\t\t\tedgemaps_picked = edgemaps\n\t\telse:\n\t\t\toriginal_imgs_picked = np.concatenate((original_imgs_picked, original_imgs), axis=0)\n\t\t\tedgemaps_picked = np.concatenate((edgemaps_picked, edgemaps), axis=0)\n\n\t\tcount += 1\n\t\tprint(\"{}/1000 is appended\".format(count))\n\n\tnp.save(settings.DATASET_PATH+\"original_images.npy\", original_imgs_picked)\n\tnp.save(settings.DATASET_PATH+\"edgemaps.npy\", edgemaps_picked)",
"def hdf5_images(self, filename, output_filename, action, images=True):\n self.stop = False\n ycube = self.get_ycube(filename)\n print 'output_filename is', output_filename\n self.temp_hdf5 = h5py.File(output_filename +'temporary','w')\n self.read_into_temp_hdf5(self.temp_hdf5,\n ycube,\n action,\n images)\n if not self.stop:\n self.generate_output(filename, output_filename, self.temp_hdf5)\n self.temp_hdf5.close()\n os.remove(output_filename +'temporary')\n self.input_hdf5.close() \n self.close()",
"def create_hdf5(\n bigwig_paths, chrom_sizes_path, out_path, chunk_size, batch_size=100\n):\n bigwig_readers = [pyBigWig.open(path) for path in bigwig_paths]\n \n # Read in chromosome sizes\n with open(chrom_sizes_path, \"r\") as f:\n chrom_sizes = {}\n for line in f:\n tokens = line.strip().split(\"\\t\")\n chrom_sizes[tokens[0]] = int(tokens[1])\n \n # Convert batch size to be in terms of rows, not number of chunks\n batch_size = batch_size * chunk_size\n\n with h5py.File(out_path, \"w\") as f:\n # Store source paths\n f.create_dataset(\"bigwig_paths\", data=np.array(bigwig_paths, dtype=\"S\"))\n for chrom in sorted(chrom_sizes.keys()):\n chrom_size = chrom_sizes[chrom]\n num_batches = int(np.ceil(chrom_size / batch_size))\n chrom_dset = f.create_dataset(\n chrom, (chrom_size, len(bigwig_paths), 1), dtype=\"f\",\n compression=\"gzip\", chunks=(chunk_size, len(bigwig_paths), 1)\n )\n for i in tqdm.trange(num_batches, desc=chrom):\n start = i * batch_size\n end = min(chrom_size, (i + 1) * batch_size)\n\n values = np.stack([\n np.stack([\n np.nan_to_num(reader.values(chrom, start, end))\n ], axis=1) for reader in bigwig_readers\n ], axis=1)\n\n chrom_dset[start : end] = values",
"def create_datafile(datasource, ticlist, dest_basename):\n def get_gvkeys_from_ticlist(ticlist): #TODO: use actual gvkeys\n \"\"\"\n Returns 'gvkeys' from ticlist.dat as a sorted list.\n\n NOTE: Right now, 'gvkeys' are not the actual gvkeys that you'd see in\n Compustat. Instead, they're unique identifiers constructed by concatenating\n a numeric id for the exchange (1 for Nasdaq, 2 for NYSE) with the ticker\n name.\n \"\"\"\n ticlist_filepath = os.path.join(DATASETS_PATH, ticlist)\n\n if os.path.isfile(ticlist_filepath):\n ticlist_df = pd.read_csv(ticlist_filepath, sep=' ', header=None)\n gvkeys = list()\n for line in ticlist_df.values:\n if line[1] == 'Nasdaq':\n gvkeys.append('1'+line[0])\n elif line[1] == 'NYSE':\n gvkeys.append('2'+line[0])\n else:\n gvkeys.append('9'+line[0]) # TODO: is that best way to handle\n # unrecognized market?\n else:\n gvkeys = list()\n \n return gvkeys\n\n def shave_open_dataset(ticlist, dest):\n \"\"\"\n Shaves wanted data (in terms of tics and features only; the shaving by\n dates is done by BatchGenerator's constructor), stores shaved .dat file\n at dest.\n\n NOTE: shaving by features not implemented yet, will rely on a\n feat_map.txt file.\n \"\"\"\n gvkeys = get_gvkeys_from_ticlist(ticlist)\n open_df = pd.read_csv(OPEN_DF_PATH, sep=' ', dtype={'gvkey': str})\n shaved_df = open_df[open_df.gvkey.isin(gvkeys)]\n shaved_df.to_csv(dest, sep=' ', index=False)\n\n def write_WRDS_data(dest):\n \"\"\"\n Writes .dat file using data from WRDS.\n \"\"\"\n raise NotImplementedError(\"Sorry! WRDS integration not ready.\") # TODO\n\n dest = get_data_path(DATASETS_PATH, dest_basename)\n\n if datasource == \"open_dataset\":\n shave_open_dataset(ticlist, dest)\n elif datasource == \"WRDS\":\n write_WRDS_data(ticlist, dest)\n else:\n raise Exception(\"Unknown datasource.\")",
"def create_hdf(affinity_data_path, output_total_hdf, mol2_path, general_PDBs_path, refined_PDBs_path, path_to_elements_xml, bad_pdbids_input = []):\n\n #Necessary import statements\n import pickle\n import numpy as np\n import openbabel.pybel\n from openbabel.pybel import Smarts\n from math import ceil, sin, cos, sqrt, pi\n from itertools import combinations\n import pandas as pd\n import h5py\n import csv\n import xml.etree.ElementTree as ET\n import os\n\n # Import the Featurizer class from tfbio(source code here): https://gitlab.com/cheminfIBB/tfbio/-/blob/master/tfbio/data.py\n from tfbio.data import Featurizer\n\n # define function to select pocket mol2 files with atoms that have unrealistic charges\n def high_charge(molecule):\n for i, atom in enumerate(molecule):\n if atom.atomicnum > 1:\n if (abs(atom.__getattribute__('partialcharge'))>2):\n return True\n else: \n return False \n\n # define function to extract features from the binding pocket mol2 file check for unrealistic charges\n def __get_pocket():\n for pfile in pocket_files:\n pocket = next(openbabel.pybel.readfile('mol2', pfile))\n if high_charge(pocket):\n bad_complexes.append((os.path.splitext(os.path.split(pfile)[1])[0]).split('_')[0]) \n pocket_coords, pocket_features = featurizer.get_features(pocket, molcode=-1)\n pocket_vdw = parse_mol_vdw(mol=pocket, element_dict=element_dict)\n yield (pocket_coords, pocket_features, pocket_vdw)\n\n # define function to extract information from elements.xml file\n def parse_element_description(desc_file):\n element_info_dict = {}\n element_info_xml = ET.parse(desc_file)\n for element in element_info_xml.getiterator():\n if \"comment\" in element.attrib.keys():\n continue\n else:\n element_info_dict[int(element.attrib[\"number\"])] = element.attrib\n\n return element_info_dict\n\n # define function to create a list of van der Waals radii for a molecule\n def parse_mol_vdw(mol, element_dict):\n vdw_list = []\n for atom in mol.atoms:\n if int(atom.atomicnum)>=2:\n vdw_list.append(float(element_dict[atom.atomicnum][\"vdWRadius\"]))\n return np.asarray(vdw_list)\n\n # read in data and format properly\n element_dict = parse_element_description(path_to_elements_xml)\n affinities = pd.read_csv(affinity_data_path)\n pdbids_cleaned = affinities['pdbid'].to_numpy()\n bad_complexes = bad_pdbids_input\n \n # fill lists with paths to pocket and ligand mol2 files\n pocket_files, ligand_files = [], []\n for i in range(0, len(pdbids_cleaned)):\n if pdbids_cleaned[i] not in bad_complexes:\n pocket_files.append(mol2_path + \"/\" + pdbids_cleaned[i] + '_pocket.mol2')\n if affinities['set'][i]=='general':\n ligand_files.append(general_PDBs_path + \"/\" + pdbids_cleaned[i] + '/' + pdbids_cleaned[i] + '_ligand.mol2')\n else:\n ligand_files.append(refined_PDBs_path + \"/\" + pdbids_cleaned[i] + '/' + pdbids_cleaned[i] + '_ligand.mol2')\n\n num_pockets = len(pocket_files)\n num_ligands = len(ligand_files)\n\n affinities_ind = affinities.set_index('pdbid')['-logKd/Ki']\n\n featurizer = Featurizer()\n\n # create a new hdf file to store all of the data\n with h5py.File(output_total_hdf, 'a') as f:\n\n pocket_generator = __get_pocket()\n for lfile in ligand_files:\n # use pdbid as dataset name\n name = os.path.splitext(os.path.split(lfile)[1])[0]\n pdbid = name.split('_')[0]\n\n #Avoid duplicates\n if pdbid in list(f.keys()):\n continue\n\n # read ligand file using pybel\n ligand = next(openbabel.pybel.readfile('mol2', lfile))\n\n # extract features from pocket and check for unrealistic charges\n pocket_coords, 
pocket_features, pocket_vdw = next(pocket_generator)\n\n # extract features from ligand and check for unrealistic charges\n ligand_coords, ligand_features = featurizer.get_features(ligand, molcode=1)\n ligand_vdw = parse_mol_vdw(mol=ligand, element_dict=element_dict)\n if high_charge(ligand):\n if pdbid not in bad_complexes:\n bad_complexes.append(pdbid)\n\n # if the current ligand file is part of a bad complex, do not copy to the cleaned hdf file\n if pdbid in bad_complexes:\n continue\n\n # center the ligand and pocket coordinates\n centroid = ligand_coords.mean(axis=0)\n ligand_coords -= centroid\n pocket_coords -= centroid\n\n # assemble the features into one large numpy array: rows are heavy atoms, columns are coordinates and features\n data = np.concatenate(\n (np.concatenate((ligand_coords, pocket_coords)),\n np.concatenate((ligand_features, pocket_features))),\n axis=1,\n )\n # concatenate van der Waals radii into one numpy array\n vdw_radii = np.concatenate((ligand_vdw, pocket_vdw))\n\n # create a new dataset for this complex in the hdf file\n dataset = f.create_dataset(pdbid, data=data, shape=data.shape,\n dtype='float32', compression='lzf')\n\n # add the affinity and van der Waals radii as attributes for this dataset \n dataset.attrs['affinity'] = affinities_ind.loc[pdbid]\n assert len(vdw_radii) == data.shape[0]\n dataset.attrs[\"van_der_waals\"] = vdw_radii"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Suggest a new name for an h5lmt file.
|
def suggest_name( src ):
date = src.split(os.sep)[-2]
basename = os.path.basename(src).split('.', 2)[0]
if basename in hpcparse.FS_MAP_REV:
return hpcparse.FS_MAP_REV[basename] + "_" + date + ".hdf5"
else:
return basename + "_" + date + ".hdf5"
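A brief usage sketch of suggest_name; the path below and the contents of hpcparse.FS_MAP_REV are hypothetical, and this example takes the else-branch where the basename is not a known filesystem key.

# Hypothetical h5lmt path layout: .../<date>/<basename>.h5lmt
print(suggest_name('/data/2019-01-31/testfs.h5lmt'))
# -> 'testfs_2019-01-31.hdf5'  (basename 'testfs' not in hpcparse.FS_MAP_REV)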
|
[
"def get_nameSimulation(self):\n self.path.name = self.input_file.name.split(\"_ky\")[0] if \"_ky\" in self.input_file.name else self.input_file.stem\n return",
"def setRawName(*args, **kwargs):\n \n pass",
"def setH5file(self, h5filepath):\n self.h5file = os.path.expanduser(h5filepath)",
"def SetTopographyFileName(self, name):\n print (\"SetTopographyFileName\", name)\n name = name if name != 'None' else None\n if self._toponame != name:\n self._toponame = name\n self.Modified()",
"def reset_new_name(self, event=None):\n\n selected_file = self._selected_file.get()\n base, ext = os.path.splitext(os.path.basename(selected_file))\n\n # Reset the displayed basename\n self._new_name.set(base)\n\n # Set the focus on the filename entry box and select all text\n self.focus_filename_entry()",
"def name_film():\n # Use the last 4 chars as the file extension\n # (this should be .mp4 or other video ext.) \n ext = film_link[-4:] \n # Name the file by putting\n file_name = title_hyphen + ext \n \n # FIXME this finishes on a new line - which we dont want! \n yes_no_question(\"\\n[+] We have attempted to name the file from the\"\\\n \"title;\\n\\\"{0}\\\"\\nIs our guess O.K? [Yes/no]\\n--> \"\\\n .format(file_name))",
"def _tablet_filename(self, cgroup):\n\t\treturn '%s.%s.h5' % (self.name, cgroup)",
"def check_name(self) -> str:\n raise NotImplementedError",
"def default_save_as_fname(input_fname):\n parts = input_fname.split('.')\n if len(parts) == 1:\n return parts[0] + \"_hrv\"\n\n return '.'.join(parts[:-1]) + '_hrv'",
"def get_viable_similarities_file_name(gt_id, target_horizon, history, lag, metric, model):\n return os.path.join(\n get_cache_dir(model),\n '{}-viable_similarities-{}-{}-hist{}-lag{}.h5'.format(\n metric, gt_id,target_horizon,history,lag))",
"def auto_rename(source_file_path, save_path, md_path):\n if os.path.isfile(md_path):\n info_dict = ValidMarkdown(md_path).check_markdown_file()\n else:\n raise FileNotFoundError(f\"{md_path} not exists\")\n\n if os.path.isfile(source_file_path):\n # Create new file name\n file_format = os.path.splitext(source_file_path)[-1]\n name_list = [info_dict['backbone-name'], info_dict['train-backend'], info_dict['mindspore-version']]\n if info_dict.get('train-dataset', None):\n name_list.append(info_dict['train-dataset'])\n now_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n name_list.append(now_time)\n new_file_name = '_'.join([str(x) for x in name_list]) + file_format\n new_file_name = new_file_name.replace('/', '-')\n\n # Create new file path\n des_file_path = os.path.join(save_path, info_dict['module-type'], info_dict['backbone-name'], new_file_name)\n\n # If file path is not exist, create des file path\n if not os.path.exists(os.path.dirname(des_file_path)):\n os.makedirs(os.path.dirname(des_file_path))\n\n # Copy file\n if source_file_path and des_file_path:\n try:\n shutil.copyfile(source_file_path, des_file_path)\n except IOError as e:\n raise Exception(e)\n except:\n raise Exception('Unexcepted error: ', sys.exc_info())\n\n print('Rename and copy file done!')\n return des_file_path",
"def metal_name(self, name):\n self._name = name",
"def set_name(self,name):\r\n if not len(name):\r\n raise Exception(\"The specified morphism name is empty\")\r\n self.name = name",
"def fix_germline_samplename(in_file, sample_name, data):\n out_file = \"%s-fixnames%s\" % utils.splitext_plus(in_file)\n if not utils.file_exists(out_file):\n with file_transaction(data, out_file) as tx_out_file:\n sample_file = \"%s-samples.txt\" % utils.splitext_plus(tx_out_file)[0]\n with open(sample_file, \"w\") as out_handle:\n out_handle.write(\"%s\\n\" % sample_name)\n cmd = (\"bcftools reheader -s {sample_file} {in_file} -o {tx_out_file}\")\n do.run(cmd.format(**locals()), \"Fix germline samplename: %s\" % sample_name)\n return vcfutils.bgzip_and_index(out_file, data[\"config\"])",
"def renameUI():\n pass",
"def _html_file_ext(name):\n return '%(name)s.html' % {'name': name}",
"def set_filename(self, name):\n\t\tself.cfg.set_str(ROOTKEY, 'filename', os.path.basename(name))",
"def test_used_as_name_reifier (self):\n self._test_reifiable(self.create_name())",
"def test_maya_name_correct_mb():\r\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Many tag-related changes: add many-to-many relationships between changes and their added/removed tags, add a composite primary key to Tag from name and is_default_language, and change the sticker_tag many-to-many relationship to reference the new primary key of Tag.
|
def upgrade():
op.drop_constraint("sticker_tag_tag_name_fkey", "sticker_tag", type_="foreignkey")
op.drop_constraint("tag_pkey", "tag")
op.create_primary_key("tag_pkey", "tag", ["name", "is_default_language"])
# Change added tags many to many relationship
op.create_table(
"change_added_tags",
sa.Column("change_id", sa.Integer(), nullable=True),
sa.Column("tag_name", sa.String(), nullable=True),
sa.Column("tag_is_default_language", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["change_id"],
["change.id"],
onupdate="cascade",
ondelete="cascade",
deferrable=True,
),
sa.ForeignKeyConstraint(
["tag_name", "tag_is_default_language"],
["tag.name", "tag.is_default_language"],
onupdate="cascade",
ondelete="cascade",
deferrable=True,
),
)
op.create_index(
op.f("ix_change_added_tags_change_id"),
"change_added_tags",
["change_id"],
unique=False,
)
op.create_index(
op.f("ix_change_added_tags_tag_name"),
"change_added_tags",
["tag_name"],
unique=False,
)
# Change removed tags many to many relationship
op.create_table(
"change_removed_tags",
sa.Column("change_id", sa.Integer(), nullable=True),
sa.Column("tag_name", sa.String(), nullable=True),
sa.Column("tag_is_default_language", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["change_id"],
["change.id"],
onupdate="cascade",
ondelete="cascade",
deferrable=True,
),
sa.ForeignKeyConstraint(
["tag_name", "tag_is_default_language"],
["tag.name", "tag.is_default_language"],
onupdate="cascade",
ondelete="cascade",
deferrable=True,
),
)
op.create_index(
op.f("ix_change_removed_tags_change_id"),
"change_removed_tags",
["change_id"],
unique=False,
)
op.create_index(
op.f("ix_change_removed_tags_tag_name"),
"change_removed_tags",
["tag_name"],
unique=False,
)
op.add_column(
"sticker_tag", sa.Column("tag_is_default_language", sa.Boolean(), nullable=True)
)
op.create_foreign_key(
"sticker_tag_tag_name_fkey",
"sticker_tag",
"tag",
["tag_name", "tag_is_default_language"],
["name", "is_default_language"],
onupdate="cascade",
ondelete="cascade",
deferrable=True,
)
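A minimal sketch of the matching downgrade(), inferred from the upgrade above rather than taken from the original migration; it assumes the pre-migration schema had tag_pkey on name alone and sticker_tag_tag_name_fkey referencing only tag.name.

def downgrade():
    # Drop the composite foreign key and the extra column on sticker_tag.
    op.drop_constraint("sticker_tag_tag_name_fkey", "sticker_tag", type_="foreignkey")
    op.drop_column("sticker_tag", "tag_is_default_language")
    # Remove the added/removed tag association tables (their indexes go with them on PostgreSQL).
    op.drop_table("change_removed_tags")
    op.drop_table("change_added_tags")
    # Restore the single-column primary key on tag and the original foreign key.
    op.drop_constraint("tag_pkey", "tag")
    op.create_primary_key("tag_pkey", "tag", ["name"])
    op.create_foreign_key(
        "sticker_tag_tag_name_fkey",
        "sticker_tag",
        "tag",
        ["tag_name"],
        ["name"],
        onupdate="cascade",
        ondelete="cascade",
        deferrable=True,
    )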
|
[
"def bind_tags(self, tags):\n current_map = dict((x.name, x) for x in self.tags)\n currently_attached = set(x.name for x in self.tags)\n new_tags = set(tags)\n\n def lookup_tag(name):\n tag = Tag.query.filter_by(locale=self.locale,\n name=name).first()\n if tag is not None:\n return tag\n return Tag(name, self.locale)\n\n # delete outdated tags\n for name in currently_attached.difference(new_tags):\n self.tags.remove(current_map[name])\n\n # add new tags\n for name in new_tags.difference(currently_attached):\n self.tags.append(lookup_tag(name))",
"def set_tags_attr(self, tags):\n for i in self.tags.all():\n db.session.delete(i)\n db.session.commit()\n # Update with new list of tags returned from make_tags\n tags_models = Tag().create(tags)\n if tags_models:\n self.tags = tags_models",
"def update_tags(self):\n raise NotImplementedError",
"def update_tags(model, tag_property, tags, tag_model, tagging_user):\n taggable = getattr(model, tag_property)\n taggable.clear()\n\n tag_models = [model_matching_tag(tag, tag_model, tagging_user) for tag in tags]\n\n for tag in tag_models:\n if isinstance(tag, tag_model):\n taggable.add(tag)\n\n model.save(tagging_user)",
"def _add_tags(session, new_tags: List[str], old_tags: Dict[str, int]) -> List[int]:\n new_ids: List[int] = []\n for tag in new_tags:\n # Add new tags\n if tag not in old_tags:\n tag_item = session.query(Tag).filter(Tag.name == tag).first()\n if not tag_item:\n # Complete new tag in db, add it to Tag table\n tag_item = Tag(name=tag)\n session.add(tag_item)\n session.commit()\n new_ids.append(tag_item.id)\n return new_ids",
"def modify_tags(self,note_id,tags):\n\n self._find_note(note_id).tags = tags",
"def on_pre_sync(self, changed):\n _add_tags(changed)",
"def assign_extra_tags(sender, **kwargs):\n action = kwargs.get('action')\n if action == 'post_add':\n reverse = kwargs.get('reverse')\n if not reverse:\n # In the event of a forward relation, the post\n # instance is assigned to the instance keyword.\n # While the list of startup primary keys being\n # associated to the post object is passed to\n # pk_set keyword.The Startup is assigned to\n # the model keyword. I have commented it out\n # because we are not going to use the startup.\n post = kwargs.get('instance')\n # Startup = kwargs.get('model')\n startup_pk_set = kwargs.get('pk_set')\n # The in lookup available to managers and\n # querysets finds all the values in a list.\n # In our case we are using the in lookup on\n # our tags to find out all the tags associated\n # with the startups, that have pk in startup_pk_set keyword.\n # We then call the values_list method on the queryset\n # to give us a flat list of primary keys.\n # We then use the distinct() to make sure the pk are unique.\n # iterator() method is used to ensure, django doesn't cache\n # our queryset.\n tag_pk_set = Tag.objects.filter(startup__in=startup_pk_set) \\\n .values_list('pk', flat=True).distinct() \\\n .iterator()\n post.tags.add(*tag_pk_set)\n else:\n startup = kwargs.get('instance')\n post = kwargs.get('model')\n post_pk_set = kwargs.get('pk_set')\n # We use the relatedManager, that is created\n # for m2m and foreign key relation to call\n # the values_list() method to retrieve the\n # pk of the tags associated with a startup\n tags_associated_with_startup = startup.tags.values_list(\n 'pk', flat=True\n ).iterator()\n # We then use the in_bulk queryset method to load\n # the post objects in post_pk_set\n post_dict = post.objects.in_bulk(post_pk_set)\n # We then get the values of the dict that is\n # a list of post objects and iterate over them\n # The tags associated with startup are then\n # added to the post.\n for post in post_dict.values:\n post.tags.add(tags_associated_with_startup)",
"def save_tags(article_id, tag_names=None):\n # Get tags in the correct format\n if isinstance(tag_names, str):\n tag_names = tag_names.split(\",\")\n tag_names = tuple(tag.strip() for tag in tag_names if tag != \"\")\n if not isinstance(tag_names, (list, tuple, type(None))):\n try:\n tag_names = tuple(tag_names)\n tag_names = tuple(tag for tag in tag_names if tag != \"\")\n except TypeError:\n current_app.logger.error(\"Could not convert tags to Tuple.\")\n return\n current_app.logger.debug(\"Tags given: {}\".format(tag_names))\n\n conn = engine.connect()\n\n # Remove all current tags for the given article\n delstmt = tag_map.delete().where(tag_map.c.article_id == article_id)\n conn.execute(delstmt)\n\n # If tags is None, we just wanted to delete current tag associations\n if tag_names is None or len(tag_names) == 0:\n conn.close()\n return\n\n # Insert any new tags which didn't exist before\n insstmt = tags.insert().prefix_with(\"OR IGNORE\")\n conn.execute(insstmt, [{'tag': tag} for tag in tag_names])\n\n # Now attach the tags to the articles using the map table\n selstmt = select([tags.c.id]).where(tags.c.tag == bindparam(\"tag_name\"))\n mapstmt = tag_map.insert({'tag_id': selstmt})\n conn.execute(mapstmt,\n [{'tag_name': tag,\n 'article_id': article_id} for tag in tag_names])",
"def add_tags(original_wf, tags_list):\r\n\r\n # WF metadata\r\n if \"tags\" in original_wf.metadata:\r\n for t in tags_list:\r\n if t not in original_wf.metadata[\"tags\"]:\r\n original_wf.metadata[\"tags\"].append(t)\r\n else:\r\n original_wf.metadata[\"tags\"] = tags_list\r\n\r\n # FW metadata\r\n for idx_fw in range(len(original_wf.fws)):\r\n if \"tags\" in original_wf.fws[idx_fw].spec:\r\n for t in tags_list:\r\n if t not in original_wf.fws[idx_fw].spec[\"tags\"]:\r\n original_wf.fws[idx_fw].spec[\"tags\"].append(t)\r\n else:\r\n original_wf.fws[idx_fw].spec[\"tags\"] = tags_list\r\n\r\n # DB insertion tasks\r\n idxs = get_fws_and_tasks(original_wf, task_name_constraint=\"ToDb\")\r\n for idx_fw, idx_t in idxs:\r\n if \"additional_fields\" in original_wf.fws[idx_fw].tasks[idx_t].optional_params:\r\n if \"tags\" in original_wf.fws[idx_fw].tasks[idx_t][\"additional_fields\"]:\r\n for t in tags_list:\r\n if (\r\n t\r\n not in original_wf.fws[idx_fw].tasks[idx_t][\r\n \"additional_fields\"\r\n ][\"tags\"]\r\n ):\r\n original_wf.fws[idx_fw].tasks[idx_t][\"additional_fields\"][\r\n \"tags\"\r\n ].append(t)\r\n else:\r\n original_wf.fws[idx_fw].tasks[idx_t][\"additional_fields\"][\r\n \"tags\"\r\n ] = tags_list\r\n\r\n return original_wf",
"def update_tags(instance, **kwargs):\n old_tags = list(instance.tags.all())\n for token in instance.content.tags:\n tag, t_is_new = Tag.objects.get_or_create(content=token,\n defaults={'creator':instance.author})\n\n taggedNote, tn_is_new = TaggedNote.objects.get_or_create(\n note=instance, tag=tag,\n defaults={'tagged_by':instance.author})\n if tag in old_tags:\n # old tags that remain in the content are removed from\n # the `old_tags` list, which in the end contains only \n # tags that are not longer used by `instance`\n old_tags.remove(tag)\n\n for tag in old_tags:\n taggedNote = TaggedNote.objects.get(note=instance,\n tag=tag)\n taggedNote.delete()",
"def _save_tags(self, photo_obj, tags_data):\n\n # The existing tag-photo relationships.\n tagged_photos = Photo.tags.through.objects.filter(content_object=photo_obj)\n\n local_flickr_ids = set([])\n remote_flickr_ids = set([])\n\n # Get the Flickr IDs of all the current tag-photo relationships.\n for tagged_photo in tagged_photos:\n local_flickr_ids.add(tagged_photo.flickr_id)\n\n for tag in tags_data:\n remote_flickr_ids.add(tag[\"id\"])\n\n if tag[\"id\"] not in local_flickr_ids:\n\n # This tag isn't currently on the photo, so add it.\n try:\n tag_obj, tag_created = Tag.objects.get_or_create(\n slug=tag[\"_content\"], defaults={\"name\": tag[\"raw\"]}\n )\n except IntegrityError:\n # It's possible for there to be a tag with a different\n # slug but the same name, which would cause an\n # IntegrityError.\n # In which case, just fetch the existing Tag by name:\n tag_obj = Tag.objects.get(name=tag[\"raw\"])\n\n # Who created this tag?\n if tag[\"author\"] == photo_obj.user.nsid:\n # The same person whose photo these tags are on.\n user = photo_obj.user\n else:\n # In theory we'll already have fetched and saved data for\n # all authors of these tags when fetching this photo's\n # data.\n try:\n user = User.objects.get(nsid=tag[\"author\"])\n except User.DoesNotExist:\n raise FetchError(\n \"Tried to add a Tag authored by a Flickr user \"\n \"with NSID %s who doesn't exist in the DB.\" % tag[\"author\"]\n )\n\n pt_obj = Photo.tags.through(\n flickr_id=tag[\"id\"],\n author=user,\n machine_tag=(tag[\"machine_tag\"] == \"1\"),\n content_object=photo_obj,\n tag=tag_obj,\n )\n pt_obj.save()\n\n flickr_ids_to_delete = local_flickr_ids.difference(remote_flickr_ids)\n\n # Finally, delete any tag-photo relationships which were identified\n # above as no longer on the photo on Flickr.\n for tagged_photo in tagged_photos:\n if tagged_photo.flickr_id in flickr_ids_to_delete:\n tagged_photo.delete()",
"def _log_tag_changes(self, cr, uid, ids, tags_val, context=None):\n if self._track_tags and hasattr(self, '_track'):\n for obj_id in ids:\n message = \"\"\n for args in tags_val:\n act, arg = args[0], args[1:]\n msg = \"\"\n if act == 0: # create\n arg1, arg2 = arg\n msg = _(\"<span>Tag <b>%s</b> created</span>\") % arg2['name']\n elif act == 1: # update\n arg1, arg2 = arg\n tag = self.pool.get('res.tag').name_get(cr, uid, arg1, context=context)[0][1]\n msg = _(\"<span>Tag <b>%s</b> modified</span>\") % tag\n elif act == 2: # remove\n tag = self.pool.get('res.tag').name_get(cr, uid, arg[0], context=context)[0][1]\n msg = _(\"<span>Tag <b>%s</b> deleted</span>\") % tag\n elif act == 3: # unlink\n tag = self.pool.get('res.tag').name_get(cr, uid, arg[0], context=context)[0][1]\n msg = _(\"<span>Tag <b>%s</b> removed</span>\") % tag\n elif act == 4: # Link\n tag = self.pool.get('res.tag').name_get(cr, uid, arg[0], context=context)[0][1]\n msg = _(\"<span>Tag <b>%s</b> added</span>\") % tag\n elif act == 5: # unlink all\n msg = _(\"<span>All tags removed</span>\")\n elif act == 6: # set s list of links\n arg1, arg2 = arg\n # When edition through the form, this action triggered\n # in most cases\n old_tags = set(self.browse(cr, uid, obj_id, context=context).tag_ids)\n new_tags = set(self.pool.get('res.tag').browse(cr, uid, arg2, context=context))\n tags_added = new_tags - old_tags\n tags_removed = old_tags - new_tags\n msg_tmpl = _(\"<div><span>Tags changed:</span><ul>%s</ul></div>\")\n\n msg_body = \"\"\n if tags_added:\n msg_body += _(\"<li class='oe_tags'><b>Tags added</b>: <span>%s</span></li>\") % u''.join(('<span class=\"oe_tag\">%s</span>' % tag.name_get()[0][1] for tag in tags_added))\n if tags_removed:\n msg_body += _(\"<li class='oe_tags'><b>Tags removed</b>: <span>%s</span></li>\") % u''.join(('<span class=\"oe_tag\">%s</span>' % tag.name_get()[0][1] for tag in tags_removed))\n if tags_added or tags_removed:\n msg_body += _(\"<hr/><li class='oe_tags'><b>Tags resulting</b>: <span>%s</span></li>\") % u''.join(('<span class=\"oe_tag\">%s</span>' % tag.name_get()[0][1] for tag in new_tags))\n\n if msg_body:\n msg = msg_tmpl % msg_body\n\n message += msg\n\n if message:\n self.message_post(cr, uid, obj_id, message, context=context)",
"def add_tag(self, tag):\n self.tags = list(set(self.tags or []) | set([tag]))",
"def _set_tags(self, tags: dict[any, any]) -> None:\n\n self.set_tags(tags, inplace=True)",
"def updateTags(self,\n tagKey: str,\n additions: AbstractSet[str],\n removals: AbstractSet[str]\n ) -> None:\n values = self.__tags.get(tagKey)\n if values is None:\n self.__tags[tagKey] = values = set()\n values -= removals\n values |= additions",
"def test_portals_id_designs_nk_tags_fk_put(self):\n pass",
"def forwards(self, orm):\r\n\r\n for tag in orm.Tag.objects.filter(slug__isnull=True):\r\n tag.save()\r\n\r\n for tag in orm.Tag.objects.filter(slug=''):\r\n tag.save()",
"def save_related(self, request, form, formsets, change):\n value = super(SkillAdmin, self).save_related(request, form, formsets, change)\n instance = form.instance\n\n for course in instance.courses.all():\n if instance not in course.education.skills.all():\n course.education.skills.add(instance)\n # TODO: Maybe only have to save course.education\n course.save()\n\n for project in instance.projects.all():\n if instance not in project.content_object.skills.all():\n project.content_object.skills.add(instance)\n # TODO: Maybe only have to save project.content_object\n project.save()\n\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Responsible for locking each test.
|
def run(self, messages):
if not self.args.lock:
return
format.print_line('~')
print('Locking tests')
print()
for test in self.assignment.test_map.values():
log.info('Locking {}'.format(test.name))
test.lock(self._hash_fn)
|
[
"def steal_test_lock(self, test_uuid):",
"def create_test_lock(self, test_uuid):",
"def test_multithreading():",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def release_test_lock(self, test_uuid):",
"def CheckSharedLocks(self):\n for client in range(10):\n self.manager.lock(self.lockfunc, \"filename\", LOCK_SHARED, client)\n self._print(self.manager)\n for client in range(10):\n self.manager.unlock(\"filename\", LOCK_NONE, client)\n self._print(self.manager)\n self.assertTrue(self.manager.is_idle())",
"def test_simple_lock():\n lock = RedLock(\"test_simple_lock\", [{\"host\": \"localhost\"}], ttl=1000)\n locked = lock.acquire()\n lock.release()\n assert locked is True",
"def test_context_manager():\n ttl = 1000\n with RedLock(\"test_context_manager\", [{\"host\": \"localhost\"}], ttl=ttl) as validity:\n assert 0 < validity < ttl - ttl * CLOCK_DRIFT_FACTOR - 2\n lock = RedLock(\"test_context_manager\", [{\"host\": \"localhost\"}], ttl=ttl)\n locked = lock.acquire()\n assert locked is False\n\n lock = RedLock(\"test_context_manager\", [{\"host\": \"localhost\"}], ttl=ttl)\n locked = lock.acquire()\n assert locked is True\n\n # try to lock again within a with block\n try:\n with RedLock(\"test_context_manager\", [{\"host\": \"localhost\"}]):\n # shouldn't be allowed since someone has the lock already\n assert False\n except RedLockError:\n # we expect this call to error out\n pass\n\n lock.release()",
"def test_acquire_multiple_locks(self) -> None:\n\n # Take out multiple locks and ensure that we can't get those locks out\n # again.\n lock = self.get_success(\n self.store.try_acquire_multi_read_write_lock(\n [(\"name1\", \"key1\"), (\"name2\", \"key2\")], write=True\n )\n )\n self.assertIsNotNone(lock)\n\n assert lock is not None\n self.get_success(lock.__aenter__())\n\n lock2 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name1\", \"key1\", write=True)\n )\n self.assertIsNone(lock2)\n\n lock3 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name2\", \"key2\", write=False)\n )\n self.assertIsNone(lock3)\n\n # Overlapping locks attempts will fail, and won't lock any locks.\n lock4 = self.get_success(\n self.store.try_acquire_multi_read_write_lock(\n [(\"name1\", \"key1\"), (\"name3\", \"key3\")], write=True\n )\n )\n self.assertIsNone(lock4)\n\n lock5 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name3\", \"key3\", write=True)\n )\n self.assertIsNotNone(lock5)\n assert lock5 is not None\n self.get_success(lock5.__aenter__())\n self.get_success(lock5.__aexit__(None, None, None))\n\n # Once we release the lock we can take out the locks again.\n self.get_success(lock.__aexit__(None, None, None))\n\n lock6 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name1\", \"key1\", write=True)\n )\n self.assertIsNotNone(lock6)\n assert lock6 is not None\n self.get_success(lock6.__aenter__())\n self.get_success(lock6.__aexit__(None, None, None))",
"def lock(self):\n self.locked = True",
"def test_lock_account_user(self):\n pass",
"def mustlock(self):\n pass",
"async def test_lock(hass: HomeAssistant, lock) -> None:\n\n zha_device, cluster = lock\n entity_id = find_entity_id(Platform.LOCK, zha_device, hass)\n assert entity_id is not None\n\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n await async_enable_traffic(hass, [zha_device], enabled=False)\n # test that the lock was created and that it is unavailable\n assert hass.states.get(entity_id).state == STATE_UNAVAILABLE\n\n # allow traffic to flow through the gateway and device\n await async_enable_traffic(hass, [zha_device])\n\n # test that the state has changed from unavailable to unlocked\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n\n # set state to locked\n await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 2})\n assert hass.states.get(entity_id).state == STATE_LOCKED\n\n # set state to unlocked\n await send_attributes_report(hass, cluster, {1: 0, 0: 2, 2: 3})\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n\n # lock from HA\n await async_lock(hass, cluster, entity_id)\n\n # unlock from HA\n await async_unlock(hass, cluster, entity_id)\n\n # set user code\n await async_set_user_code(hass, cluster, entity_id)\n\n # clear user code\n await async_clear_user_code(hass, cluster, entity_id)\n\n # enable user code\n await async_enable_user_code(hass, cluster, entity_id)\n\n # disable user code\n await async_disable_user_code(hass, cluster, entity_id)",
"def test_nested_locks():\n with throttle(b\"[semaphores]\\nA={ max=1, level=1 }\\nB={ max=1, level=0 }\") as url:\n client = Client(url)\n with lock(url, \"A\"):\n\n assert client.remainder(\"A\") == 0\n assert client.remainder(\"B\") == 1\n\n with lock(url, \"B\"):\n\n assert client.remainder(\"A\") == 0\n assert client.remainder(\"B\") == 0\n\n assert client.remainder(\"A\") == 0\n assert client.remainder(\"B\") == 1\n\n assert client.remainder(\"A\") == 1\n assert client.remainder(\"B\") == 1",
"def testLockExclusivity(self):\n lock_path = os.path.join(self.tempdir, 'locked_file')\n with locking.PortableLinkLock(lock_path, max_retry=0):\n with self.assertRaises(locking.LockNotAcquiredError):\n with locking.PortableLinkLock(lock_path, max_retry=5, sleep=0.1):\n self.fail('We acquired a lock twice?')",
"def testSingleProcessLock(self):\n arg_list = [\n [LOCK_ACQUIRED],\n [True, False], # blocking\n [True, False], # shared\n [locking.FLOCK, locking.LOCKF], # locking mechanism\n ]\n for args in itertools.product(*arg_list):\n self._HelperWithProcess(*args)",
"def test_lockfile(self):\n with lockfile(self.path) as lock:\n self.assertIsInstance(lock, LockFile)",
"def lock(self, **kwargs):\n for _ in range(3):\n result = self._nuki_lock.lock(True)\n if result is not None and result[\"success\"]:\n self._available = True\n self._cached_status_time = time.time()\n break\n\n self._available = False\n self.update()",
"def testLockfileRecreated(self):\n self._mock_basic_fs_calls()\n self._set_lock_status()\n self._set_stat_status(matching=False)\n self._set_unlink_status()\n with self.assertRaises(daemon.LockAlreadyLocked):\n with daemon.flock('bogus'):\n # Should never reach this.\n # pylint: disable=redundant-unittest-assert\n self.assertTrue(False) # pragma: no cover",
"def test_default_connection_details_value():\n RedLock(\"test_simple_lock\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Context manager to temporarily redirect stderr output to another source. If given, 'new_stderr' should be a file-like object.
|
@contextlib.contextmanager
def redirect_stderr(new_stderr=None):
    # Default to an in-memory buffer so callers can inspect captured output.
    if new_stderr is None:
        new_stderr = cStringIO.StringIO()
    old_stderr = sys.stderr
    sys.stderr = new_stderr
    try:
        yield new_stderr
    finally:
        # Restore the original stream even if the body raises.
        sys.stderr = old_stderr
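A short usage sketch of the context manager above, assuming sys (and, for the default buffer, cStringIO) are imported in the surrounding module as the code implies.

# Capture anything written to stderr inside the block and inspect it afterwards.
with redirect_stderr() as captured:
    sys.stderr.write("something went wrong\n")
print(captured.getvalue())  # -> 'something went wrong\n'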
|
[
"def _redirect_stderr(to_fd):\n # Flush the C-level buffer stderr\n libc.fflush(c_stderr)\n # Flush and close sys.stderr - also closes the file descriptor (fd)\n sys.stderr.close()\n # Make original_stderr_fd point to the same file as to_fd\n os.dup2(to_fd, original_stderr_fd)\n # Create a new sys.stderr that points to the redirected fd\n sys.stderr = os.fdopen(original_stderr_fd, 'wb')",
"def replace_stderr(replacement):\n _stderr = sys.stderr\n sys.stderr = replacement\n try:\n yield\n finally:\n sys.stderr = _stderr",
"def retrieve_stderr():\n with closing(StringIO()) as sio, replace_stderr(sio):\n oldprint = builtins.print\n try:\n # Overriding stderr doesn't work with libraries, this ensures even\n # cached variables take this up. Well... it works.\n def newprint(*args, **kwargs):\n kwargs['file'] = sio\n oldprint(*args, **kwargs)\n\n builtins.print = newprint\n yield sio\n finally:\n builtins.print = oldprint",
"def replace_stderr(target, process, fifo_value):\n restore_fns = []\n for mod in target.module_iter():\n for sym in mod:\n if sym.GetName() == STDERR_SYMBOL:\n if sym.GetStartAddress().GetOffset() != 0:\n addr = sym.GetStartAddress().GetLoadAddress(target)\n error = lldb.SBError()\n str_val = fifo_value.GetData().GetString(error, 0)\n try_sb_error(error)\n old_value = process.ReadMemory(addr, len(str_val), error)\n try_sb_error(error)\n process.WriteMemory(addr, str_val, error)\n try_sb_error(error)\n restore_fns.append(partial(process.WriteMemory, addr, old_value, error))\n return lambda: [f() for f in restore_fns]",
"def test_mute_stderr_redirecting(self, ):\n @redirect.mute_stderr\n def dummyfunc():\n return hash(sys.stderr)\n replaced_stderr = dummyfunc()\n self.assertNotEqual(self.hash_stderr, replaced_stderr, msg='Failed')",
"def trap_err(self):\n if sys.stderr is self.err:\n raise OutputTrapError('You are already trapping stderr.')\n if not self.debug:\n self._err_save = sys.stderr\n sys.stderr = self.err",
"def restord_stderr():\n sys.stderr = sys.__stderr__",
"def redirect_std_streams(stderr=sys.stderr, stdout=sys.stdout):\n def wrap(f):\n def newf(*args, **kwargs):\n old_stderr, old_stdout = sys.stderr, sys.stdout\n sys.stderr = stderr\n sys.stdout = stdout\n try:\n return f(*args, **kwargs)\n finally:\n sys.stderr, sys.stdout = old_stderr, old_stdout\n return newf\n return wrap",
"def test_stderr(self):\n tmp_file = os.path.join(tmp_dir_path,'tmp_log')\n saved_stderr = sys.stderr\n tmp_stderr = os.path.join(tmp_dir_path,'tmp_stderr')\n with open(tmp_stderr,'w') as sys.stderr:\n with EppLogger(tmp_file, prepend=False) as epp_logger:\n print('stderr nosetest', file=sys.stderr)\n sys.stderr = saved_stderr\n with open(tmp_stderr,'r') as stderr:\n stream_lines = stderr.readlines()\n assert 'stderr nosetest' in stream_lines[-1]\n\n with open(tmp_file,'r') as log_file:\n log_lines = log_file.readlines()\n assert 'stderr nosetest' in log_lines[-1]",
"def stderr_pipe(self):\r\n return self.stderr(PIPE)",
"def test_error_redirect(self):\n filep = six.moves.StringIO('w')\n with elcaminoreal.errors_to(filep):\n some_plugins.COMMANDS.run(['no-such-command'])\n error_message = filep.getvalue().splitlines()\n self.assertEquals(error_message.pop(0), 'Usage:')",
"def redirect_stderr(appname, errortext):\n ErrorDialog.appname = appname\n ErrorDialog.errortext = errortext\n def hook(type, value, tb):\n text = ''.join(traceback.format_exception(type, value, tb))\n QtWidgets.QApplication.postEvent(errorreceiver,ErrorReceiver.ErrorEvent(text))\n sys.excepthook = hook",
"def _WriteStderr(self, value):\n self._stderr.write(value)\n return",
"def _stderr_filed(func):\n def wrapper(self, msg, file=None):\n if file:\n return func(self, msg, file=file)\n elif self.io_manager:\n with self.io_manager.with_stderr() as stderr:\n return func(self, msg, file=stderr)\n else:\n return func(self, msg, file=sys.stderr)\n wrapper.__doc__ = func.__doc__\n return wrapper",
"def new_failed(self, new_failed):\n\n self._new_failed = new_failed",
"def set_stdout_stderr():\n\n class Writer(object):\n def write(self, msg):\n log.debug(msg)\n if verbose:\n chunk_send(msg)\n\n def flush(self):\n pass\n\n orig_stds = sys.stdout, sys.stderr\n w = Writer()\n sys.stdout = w\n sys.stderr = w\n\n def cleanup():\n \"\"\"\n Restores stdout and stderr\n \"\"\"\n sys.stdout = orig_stds[0]\n sys.stderr = orig_stds[1]\n client_sock.close()\n\n return cleanup",
"def log_syserr(logActive) :\n if logActive :\n #os.dup2(fd1, 1) # stdout\n os.dup2(fdLogErr, 2) # stderr\n return",
"def err(self, *values, **options):\n return self.out_to(self.stderr, *values, **options)",
"def on_stderr_receive(self, debug, message):\r\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open the CSVs, read them, get all the data, get the plant names and the already analyzed genome names, and return a list of plant names and a list of already analyzed genome names.
|
def getInfo(filename1):
with open(filename1) as f1:
reader = csv.reader(f1) #opens csv file
data1 = [] #holds all information from rows in csv
#start for
for row in reader:
data1.append(row) #grabs the information from each row
#end for
plantNames = [] #holds list of names of plants to search
#start for
for i in range(len(data1)):
plantNames.append(data1[i][0]) #grabs the first value from each row
#end for
return plantNames #function returns list of plant names to search
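A small usage sketch, assuming a hypothetical plants.csv whose first column holds the plant names; the function above returns only that first column.

# plants.csv (hypothetical contents):
#   Arabidopsis thaliana,already_analyzed
#   Oryza sativa,not_yet_analyzed
print(getInfo('plants.csv'))
# -> ['Arabidopsis thaliana', 'Oryza sativa']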
|
[
"def read_kg_data(csv_file):\n print(f\"Started a model builder for data from: {csv_file}\")\n df = pd.read_csv(csv_file)\n df.columns = [\"h\", \"r\", \"t\"]\n entities = list(set(df[\"h\"].tolist() + df[\"t\"].tolist()))\n relations = list(set(df[\"r\"].tolist()))\n return entities, relations",
"def parse_facilities_csv():\n # Parse CSV files\n facilities = []\n for fname in os.listdir(cwd):\n if not fname.endswith('.csv'):\n continue\n\n file_path = os.path.join(cwd, fname) \n with open(file_path, 'rb') as f:\n logging.debug('Parsing: ' + f.name)\n\n reader = csv.reader(f, delimiter=',', quotechar='\"')\n headers = [h.strip('\"') for h in reader.next()]\n\n for row in reader:\n facility = {}\n for header, col in zip(headers, row):\n facility[header.lower()] = col\n facilities.append(facility)\n \n # Build output data structure\n lgas = {}\n for fac in facilities:\n lga_id = fac['unique_lga']\n fac_id = fac['uid']\n fac_name = fac['name']\n\n lga = lgas.setdefault(lga_id, [])\n lga.append({\n 'id': fac_id,\n 'name': fac_name\n })\n return lgas",
"def read_in_genres_from_csv():\n all_genres = {}\n \n with open(os.getcwd() + '/gaproject/genres.csv', 'r') as csv_file:\n genre_reader = csv.DictReader(csv_file)\n for genre in genre_reader:\n all_genres[genre['Id']] = genre['FriendlyName']\n return all_genres",
"def getWorksFromCsv():\n dir = str(Path.home()) + '/SMB/'\n files = [ 'obj_ifg__teil1.csv', 'obj_ifg__teil2.csv', 'obj_ifg__teil3.csv']\n\n for filename in files:\n fullfilename = dir + filename\n with open(fullfilename) as crapcsvfile:\n # Yes, I'm really using a regex to parse an csv file. The file is complete crap\n regex = '^\"(?P<id>\\d+),\"\"(?P<identnr>[^\\\"]*)\"\",\"\"(?P<titel>[^\\\"]*)\"\",\"\"(?P<beteiligte>[^\\\"]*)\"\",\"\"(?P<bereich>[^\\\"]*)\"\",\"\"(?P<matTech>[^\\\"]*)\"\",\"\"(?P<objekttyp>[^\\\"]*)\"\",\"\"(?P<datierung>[^\\\"]*)\"\"\"(?P<junk>[^\\\"]*)$'\n for match in re.finditer(regex, crapcsvfile.read(), flags=re.M):\n yield match.groupdict()\n #reader = csv.DictReader(csvfile)\n #for row in reader:\n # yield row",
"def prepare_csv_data():\n csv_lines = []\n\n # Get the files that we'll be using\n if AWARD_TYPE == \"ACTOR\":\n sag_award_file = AwardFile(\"Actors/SAG_Actors.csv\")\n gg_drama_award_file = AwardFile(\"Actors/GG_Drama_Actors.csv\")\n gg_musical_award_file = AwardFile(\"Actors/GG_Musical_Actors.csv\")\n cc_award_file = AwardFile(\"Actors/CC_Actors.csv\")\n bafta_award_file = AwardFile(\"Actors/BAFTA_Actors.csv\")\n\n oscar_nomination_file = \"Actors/best_actor_nominations.txt\"\n elif AWARD_TYPE == \"ACTRESS\":\n sag_award_file = AwardFile(\"Actresses/SAG_Actress.csv\")\n gg_drama_award_file = AwardFile(\"Actresses/GG_Drama_Actress.csv\")\n gg_musical_award_file = AwardFile(\"Actresses/GG_Musical_Actress.csv\")\n cc_award_file = AwardFile(\"Actresses/CC_Actress.csv\")\n bafta_award_file = AwardFile(\"Actresses/BAFTA_Actress.csv\")\n\n oscar_nomination_file = \"Actresses/best_actress_nominations.txt\"\n elif AWARD_TYPE == \"DIRECTOR\":\n gg_award_file = AwardFile(\"Directors/GG_Directors.csv\")\n cc_award_file = AwardFile(\"Directors/CC_Directors.csv\")\n bafta_award_file = AwardFile(\"Directors/BAFTA_Directors.csv\")\n\n oscar_nomination_file = \"Directors/best_director_nominations.txt\"\n\n # Get the names that were nominated for an Oscar so we can filter our data\n # to only include the relevent names of just those who were nominated for an\n # Oscar.\n oscar_nomination_lines = []\n with open(oscar_nomination_file, \"r\") as f:\n for line in f:\n oscar_nomination_lines.append(line.strip())\n\n oscar_line_index = 0\n\n for year in range(STARTING_YEAR,ENDING_YEAR+1):\n list_of_data_dictionaries = []\n\n # NOTE: Uncomment this to get data upto each year\n # if AWARD_TYPE == \"ACTOR\" or AWARD_TYPE == \"ACTRESS\":\n # sag_data = sag_award_file.get_summed_year_data(year)\n # gg_drama_data = gg_drama_award_file.get_summed_year_data(year)\n # gg_musical_data = gg_musical_award_file.get_summed_year_data(year)\n # cc_data = cc_award_file.get_summed_year_data(year)\n # bafta_data = bafta_award_file.get_summed_year_data(year)\n # elif AWARD_TYPE == \"DIRECTOR\":\n # gg_data = gg_award_file.get_summed_year_data(year)\n # cc_data = cc_award_file.get_summed_year_data(year)\n # bafta_data = bafta_award_file.get_summed_year_data(year)\n ##\n\n # NOTE: Comment this if you want to get data upto each year\n # Get the award dictionaries for each award show\n if AWARD_TYPE == \"ACTOR\" or AWARD_TYPE == \"ACTRESS\":\n sag_data = sag_award_file.get_year_data(year)\n gg_drama_data = gg_drama_award_file.get_year_data(year)\n gg_musical_data = gg_musical_award_file.get_year_data(year)\n cc_data = cc_award_file.get_year_data(year)\n bafta_data = bafta_award_file.get_year_data(year)\n elif AWARD_TYPE == \"DIRECTOR\":\n gg_data = gg_award_file.get_year_data(year)\n cc_data = cc_award_file.get_year_data(year)\n bafta_data = bafta_award_file.get_year_data(year)\n ##\n\n # put all of the dictionaries into a list so we can iterate through them\n\n if AWARD_TYPE == \"ACTOR\" or AWARD_TYPE == \"ACTRESS\":\n list_of_data_dictionaries.append(sag_data)\n list_of_data_dictionaries.append(gg_drama_data)\n list_of_data_dictionaries.append(gg_musical_data)\n list_of_data_dictionaries.append(cc_data)\n list_of_data_dictionaries.append(bafta_data)\n elif AWARD_TYPE == \"DIRECTOR\":\n list_of_data_dictionaries.append(gg_data)\n list_of_data_dictionaries.append(cc_data)\n list_of_data_dictionaries.append(bafta_data)\n\n entry_dictionary = {}\n\n # i is an index variable for the headers at the beginning of this file.\n # We start it 
at 4 to skip the first 4 headers which we don't need.\n i = 4\n for dictionary in list_of_data_dictionaries:\n for name,awards in dictionary.iteritems():\n # Check if the person was nominated for an oscar that year. If\n # they weren't, we don't care about them, so skip them.\n if name not in oscar_nomination_lines[oscar_line_index:oscar_line_index+5]:\n continue\n\n if name not in entry_dictionary:\n entry = Entry(Year=year)\n\n # NOTE: Uncomment this to get data upto each year\n # entry.update(headers[i], awards[0])\n # entry.update(headers[i+1], awards[1])\n ##\n\n # NOTE: Comment this if you want to get data upto each year\n # headers[i] will be the \"Won\" category for the award,\n # whereas headers[i] will be the \"Lost\" category.\n # If they won any awards, set \"Won\" category to however\n # many awards they won. If they didn't win any, set the\n # \"Lost\" category to 1.\n if awards > 0:\n entry.update(headers[i], awards)\n else:\n entry.update(headers[i+1], 1)\n ##\n\n if name == oscar_nomination_lines[oscar_line_index]:\n entry.update(\"OscarWon\", 1)\n else:\n entry.update(\"OscarLost\", 1)\n\n entry_dictionary[name] = entry\n else:\n # NOTE: Uncomment this to get data upto each year\n # entry_dictionary[name].update(headers[i], awards[0])\n # entry_dictionary[name].update(headers[i+1], awards[1])\n ##\n\n # NOTE: Comment this if you want to get data upto each year\n # headers[i] will be the \"Won\" category for the award,\n # whereas headers[i] will be the \"Lost\" category.\n # If they won any awards, set \"Won\" category to however\n # many awards they won. If they didn't win any, set the\n # \"Lost\" category to 1.\n if awards > 0:\n entry_dictionary[name].update(headers[i], awards)\n else:\n entry_dictionary[name].update(headers[i+1], 1)\n ##\n\n # Each major header has a \"Won\" and a \"Lost\" category. By\n # incrementing by 2, we go from one major header to the next\n # (e.g., SAG to GG).\n i += 2\n\n for name,entry in entry_dictionary.iteritems():\n csv_lines.append(name + \",\" + str(entry))\n\n # There are 5 oscar nominations per year, so add 5 to go the nominations\n # for the next year.\n oscar_line_index += 5\n\n # Make sure that all Oscar nominations are included in the final csv file\n counter = 0\n\n for i in range(0, len(oscar_nomination_lines)):\n if i == len(csv_lines):\n break\n\n name = oscar_nomination_lines[i].strip()\n\n names_already_in_csv = []\n for n in csv_lines[counter:counter+5]:\n names_already_in_csv.append(n.split(\",\")[0].strip())\n\n if name in names_already_in_csv:\n if (i+1) % 5 == 0:\n counter += 5\n\n continue\n else:\n new_line = name + \",\" + csv_lines[counter].split(\",\")[1] + \",\"\n\n if name == oscar_nomination_lines[counter]:\n # WINNER\n new_line += \"1,0,\"\n else:\n # LOSER\n new_line += \"0,1,\"\n\n for header in headers[4:]:\n new_line += \"0,\"\n\n new_line = new_line[:-1] + \"\\n\"\n\n csv_lines.insert(i, new_line)\n\n if (i+1 )% 5 == 0:\n counter += 5\n\n return csv_lines",
"def collect_all_genomes():\n\n def str2num(s,cat=False,force=True):\n \"\"\"\n Converts string to integer\n eg. ensembl92 to 92\n\n :param s: string\n :param cat: Whether to concatenate detected integers. eg. 20,23 to 2023\n :param force: If True, ignores decimal point error. \n \"\"\"\n import re \n if '.' in s and not force:\n raise ValueError(f\"A string can only be converted to integeres, found a '.' in {s}\")\n n=re.findall(r'\\d+',s)\n if len(n)==0:\n raise ValueError(\"No digits found in string {}\".format(s)) \n elif len(n)==1:\n return int(n[0])\n else:\n if cat:\n return int(''.join(n))\n else:\n return n\n\n from glob import glob\n from os.path import dirname,basename,exists\n import numpy as np\n import pandas as pd\n from pyensembl.species import normalize_species_name,Species\n \n # here's how I get the .cache directory eg. '/home/user/.cache/pyensembl'\n import datacache\n pyensembl_cache_dir=f\"{dirname(datacache.get_data_dir())}/pyensembl\" #FIXME if genomes are installed at other places than .cache\n\n # all the assemblies\n assemblies=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/*\")]\n # dataframe that contains all the info (and can be exported as a tsv).\n dspecies=pd.DataFrame(columns=['latin name','release','synonymn','assembly'])\n # assempy to release min max dict needed as an input to create Species object\n assembly2releasesminmax={}\n # following loop populates the dataframe \n genomei=0\n for assembly in assemblies:\n releases=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/{assembly}/*\")]\n for release in releases:\n releasei=str2num(release) #FIXME is realease is a float\n genome_dir=f\"{pyensembl_cache_dir}/{assembly}/{release}\"\n genome_files=glob(f\"{genome_dir}/*\")\n is_genome_installed=True if len(genome_files)>4 else False #FIXME need more than 4 (.gz) files to be strict\n if is_genome_installed:\n dspecies.loc[genomei,'assembly']=assembly\n dspecies.loc[genomei,'release']=releasei\n dspecies.loc[genomei,'synonymn']=basename(genome_files[0]).split('.')[0]\n dspecies.loc[genomei,'latin name']=normalize_species_name(dspecies.loc[genomei,'synonymn'])\n genomei+=1\n # following loop generates the Species object\n for spc in dspecies['latin name'].unique():\n assembly2releases={}\n for assembly in dspecies.loc[(dspecies['latin name']==spc),'assembly'].unique():\n d=dspecies.loc[((dspecies['latin name']==spc) & (dspecies['assembly']==assembly)),:]\n assembly2releases[assembly]=d['release'].min(),d['release'].max() #FIXME if MAX_ENSEMBL_RELEASE very important and has to be used\n Species.register(\n latin_name=spc,\n synonyms=dspecies.loc[(dspecies['latin name']==spc),'synonymn'].unique().tolist(),\n reference_assemblies=assembly2releases)\n Species.dspecies=dspecies\n return Species",
"def get_local_name_csv_matches(self, file_name):\n\n matches = self.match_repo.parse_csv_match_history(file_name)\n for match in matches:\n for hero in match[0:9]:\n print hero\n return matches",
"def main(path, csvfile, recursive):\n diags = []\n if os.path.isdir(path):\n if recursive:\n paths = mpiops.run_once(glob.glob, os.path.join(path, '**', '*.tif'), recursive=recursive)\n else:\n paths = mpiops.run_once(glob.glob, os.path.join(path, '*.tif'))\n else:\n paths = [path]\n if mpiops.chunk_index == 0:\n if not paths:\n print(f\"No geotiffs found.\")\n else:\n print(f\"Found {len(paths)} geotiffs, retrieving information...\")\n this_chunk_paths = np.array_split(paths, mpiops.chunks)[mpiops.chunk_index]\n for f in this_chunk_paths:\n diag = diagnostic(f)\n if diag is not None:\n diags.append(diag)\n print(f\"Processed '{f}'\")\n\n diags = mpiops.comm.gather(diags, root=0)\n mpiops.comm.barrier()\n\n if mpiops.chunk_index == 0:\n diags = list(itertools.chain.from_iterable(diags))\n\n fieldnames = ['name', 'driver', 'crs', 'dtype', 'width', \n 'height', 'bands', 'nodata', 'ndv_percent', \n 'min', 'max']\n if csvfile:\n with open(csvfile, 'w') as f:\n w = csv.DictWriter(f, fieldnames=fieldnames)\n w.writeheader()\n for diag in diags:\n w.writerow(diag)\n print(printer(diag))\n else:\n for diag in diags:\n print(printer(diag))",
"def process_csv(self, file_name: str):",
"def parse_and_analyze():\r\n all_soc_state_data=[]\r\n \r\n with open('./input/h1b_input.csv', encoding='utf-8', mode='r') as h1b:\r\n # Retrieve header row as we need to find the soc name, state, \r\n # as well as the case status column indexes\r\n header=next(h1b).split(';')\r\n # Use regex to find soc_name, state, and case status indexes \r\n # since different files have slightly different column names\r\n status_index=[i for i, s in enumerate(header) if re.findall('.*STATUS.*', s, re.IGNORECASE)][0]\r\n soc_name_index=[i for i, s in enumerate(header) if re.findall('.*SOC.*NAME.*', s, re.IGNORECASE)][0]\r\n state_index=[i for i, s in enumerate(header) if re.findall('.*WORK.*STATE.*', s, re.IGNORECASE)][0]\r\n # Then loop through each row and just take the certified rows with soc name and state\r\n soc_and_state=[]\r\n for row in h1b:\r\n row_list=row.split(';')\r\n if row_list[status_index]=='CERTIFIED':\r\n soc_and_state.append((row_list[soc_name_index], row_list[state_index]))\r\n all_soc_state_data.append(soc_and_state)\r\n # Now return all the data\r\n return(all_soc_state_data)",
"def read_and_process_csv(self, path=default_path):\n # Basic read-in of data\n hiking_data = pd.read_csv(path)\n\n # Split between solo and group hikes (Re-do this with dplython or something?)\n solo_hikes = hiking_data.loc[hiking_data.loc[:, 'Solo'] == True, :]\n group_hikes = hiking_data.loc[hiking_data.loc[:, 'Solo'] == False, :]\n\n return hiking_data, solo_hikes, group_hikes",
"def test_loci():\r\n\r\n temp_loci = []\r\n print('starting dictionary search test')\r\n\r\n with open('processed_loci.csv') as csvfile:\r\n lociCSV = csv.reader(csvfile, delimiter=',')\r\n print('reading processed_loci.csv')\r\n\r\n for row in lociCSV:\r\n temp_loci.append(row)\r\n errors = 0\r\n\r\n with open('test_numbers.csv') as f:\r\n testCSV = csv.reader(f, delimiter=',')\r\n print('reading test_numbers.csv')\r\n for row in testCSV:\r\n print('checking this val:', row[0])\r\n for x in temp_loci:\r\n if x[0] == row[0]:\r\n if x[1] == row[1]:\r\n print('value matches')\r\n else:\r\n print('error!')\r\n errors = errors + 1\r\n\r\n print('number of errors:', errors)\r\n print('done')",
"def load_data(f):\n import csv\n with open(f, newline='') as csvfile:\n ecgreader = csv.reader(csvfile, delimiter=' ')\n time, voltage, high_voltages = organize_data(ecgreader, f)\n return time, voltage, high_voltages",
"def parse_csv( fileLocation, Unique_Features=None ):\r\n\r\n print 'Parsing CSV: ', fileLocation\r\n\r\n headers = []\r\n\r\n # Change directory to the folder\r\n os.chdir( os.path.dirname( fileLocation) )\r\n\r\n # grab all the rows in the csv file\r\n coordFileToList = [ line for line in csv.reader(open( fileLocation, 'r')) ]\r\n\r\n # grabs the csv column headers \r\n headers = coordFileToList[0]\r\n\r\n # deletes list item, because it will get in the way later on if we don't\r\n del coordFileToList[0]\r\n\r\n # file name without the extension and minus the path\r\n rootFileName = os.path.splitext( os.path.basename( fileLocation ))[0]\r\n\r\n # If parsing csv for polylines or polygons\r\n if Unique_Features == True:\r\n\r\n # For kml_to_line and kml_to_polygon. Not used for kml_to_point. List \r\n # of names of the unique spatial feataures in csv file\r\n Unique_Features = list(set( [each[0] for each in coordFileToList ] ))\r\n\r\n return coordFileToList, rootFileName, headers, Unique_Features\r\n\r\n else:\r\n \r\n return coordFileToList, rootFileName, headers",
"def list_dataset():\n # Accumlator lists\n subtopics = []\n source_sentences = []\n target_sentences = []\n golds = []\n for topic in os.listdir(STE_PATH):\n # Dont process non directories\n if not os.path.isdir(os.path.join(STE_PATH, topic)):\n continue\n # Keep track of the current subtopic\n # being processed within the csv file\n current_subtopic = None\n source_sentence = None\n with open(os.path.join(STE_PATH, topic, 'results.csv'), 'r') as fp:\n LOG.debug('Processing topic: %s', topic)\n reader = csv.reader(fp)\n # ignore header\n next(reader)\n for row in reader:\n # ignore empty row\n if not row:\n continue\n sentence, _, _, gold, order, subtopic = row\n subtopic = os.path.join(topic, subtopic)\n if order == '0':\n current_subtopic = subtopic\n source_sentence = sentence\n continue\n if order == '1' and subtopic == current_subtopic:\n # Filter out sentences which have >100 words\n source_sentence = source_sentence.decode('utf-8').encode('ascii', 'ignore')\n sentence = sentence.decode('utf-8').encode('ascii', 'ignore')\n source_sents = [sent\n for sent in sent_tokenize(source_sentence)\n if len(word_tokenize(sent)) <= 100]\n target_sents = [sent\n for sent in sent_tokenize(sentence)\n if len(word_tokenize(sent)) <= 100]\n if source_sents and target_sents:\n subtopics.append(subtopic)\n source_sentences.append(source_sents)\n target_sentences.append(target_sents)\n golds.append(1 if gold == 'True' else 0)\n current_subtopic = None\n source_sentence = None\n LOG.debug('Completed processing topic: %s', topic)\n return subtopics, source_sentences, target_sentences, golds",
"def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'nuclG', 'nuclC', 'nuclN', 'nbTr'])\n dicoTmp = {}\n fastaOrigin = SeqIO.parse(open(filename),'fasta')\n for fasta in fastaOrigin:\n name, seq = fasta.id, str(fasta.seq)\n if name.split(':')[5]:\n location = name.split(':')[1]\n listTrBt = name.split(':')[5].split(';')[0].split('|')\n dicoTrBt = { TrBt.split('-')[0] : TrBt.split('-')[1] for TrBt in listTrBt}\n for tr in dicoTrBt:\n if not ((location == '3UTR' or location == '5UTR') and\n rF.addTypeTr(dicoTrBt[tr]) != 'Coding'):\n #if the annotation is good\n LocID = location+'-'+dicoTrBt[tr]\n if LocID not in dicoTmp:\n dicoTmp[LocID] = {'LocID' : LocID,\n 'Location' : location,\n 'Biotype' : dicoTrBt[tr],\n 'nuclA' : 0, 'nuclT' : 0,\n 'nuclG' : 0, 'nuclC' : 0,\n 'nuclN' : 0, 'nbTr' : [tr]}\n dicoTmp[LocID].update({'nuclA' : dicoTmp[LocID]['nuclA'] + seq.count('A'),\n 'nuclT' : dicoTmp[LocID]['nuclT'] + seq.count('T'),\n 'nuclG' : dicoTmp[LocID]['nuclG'] + seq.count('G'),\n 'nuclC' : dicoTmp[LocID]['nuclC'] + seq.count('C'),\n 'nuclN' : dicoTmp[LocID]['nuclN'] + seq.count('N')})\n dicoTmp[LocID]['nbTr'].append(tr)\n listTodf = []\n for locID in dicoTmp:\n listTodf.append(dicoTmp[locID])\n dfTmp = pd.DataFrame(listTodf)\n df = df.append(dfTmp)\n return(df)",
"def handle_bulk(self):\n\n csv_file = self.args.bulk_run\n # mol_data_from_csv handles defaults if no argument is given\n bulk_data = mol_data_from_csv(csv_file)\n\n names = list(bulk_data)\n\n home = os.getcwd()\n\n for name in names:\n printf(f\"Analysing: {name}\\n\")\n\n # Get pdb from smiles or name if no smiles is given\n if bulk_data[name][\"smiles\"] is not None:\n smiles_string = bulk_data[name][\"smiles\"]\n self.molecule = Ligand(smiles_string, name)\n\n else:\n # Initialise molecule, ready to add configs to it\n self.molecule = Ligand(f\"{name}.pdb\")\n\n # Read each row in bulk data and set it to the molecule object\n for key, val in bulk_data[name].items():\n setattr(self.molecule, key, val)\n\n self.molecule.skip = None\n\n # Using the config file from the .csv, gather the .ini file configs\n file_configs = Configure().load_config(self.molecule.config_file)\n for key, val in file_configs.items():\n setattr(self.molecule, key, val)\n\n # Handle configs which are changed by terminal commands\n for key, val in vars(self.args).items():\n if val is not None:\n setattr(self.molecule, key, val)\n\n # Now that all configs are stored correctly: execute.\n Execute(self.molecule)\n\n os.chdir(home)\n\n sys.exit(\n f\"{COLOURS.green}Bulk analysis complete.{COLOURS.end}\\n\"\n \"Use QUBEKit -progress to view the completion progress of your molecules\"\n )",
"def info_csv(path):\n petitions = 0\n cache = {}\n \n try:\n #Abrimos el CSV en la ruta dada\n with open(path) as f:\n reader = csv.DictReader(f) #Obtenemos el Iterable con cada fila como diccionario.\n \n for row in reader:\n\n #Del diccionario de cada fila obtenemos el origen (default mex) y el origen.\n origin = row.get('origin', 'MEX')\n destination = row.get('destination', row.get('destino'))\n\n if 'origin' in row:\n origin = getCityName(origin)\n destination = getCityName(destination)\n\n #Obtenemos la información del clima del origen y del destino. \n info_origin = request_cache(origin, cache, petitions)\n info_destination = request_cache(destination, cache, petitions)\n\n if petitions == 58:\n time.sleep(60)\n petitions = 0\n \n #Le daremos formato a la información y la imprimiremos.\n out = OutputFormat.output_format(info_origin,info_destination)\n print(out)\n\n except FileNotFoundError as error:\n print('File not found: ' + path)",
"def parse_csv(self, data_dir):\n metacsvfilepath = os.path.join(data_dir, self.metadataFile)\n #print(metacsvfilepath)\n with open(metacsvfilepath, 'r', newline='') as f:\n reader = csv.reader(f)\n #parsed_recordings = list(reader, delimiter=',')[1:]\n ids = []\n labels = []\n for line in reader:\n # line is a list of ['id', 'dataset', 'label']\n rec_id, label = line[0], line[-1]\n ids.append(rec_id)\n labels.append(label)\n\n return ids, labels"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Searches the NCBI Assembly database using the NCBI E-utilities API; returns the assembly accession number, BioProject accession number, and assembly publication date
|
import requests #HTTP client used to send the E-utilities requests
from bs4 import BeautifulSoup #parses the XML responses; the 'xml' feature needs an XML parser such as lxml installed
def getAssemblyinfo(speciesName):
#---------------Create e-search URL & send request to API-----------------------
base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
url = base_url + "esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a576fe4da08" % speciesName # creates e-search url
api_request = requests.get(url) #sends request to api
# grab the response content
xml_content = api_request.content
# parse with beautiful soup
soup = BeautifulSoup(xml_content, 'xml')
#--------------Get Query Key & Web Environments from xml------------------------
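#NOTE: because the esearch call above sets usehistory=y, the matching UIDs are stored
#on the Entrez History server; QueryKey and WebEnv are the handle to that stored result
#set and are what the follow-up esummary request references instead of explicit IDs.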
query_str = soup.find('QueryKey') #finds query key tag from xml
querykey = str(query_str) #converts result to string variable
querykey_num = querykey[10:len(querykey)-11] #parses out query key from string
web_env_str = soup.find('WebEnv') #finds web environment tag from xml
web_env = str(web_env_str) #converts result to string variable
web_env_num = web_env[8:len(web_env)-9] #parses out web environment from string
#-----------------Create e-summary URL and send request to API------------------
summary_url = base_url + "esummary.fcgi?db=assembly&query_key=%s&WebEnv=%s&api_key=f1e800ad255b055a691c7cf57a576fe4da08" % (querykey_num, web_env_num)
api_request_summary = requests.get(summary_url) #sends request to api
# grab the response content
xml_content_summary = api_request_summary.content
# parse with beautiful soup
soup_summary = BeautifulSoup(xml_content_summary, 'xml')
#------------Gets desired information from Assembly database--------------------
accession_str = soup_summary.find('AssemblyAccession') #finds Assembly accession number tag from xml
accession = str(accession_str) #converts result to string variable
accession_num = accession[19:len(accession)-20] #parses out accession number from string
bioproject_str = soup_summary.find('BioprojectAccn') #finds bioproject tag from xml
bioproject = str(bioproject_str) #converts result to string variable
bioproject_num = bioproject[16:len(bioproject)-17] #parses out bioproject number from string
pubdate_str = soup_summary.find('AsmReleaseDate_GenBank') #finds Assembly publication date tag from xml
pubdate = str(pubdate_str) #converts result to string variable
pubdate_num = pubdate[24:len(pubdate)-37] #parses out assembly publication date from string
return accession_num, bioproject_num, pubdate_num
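#------------Hypothetical usage example (not part of the original function)-----
#A minimal sketch, assuming network access and the imports above; the species
#name "Apis mellifera" is only an illustrative placeholder.
accession, bioproject, released = getAssemblyinfo("Apis mellifera")
print(accession, bioproject, released)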
|
[
"def download_assemblies(self):\n n = 0\n for name, barcode in self.__barcodes.items():\n # Put the assembly barcode into an URL for database search\n url = \"http://enterobase.warwick.ac.uk/api/v2.0/%s/assemblies?barcode=%s&limit=50\" % (self.__db, barcode)\n try:\n # Request the URL of the target assembly FASTA file\n response = urllib.request.urlopen(self.__create_request(url))\n data = json.load(response)\n fasta_url = data[\"Assemblies\"][0][\"download_fasta_link\"]\n try:\n # Request the FASTA file using its URL\n fasta_response = urllib.request.urlopen(self.__create_request(fasta_url))\n if fasta_response.getcode() == 200:\n if self.__append_barcode:\n fasta_out_filename = self.__outdir + \"__\".join([name, barcode]) + \".fna\"\n else:\n fasta_out_filename = self.__outdir + name + \".fna\"\n with open(fasta_out_filename, \"w\") as fasta_out:\n fasta_out.write(fasta_response.read()) # Successfully downloaded a FASTA file\n else:\n self.__fasta_error_log.append([name, barcode, fasta_url, fasta_response.getcode(), \"Failed download with an invalid server response.\"])\n except urllib.error.HTTPError as Response_error:\n self.__fasta_error_log.append([name, barcode, str(fasta_url), \"Failed download as %s, %s\" % (Response_error.read(), Response_error.msg)])\n except urllib.error.HTTPError as Response_error:\n self.__barcode_error_log.append([name, barcode, \"Query address: \" + str(url), \"Reason: %s, %s\" % (Response_error.read(), Response_error.msg)])\n n += 1\n sys.stdout.write(\"\\nProgress: %s with barcode %s (%i in total) has been processed.\" % (name, barcode, n))\n sys.stdout.flush()\n time.sleep(self.__time)\n\n # Write error messages\n self.__write_error_messages(self.__barcode_error_log, \"barcode_errors\")\n self.__write_error_messages(self.__barcode_error_log, \"fasta_errors\")\n \n return",
"def load_assemblies():\n summary = \"assembly_summary.txt\"\n print(\"Fetching assembly file\")\n urllib.request.urlretrieve(\"ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/\" + summary, summary)\n with open(summary, 'r') as sum:\n sum.readline()\n assemblies =[row for row in csv.DictReader(sum, delimiter='\\t')]\n return assemblies",
"def ncbi_GetSeqsFromAcc(self, table_name, column_name='acc_id'):\n\n print(\n \"\"\"\n #########################################################\\n\n ############ NCBI ncbi accession to fasta #############\\n\n #########################################################\\n\n \"\"\")\n\n Entrez.api_key = self._key\n Entrez.email = self._email\n\n\n try:\n conn = sqlite3.connect(self.sqlite_db)\n cur = conn.cursor()\n except sqlite3.Error as e:\n print(e)\n return\n\n #set up sqlite\n cur.execute('''CREATE TABLE IF NOT EXISTS Acc2Seq (rowid INT PRIMARY KEY, acc_id TEXT, seq_description TEXT, sequences TEXT)''')\n\n try:\n # select the field contain acc id\n cur.execute('''SELECT {} FROM {}'''.format(column_name, table_name))\n all_acc = cur.fetchall()\n len_all_acc = len(all_acc)\n except sqlite3.Error as e:\n print(\"Error. Reading {} error\\n\".format(table_name))\n print(e)\n return\n\n cur.execute('''SELECT acc_id FROM Acc2Seq''')\n existed = cur.fetchall()\n if len(existed) > 0:\n existed_id = [i[0] for i in existed]\n else:\n existed_id = []\n\n if len_all_acc > 0:\n\n all_acc_flat = [i[0] for i in all_acc]\n\n print('\\nTotal Accession Numbers: {}\\n'.format(len(all_acc_flat)))\n n = len(existed_id)\n for i in range(len(existed_id), len_all_acc):\n\n current_id = all_acc_flat[i]\n\n if current_id in existed_id:\n print(\"{} existed in the database\".format(current_id))\n continue\n else:\n\n #Total number of records from the input set to be retrieved, up to a maximum of 10,000. \n if current_id == 'NA':\n cur.execute('''INSERT INTO Acc2Seq VALUES (?,?,?,?)''', (n, current_id, 'NA', 'NA'))\n conn.commit()\n n += 1\n else:\n try:\n fetch = Entrez.efetch(db = self.ncbi_db, id = current_id, retmode = 'text', rettype = 'fasta')\n outs = fetch.read()\n except:\n print(\"Entrez Error\")\n\n\n fetch.close()\n fasta = outs.strip().split('\\n')\n\n if len(fasta) > 1:\n\n header = fasta[0]\n acc, descript = header.split()[0].replace('>', ''), ' '.join(header.split()[1:])\n seqs = ''.join(fasta[1:])\n\n print('Saving into database:')\n print('{} Acc_ID: {}\\n'.format(i+1, acc))\n cur.execute('''INSERT INTO Acc2Seq VALUES (?,?,?,?)''', (n, current_id, descript, seqs))\n conn.commit()\n n += 1\n time.sleep(3)\n\n else:\n print('Empty sequences')\n cur.execute('''INSERT INTO Acc2Seq VALUES (?,?,?,?)''', (n, current_id, \"NA\", \"NA\"))\n conn.commit()\n n += 1\n time.sleep(3)\n else:\n print(\"No Accession ID in the Database. Please Check!\")\n return\n\n cur.close()\n conn.close()\n print('\\nCompleted!\\n')\n return self.track.append('P3')",
"def query_ads_bibtex(self, bibcodes):\n bc_ads = BibtexCollection()\n try:\n bibtex_string = ads.ExportQuery(bibcodes=bibcodes, format='bibtex').execute()\n bc_ads.read_from_string(bibtex_string)\n bibcodes_found = bc_ads.bibcode_entries.keys()\n nresults = len(bibcodes_found)\n nbibcodes = len(bibcodes)\n if nresults==nbibcodes:\n return bc_ads\n else:\n print('WARNING: did not retrieve bibtex for {} bibcodes:'.format(nresults-nbibcodes))\n for bc in bibcodes:\n if not bc in bibcodes_found:\n print(bc)\n \n except ads.exceptions.APIResponseError:\n print('ERROR: ADS APIResponseError. You probably exceeded your rate limit.')\n raise",
"def get_ExACDB_info(exac_col):\n data = {item:item for item in exac_col}\n url = 'http://exac.hms.harvard.edu/rest/bulk/variant'\n\n print('retrieving query requests from ExAC db REST API, pleaese be patient ...')\n query = requests.post(url, json = data)\n exac_dic = json.loads(query.text)\n\n consequence, alleleFreq, ENSGs, ENSTs = [], [], [], []\n for k in exac_dic:\n if exac_dic[k]['consequence'] != None:\n consequence.append(','.join(list(exac_dic[k]['consequence'].keys()))) \n else:\n consequence.append('NA') \n if 'allele_freq' in exac_dic[k]['variant']:\n alleleFreq.append(exac_dic[k]['variant']['allele_freq']) \n else:\n alleleFreq.append('NA') \n if 'genes' in exac_dic[k]['variant']:\n ENSGs.append(','.join(exac_dic[k]['variant']['genes'])) \n else:\n ENSGs.append('NA') \n if 'transcripts' in exac_dic[k]['variant']:\n ENSTs.append(','.join(exac_dic[k]['variant']['transcripts'])) \n else:\n ENSTs.append('NA')\n \n return (consequence, alleleFreq, ENSGs, ENSTs)",
"def parse_ncbi_taxonomy(self,\n taxonomy_dir,\n refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix):\n\n # parse organism name\n self._assembly_organism_name(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix + '_organism_names.tsv')\n\n # parse metadata file and taxonomy files\n assembly_to_tax_id = self._assembly_to_tax_id(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file)\n\n node_records = self._read_nodes(\n os.path.join(taxonomy_dir, 'nodes.dmp'))\n print('Read %d node records.' % len(node_records))\n\n name_records = self._read_names(\n os.path.join(taxonomy_dir, 'names.dmp'))\n print('Read %d name records.' % len(name_records))\n\n # traverse taxonomy tree for each assembly\n taxonomy_file = output_prefix + '_unfiltered_taxonomy.tsv'\n fout = open(taxonomy_file, 'w')\n\n print('Number of assemblies: %d' % len(assembly_to_tax_id))\n for assembly_accession, tax_id in assembly_to_tax_id.items():\n # traverse taxonomy tree to the root which is 'cellular organism' for genomes,\n # 'other sequences' for plasmids, and 'unclassified sequences' for metagenomic libraries\n taxonomy = []\n cur_tax_id = tax_id\n\n if cur_tax_id not in name_records:\n print('[Warning] Assembly %s has an invalid taxid: %s' % (assembly_accession, tax_id))\n continue\n\n roots = ['cellular organisms', 'other sequences',\n 'unclassified sequences', 'Viruses', 'Viroids']\n while name_records[cur_tax_id].name_txt not in roots:\n if cur_tax_id == '1':\n print('[Error] TaxId %s reached root of taxonomy tree: %s' % (tax_id, taxonomy))\n sys.exit(-1)\n\n try:\n node_record = node_records[cur_tax_id]\n\n if node_record.rank in Taxonomy.rank_labels:\n rank_index = Taxonomy.rank_labels.index(\n node_record.rank)\n rank_prefix = Taxonomy.rank_prefixes[rank_index]\n elif node_record.rank == 'subspecies':\n rank_prefix = 'sb__'\n else:\n # unrecognized rank\n rank_prefix = 'x__'\n if node_record.rank == 'superkingdom':\n rank_prefix = 'd__'\n\n taxonomy.append(\n rank_prefix + name_records[cur_tax_id].name_txt)\n\n cur_tax_id = node_record.parent_tax_id\n except:\n print(traceback.format_exc())\n print(taxonomy)\n\n taxonomy.reverse()\n taxa_str = ';'.join(taxonomy)\n fout.write('%s\\t%s\\n' % (assembly_accession, taxa_str))\n\n fout.close()\n\n self.standardize_taxonomy(taxonomy_file,\n output_prefix + '_standardized.tsv')",
"def imdb_metadata_search(content):\n\n name = content.simple_name().encode('utf-8')\n title, year = common.detect_title_year(name)\n\n logging.info(\"Finding IMDB ID for content named '%s'\" % name)\n\n if year is None:\n logging.info(\"Couldn't split '%s' into title/year, skipping IMDb ID detection.\" % name)\n return None\n\n year = int(year)\n\n years = \"%d,%d\" % (year - 1, year + 1)\n\n url = u'http://www.imdb.com/List'\n url = u'http://www.imdb.com/search/title?'\n\n data = {'title': title,\n 'release_date': years}\n\n try:\n url = url + urllib.urlencode(data)\n except Exception, e:\n logging.error(\"Could not URL encode %s\" % str(data))\n return None\n data = None\n _, page = common.get_page(url, data)\n\n if page is None:\n logging.info(\"Couldn't get IMDb search page for '%s'\" % name)\n return None\n\n # Cleanup dumbass IMDB stuff\n page = page.replace('rate\"\"', 'rate\"').replace('\"src', '\" src')\n\n document = B(page)\n\n results = document.findAll('tr', attrs={'class': re.compile('detailed')})\n\n\n for result_node in results:\n extras = {}\n\n link = result_node.findChild('a')\n if link is None:\n logging.error(\"Could not get link node of result for '%s', skipping.\" % name)\n continue\n\n extras['imdb_uri'] = imdb_uri = link.get('href')\n imdb_id_match = re.match('/title/(?P<imdb_id>tt[0-9]+)/*', imdb_uri)\n if not imdb_id_match:\n continue\n\n extras['imdb_id'] = imdb_id_match.groupdict()['imdb_id']\n\n imdb_name = link.get('title')\n imdb_title, imdb_year = common.detect_title_year(imdb_name)\n imdb_title = imdb_title.encode('utf-8')\n\n extras['imdb_canonical_title'] = imdb_name\n extras['imdb_title'] = imdb_name\n if imdb_year is not None:\n extras['imdb_year'] = imdb_year\n\n if not common.title_match(title, imdb_title):\n logging.info(\"Skipping IMDB title '%s' because it didn't match '%s'\" % (imdb_title, title))\n continue\n\n thumb_node = result_node.findChild('td', attrs={'class':'image'})\n thumb_image = thumb_node.findChild('img') if thumb_node is not None else None\n if thumb_image:\n extras['imdb_thumb_uri'] = thumb_image.get('src')\n extras['imdb_thumb_width'] = thumb_image.get('width')\n extras['imdb_thumb_height'] = thumb_image.get('height')\n\n runtime_node = result_node.findChild('span', attrs={'class': 'runtime'})\n if runtime_node:\n runtime_match = re.match(\"(?P<length>\\d+) mins.\", runtime_node.string)\n if runtime_match:\n extras['imdb_length'] = int(runtime_match.groupdict()['length'])\n\n outline_node = result_node.findChild('span', attrs={'class': 'outline'})\n if outline_node and outline_node.string:\n extras['imdb_outline'] = outline_node.string.strip()\n\n genre_node = result_node.findChild('span', attrs={'class': 'genre'})\n if genre_node:\n extras['imdb_genres'] = [genre.string for genre in genre_node.findAll('a')]\n\n rating_list = result_node.findChild('div', attrs={'class': re.compile('rating-list')})\n if rating_list is not None:\n rating_id = rating_list.get('id')\n # rating_id is formatted like:\n # tt0463854|imdb|7.1|7.1|advsearch\n elts = rating_id.split('|')\n\n if len(elts) < 2 or elts[1] != 'imdb':\n logging.warning(\"IMDB has changed. 
Got rating-list that is '%s' for '%s'\" % (rating_id, name))\n continue\n\n imdb_id = elts[0]\n if imdb_id != extras['imdb_id']:\n logging.warning(\"WARNING: Rating IMDB ID does not match the one we got from the link node.\")\n\n if len(elts) > 2:\n imdb_rating = elts[3]\n try:\n imdb_rating = float(imdb_rating)\n extras['imdb_rating'] = imdb_rating\n except Exception, e:\n logging.error(\"Exception while parsing rating: %s\" % e)\n # If we got here, we have an ID and we should return all the metadata we\n # found.\n return extras\n return None",
"def _assembly_to_tax_id(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file):\n\n d = {}\n for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file, ]:\n with open(assembly_file) as f:\n headers = f.readline().strip().split('\\t')\n try:\n taxid_index = headers.index('taxid')\n except:\n # look for taxid on the next line as NCBI sometimes puts\n # an extra comment on the first line\n headers = f.readline().split('\\t')\n taxid_index = headers.index('taxid')\n\n for line in f:\n line_split = line.strip().split('\\t')\n assembly_accession = line_split[0]\n taxid = line_split[taxid_index]\n\n if assembly_accession in d:\n print('[Error] Duplicate assembly accession: %s' % assembly_accession)\n sys.exit(-1)\n\n d[assembly_accession] = taxid\n\n return d",
"def search(scan_params):\n import urllib\n import xmltodict\n\n url = 'http://www.rcsb.org/pdb/rest/search'\n\n queryText = xmltodict.unparse(scan_params, pretty=False)\n queryText = queryText.encode()\n\n req = urllib.request.Request(url, data=queryText)\n f = urllib.request.urlopen(req)\n result = f.read()\n\n if not result:\n warnings.warn('No results were obtained for this search')\n\n idlist = str(result)\n\n return idlist",
"async def fetch(\n session: aiohttp.ClientSession, accession: Union[int, str]\n) -> Optional[dict]:\n params = {\n \"db\": \"nuccore\",\n \"email\": EMAIL,\n \"id\": accession,\n \"retmode\": \"text\",\n \"rettype\": \"gb\",\n \"tool\": TOOL,\n }\n\n async with session.get(FETCH_URL, params=params) as resp:\n body = await resp.text()\n\n if resp.status != 200:\n if \"Failed to retrieve sequence\" not in body:\n logger.warning(\"Unexpected Genbank error: %s\", body)\n\n return None\n\n gb = Bio.SeqIO.read(io.StringIO(body), \"gb\")\n\n data = {\n \"accession\": gb.id,\n \"definition\": gb.description,\n \"sequence\": str(gb.seq),\n \"host\": \"\",\n }\n\n for feature in gb.features:\n if feature.type == \"source\":\n try:\n data[\"host\"] = feature.qualifiers[\"host\"][0]\n except (IndexError, KeyError):\n data[\"host\"] = \"\"\n\n return data",
"def collect_all_genomes():\n\n def str2num(s,cat=False,force=True):\n \"\"\"\n Converts string to integer\n eg. ensembl92 to 92\n\n :param s: string\n :param cat: Whether to concatenate detected integers. eg. 20,23 to 2023\n :param force: If True, ignores decimal point error. \n \"\"\"\n import re \n if '.' in s and not force:\n raise ValueError(f\"A string can only be converted to integeres, found a '.' in {s}\")\n n=re.findall(r'\\d+',s)\n if len(n)==0:\n raise ValueError(\"No digits found in string {}\".format(s)) \n elif len(n)==1:\n return int(n[0])\n else:\n if cat:\n return int(''.join(n))\n else:\n return n\n\n from glob import glob\n from os.path import dirname,basename,exists\n import numpy as np\n import pandas as pd\n from pyensembl.species import normalize_species_name,Species\n \n # here's how I get the .cache directory eg. '/home/user/.cache/pyensembl'\n import datacache\n pyensembl_cache_dir=f\"{dirname(datacache.get_data_dir())}/pyensembl\" #FIXME if genomes are installed at other places than .cache\n\n # all the assemblies\n assemblies=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/*\")]\n # dataframe that contains all the info (and can be exported as a tsv).\n dspecies=pd.DataFrame(columns=['latin name','release','synonymn','assembly'])\n # assempy to release min max dict needed as an input to create Species object\n assembly2releasesminmax={}\n # following loop populates the dataframe \n genomei=0\n for assembly in assemblies:\n releases=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/{assembly}/*\")]\n for release in releases:\n releasei=str2num(release) #FIXME is realease is a float\n genome_dir=f\"{pyensembl_cache_dir}/{assembly}/{release}\"\n genome_files=glob(f\"{genome_dir}/*\")\n is_genome_installed=True if len(genome_files)>4 else False #FIXME need more than 4 (.gz) files to be strict\n if is_genome_installed:\n dspecies.loc[genomei,'assembly']=assembly\n dspecies.loc[genomei,'release']=releasei\n dspecies.loc[genomei,'synonymn']=basename(genome_files[0]).split('.')[0]\n dspecies.loc[genomei,'latin name']=normalize_species_name(dspecies.loc[genomei,'synonymn'])\n genomei+=1\n # following loop generates the Species object\n for spc in dspecies['latin name'].unique():\n assembly2releases={}\n for assembly in dspecies.loc[(dspecies['latin name']==spc),'assembly'].unique():\n d=dspecies.loc[((dspecies['latin name']==spc) & (dspecies['assembly']==assembly)),:]\n assembly2releases[assembly]=d['release'].min(),d['release'].max() #FIXME if MAX_ENSEMBL_RELEASE very important and has to be used\n Species.register(\n latin_name=spc,\n synonyms=dspecies.loc[(dspecies['latin name']==spc),'synonymn'].unique().tolist(),\n reference_assemblies=assembly2releases)\n Species.dspecies=dspecies\n return Species",
"def getpubmedinfo(speciesName):\n#---------------Create e-search URL & send request to API-----------------------\n search_base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'\n \n search_url = search_base_url + \"esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % (speciesName)\n \n api_request = requests.get(search_url) #sends request to api\n \n # grab the response content \n xml_content = api_request.content\n \n # parse with beautiful soup \n soup = BeautifulSoup(xml_content, 'xml')\n \n#--------------Get Query Key & Web Environments from xml------------------------ \n query_str = soup.find('QueryKey') #finds query key tag from xml\n\n querykey = str(query_str) #converts result to string variable\n \n querykey_num = querykey[10:len(querykey)-11] #parses out query key from string\n \n web_env_str = soup.find('WebEnv') #finds web environment tag from xml\n \n web_env = str(web_env_str) #converts result to string variable\n \n web_env_num = web_env[8:len(web_env)-9] #parses out web env from string\n \n#-----------------Create e-link URL and send request to API---------------------\n link_base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/';\n link_url = link_base_url + \"elink.fcgi?dbfrom=assembly&db=pubmed&query_key=%s&WebEnv=%s&linkname=assembly_pubmed&cmd=neighbor_history&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % (querykey_num, web_env_num)\n #print(link_url)\n \n api_request_2 = requests.get(link_url) #sends request to api\n \n # grab the response content \n xml_content_2 = api_request_2.content\n \n # parse with beautiful soup \n soupLink = BeautifulSoup(xml_content_2, 'xml')\n #print(soupLink)\n \n#--------------Get Query Key & Web Environments from xml------------------------\n query_str2 = soupLink.find('QueryKey') #finds query key tag from xml\n #print(query_str2)\n \n querykey2 = str(query_str2) #converts result to string variable\n \n querykey_num2 = querykey2[10:len(querykey2)-11] #parses out query key from string\n #print(querykey_num2)\n \n web_env_str2 = soupLink.find('WebEnv') #finds web env tag from xml\n \n web_env2 = str(web_env_str2) #converts result to string variable\n \n web_env_num2 = web_env2[8:len(web_env2)-9] #parses out web env from string\n \n#-----------------Create e-summary URL and send request to API------------------\n summary_url = search_base_url + \"esummary.fcgi?db=pubmed&query_key=%s&WebEnv=%s&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % (querykey_num2, web_env_num2)\n #print(summary_url)\n \n api_request_summary = requests.get(summary_url) #sends request to api\n \n # grab the response content \n xml_content_summary = api_request_summary.content\n \n # parse with beautiful soup \n soup_summary = BeautifulSoup(xml_content_summary, 'xml')\n #print(soup_summary)\n \n#------------Gets desired information from PubMed database----------------------\n title_str = soup_summary.find('Item', {'Name':\"Title\"}) #finds \"title\" tag from xml \n \n title = str(title_str) #converts result into string variable\n \n title_name = title[33:len(title)-7] #parses out article title from string\n \n title_name_strip = title_name.replace(\",\", \" \")\n \n pubmed_id_str = soup_summary.find('Item', {'Name':\"pubmed\"}) #finds \"pubmed\" tag from xml\n \n pubmed_id = str(pubmed_id_str) #converts result into string variable\n \n pubmed_id_num = pubmed_id[34:len(pubmed_id)-7] #parses out pubmed id from string\n \n return title_name_strip, pubmed_id_num",
"def _assembly_organism_name(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_organism_name_file):\n\n fout = open(output_organism_name_file, 'w')\n for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file]:\n with open(assembly_file) as f:\n f.readline()\n header = f.readline().strip().split('\\t')\n org_name_index = header.index('organism_name')\n\n for line in f:\n line_split = line.strip().split('\\t')\n\n gid = line_split[0]\n if gid.startswith('GCA_'):\n gid = 'GB_' + gid\n else:\n gid = 'RS_' + gid\n org_name = line_split[org_name_index]\n fout.write('%s\\t%s\\n' % (gid, org_name))\n fout.close()",
"def query_ads_bibcode(self, query):\n try:\n paper_query = ads.SearchQuery(**query)\n paper_list = []\n for p in paper_query:\n paper_list.append(p)\n nresults = len(paper_list)\n if nresults==0:\n print('ERROR: Could not find paper on ADS with query {} for paper {}'.format(query, self.name))\n elif nresults==1:\n self.paper = paper_list[0]\n self.bibcode = self.paper.bibcode\n else:\n print('ERROR: Found {} results on ADS with query {} for paper {}:'.format(nresults, query, self.name))\n for p in paper_list:\n print(p.bibcode)\n print('-----')\n except ads.exceptions.APIResponseError:\n print('ERROR: ADS APIResponseError. You probably exceeded your rate limit.')\n self.paper = None\n raise",
"def _get_all_barcodes(accessions=EBI_STUDY_ACCESSIONS):\n acc_from_barcode = {}\n fields_list = ['sample_accession', 'library_name']\n for acc in accessions:\n ebi_info_set = get_ebi_info_set(accession=acc, fields_list=fields_list)\n for sample_info in ebi_info_set:\n # Notes on barcodes: The standard barcode seems to be 9 digits,\n # but many don't match this pattern. Most are probably blanks and\n # other controls. To be safe, we save information for all of them.\n barcode = sample_info['library_name'].split(':')[0]\n acc_from_barcode[barcode] = sample_info['sample_accession']\n return acc_from_barcode",
"def _get_index_assembly(self):\n if not self.__index_by_assembly:\n self.__index_by_assembly = self.__index_data_for_property(self._get_species_data_dao(),\n Species.get_assembly)\n return self.__index_by_assembly",
"def getDiseaseAssoc(databaseName, path, idProt=\"Hepcidin\"):\n\t\n\t\n\t\n\tconnect, cursor = connection(path+\"/\"+databaseName)\n\t#cursor = connect.cursor()\n\t\n\t#PRINT SOME INFORMATIONS\n\tprint(\"SQL: SELECT DISTINCT LOWER(TargetLabel) FROM \"+bcolors.HEADER+\"tname\"+bcolors.ENDC+\" WHERE LOWER(SourceLabel) LIKE LOWER(\\\"%\"+bcolors.HEADER+idProt+bcolors.ENDC+\"%\\\") AND LOWER(SourceEntityType)=LOWER(\\\"p\\\") AND LOWER(TargetEntityType)=LOWER(\\\"i\\\")\")\n\tprint(\"ProtID querry: \"+bcolors.HEADER+idProt+bcolors.ENDC)\n\t\n\t#DO THE MATHS\n\tcursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\")\n\tfor ttuples in cursor.fetchall():\n\t\ttname = ttuples[0]\n\t\tprint(\"Searching assoc in \" +bcolors.HEADER+tname+bcolors.ENDC+ \" ...\")\n\t\t\n\t\tsqlstr = \"SELECT DISTINCT LOWER(TargetLabel) FROM \" +tname+ \" WHERE LOWER(SourceLabel) LIKE LOWER(\\\"%\"+idProt+\"%\\\") AND LOWER(SourceEntityType)=LOWER(\\\"p\\\") AND LOWER(TargetEntityType)=LOWER(\\\"i\\\")\"\n\t\tcursor.execute(sqlstr)\n\t\t\n\t\t#FILE WRITING\n\t\twith open(path+\"/requestResult/\"+idProt+\"_diseaseAssoc_\"+tname+\".txt\", \"w\") as f:\n\t\t\tfor elements in cursor.fetchall():\n\t\t\t\tf.write(elements[0]+\"\\n\")\n\n\tconnect.commit()\n\tcloseConnection(cursor, connect)",
"def list_assemblies(file):\n return [assembly[\"name\"] for assembly in file[\"bioAssemblyList\"]]",
"def getPASAinformation(configFile, DBname, folder, genome):\n # run some checks of the data to make sure it is same assembly\n mysqlDB, mysqlUser, mysqlPass = (None,) * 3\n pasaconf_file = os.path.join(PASA, \"pasa_conf\", \"conf.txt\")\n if os.environ.get(\"PASACONF\"):\n pasaconf_file = os.environ.get(\"PASACONF\").strip()\n with open(pasaconf_file, \"r\") as pasaconf:\n for line in pasaconf:\n line = line.replace(\"\\n\", \"\")\n if line.startswith(\"MYSQLSERVER=\"):\n mysqlDB = line.split(\"=\")[-1]\n if line.startswith(\"MYSQL_RW_USER=\"):\n mysqlUser = line.split(\"=\")[-1]\n if line.startswith(\"MYSQL_RW_PASSWORD=\"):\n mysqlPass = line.split(\"=\")[-1]\n pasaExistingGFF = os.path.join(folder, \"existing_pasa.gff3\")\n cmd = [\n os.path.join(PASA, \"scripts\", \"pasa_asmbl_genes_to_GFF3.dbi\"),\n \"-M\",\n DBname + \":\" + mysqlDB,\n \"-p\",\n mysqlUser + \":\" + mysqlPass,\n ]\n lib.runSubprocess(cmd, folder, lib.log, capture_output=pasaExistingGFF)\n if not lib.checkannotations(pasaExistingGFF):\n return False\n # now get number of genes and list of contigs\n pasaContigs = []\n geneCount = 0\n with open(pasaExistingGFF, \"r\") as infile:\n for line in infile:\n if line.startswith(\"\\n\"):\n continue\n cols = line.split(\"\\t\")\n if not cols[0] in pasaContigs:\n pasaContigs.append(cols[0])\n if cols[2] == \"gene\":\n geneCount += 1\n # now get fasta headers from genome\n genomeContigs = []\n with open(genome, \"r\") as fasta:\n for line in fasta:\n if line.startswith(\">\"):\n line = line.replace(\"\\n\", \"\")\n line = line.replace(\">\", \"\")\n if line not in genomeContigs:\n genomeContigs.append(line)\n # now make sure PASA headers in genome\n genomeContigs = set(genomeContigs)\n for contig in pasaContigs:\n if contig not in genomeContigs:\n return False\n lib.log.info(\n \"Existing PASA database contains {:,} gene models, validated FASTA headers match\".format(\n geneCount\n )\n )\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Searches the NCBI PubMed database using the NCBI E-utilities API; returns the article title and PubMed ID
|
import requests #HTTP client used to send the E-utilities requests
from bs4 import BeautifulSoup #parses the XML responses; the 'xml' feature needs an XML parser such as lxml installed
def getpubmedinfo(speciesName):
#---------------Create e-search URL & send request to API-----------------------
search_base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
search_url = search_base_url + "esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a576fe4da08" % (speciesName)
api_request = requests.get(search_url) #sends request to api
# grab the response content
xml_content = api_request.content
# parse with beautiful soup
soup = BeautifulSoup(xml_content, 'xml')
#--------------Get Query Key & Web Environments from xml------------------------
query_str = soup.find('QueryKey') #finds query key tag from xml
querykey = str(query_str) #converts result to string variable
querykey_num = querykey[10:len(querykey)-11] #parses out query key from string
web_env_str = soup.find('WebEnv') #finds web environment tag from xml
web_env = str(web_env_str) #converts result to string variable
web_env_num = web_env[8:len(web_env)-9] #parses out web env from string
#-----------------Create e-link URL and send request to API---------------------
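#NOTE: elink with dbfrom=assembly, db=pubmed, linkname=assembly_pubmed and
#cmd=neighbor_history maps the Assembly record found above to its linked PubMed
#record(s) and stores that result on the History server, returning a new
#QueryKey/WebEnv pair for the esummary request below.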
link_base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/' #same E-utilities base URL as search_base_url
link_url = link_base_url + "elink.fcgi?dbfrom=assembly&db=pubmed&query_key=%s&WebEnv=%s&linkname=assembly_pubmed&cmd=neighbor_history&api_key=f1e800ad255b055a691c7cf57a576fe4da08" % (querykey_num, web_env_num)
#print(link_url)
api_request_2 = requests.get(link_url) #sends request to api
# grab the response content
xml_content_2 = api_request_2.content
# parse with beautiful soup
soupLink = BeautifulSoup(xml_content_2, 'xml')
#print(soupLink)
#--------------Get Query Key & Web Environments from xml------------------------
query_str2 = soupLink.find('QueryKey') #finds query key tag from xml
#print(query_str2)
querykey2 = str(query_str2) #converts result to string variable
querykey_num2 = querykey2[10:len(querykey2)-11] #parses out query key from string
#print(querykey_num2)
web_env_str2 = soupLink.find('WebEnv') #finds web env tag from xml
web_env2 = str(web_env_str2) #converts result to string variable
web_env_num2 = web_env2[8:len(web_env2)-9] #parses out web env from string
#-----------------Create e-summary URL and send request to API------------------
summary_url = search_base_url + "esummary.fcgi?db=pubmed&query_key=%s&WebEnv=%s&api_key=f1e800ad255b055a691c7cf57a576fe4da08" % (querykey_num2, web_env_num2)
#print(summary_url)
api_request_summary = requests.get(summary_url) #sends request to api
# grab the response content
xml_content_summary = api_request_summary.content
# parse with beautiful soup
soup_summary = BeautifulSoup(xml_content_summary, 'xml')
#print(soup_summary)
#------------Gets desired information from PubMed database----------------------
title_str = soup_summary.find('Item', {'Name':"Title"}) #finds "title" tag from xml
title = str(title_str) #converts result into string variable
title_name = title[33:len(title)-7] #parses out article title from string
title_name_strip = title_name.replace(",", " ")
pubmed_id_str = soup_summary.find('Item', {'Name':"pubmed"}) #finds "pubmed" tag from xml
pubmed_id = str(pubmed_id_str) #converts result into string variable
pubmed_id_num = pubmed_id[34:len(pubmed_id)-7] #parses out pubmed id from string
return title_name_strip, pubmed_id_num
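#------------Hypothetical usage example (not part of the original function)-----
#A minimal sketch, assuming network access and the imports above; the species
#name "Apis mellifera" is only an illustrative placeholder.
article_title, pubmed_id = getpubmedinfo("Apis mellifera")
print(article_title, pubmed_id)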
|
[
"def search_for(search, reldate=None, mindate=None, maxdate=None,\n batchsize=100, delay=2, callback_fn=None,\n start_id=0, max_ids=None):\n class ResultParser(sgmllib.SGMLParser):\n # Parse the ID's out of the XML-formatted page that PubMed\n # returns. The format of the page is:\n # [...]\n # <Id>...</Id>\n # [...]\n def __init__(self):\n sgmllib.SGMLParser.__init__(self)\n self.ids = []\n self.in_id = 0\n def start_id(self, attributes):\n self.in_id = 1\n def end_id(self):\n self.in_id = 0\n _not_pmid_re = re.compile(r'\\D')\n def handle_data(self, data):\n if not self.in_id:\n return\n # If data is just whitespace, then ignore it.\n data = string.strip(data)\n if not data:\n return\n # Everything here should be a PMID. Check and make sure\n # data really is one. A PMID should be a string consisting\n # of only integers. Should I check to make sure it\n # meets a certain minimum length?\n if self._not_pmid_re.search(data):\n raise ValueError, \\\n \"I expected an ID, but %s doesn't look like one.\" % \\\n repr(data)\n self.ids.append(data)\n\n params = {\n 'db' : 'pubmed',\n 'term' : search,\n 'reldate' : reldate,\n 'mindate' : mindate,\n 'maxdate' : maxdate\n }\n for k, v in params.items():\n if v is None:\n del params[k]\n\n limiter = RequestLimiter(delay)\n ids = []\n while max_ids is None or len(ids) < max_ids:\n parser = ResultParser()\n \n # Check to make sure enough time has passed before my\n # last search. If not, then wait.\n limiter.wait()\n\n start = start_id + len(ids)\n max = batchsize\n if max_ids is not None and max > max_ids - len(ids):\n max = max_ids - len(ids)\n\n params['retstart'] = start\n params['retmax'] = max\n h = NCBI.esearch(**params)\n parser.feed(h.read())\n ids.extend(parser.ids)\n if callback_fn is not None:\n # Call the callback function with each of the new ID's.\n for id in parser.ids:\n callback_fn(id)\n if len(parser.ids) < max or not parser.ids: # no more id's to read\n break\n return ids",
"def getPubmedDoi(pmid):\n url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&email=maximilianh@gmail.com&retmode=xml&id=%s' % pmid\n xp = maxXml.XmlParser(url=url)\n doi = xp.getTextFirst(\"PubmedArticle/PubmedData/ArticleIdList/ArticleId\", reqAttrDict={'IdType':'doi'}, default=None)\n logging.debug(\"Found DOI: %s\" % doi)\n return doi",
"def get_abstract(pmid):\n abs_url = BASE + \"efetch.fcgi?db=pubmed&api_key=%s&id=%s&rettype=abstract&retmode=text\" % (APIKEY, pmid)\n try:\n abstract = requests.get(abs_url).content.decode('utf-8')\n return abstract\n except:\n e = sys.exc_info()[0]\n print(e)",
"def get_publication(pmid, escape=True):\n handle = Entrez.efetch(db=\"pubmed\", id=pmid, retmode=\"xml\")\n try:\n for rec in Entrez.parse(handle, escape=escape):\n return _parse_entrez_record(rec, escape)\n except ValueError:\n handle = Entrez.efetch(db=\"pubmed\", id=pmid, retmode=\"xml\")\n data = Entrez.read(handle, escape=escape)\n record = data['PubmedArticle'] + data['PubmedBookArticle']\n if record:\n return _parse_entrez_record(record[0], escape)\n finally:\n handle.close()",
"def get_metadata(pmid):\n summary_url = BASE + \"esummary.fcgi?db=pubmed&api_key=%s&id=%s&retmode=json\" % (APIKEY, pmid)\n summary = json.loads(requests.get(summary_url).content.decode('utf-8'))\n db_dict = {PMID: pmid}\n db_keys = [JOURNAL, PUB_DATE, TITLE]\n keys = ['fulljournalname', 'sortpubdate', 'title']\n for i in range(3):\n try:\n db_dict[db_keys[i]] = summary.get('result').get(pmid).get(keys[i])\n except AttributeError:\n if summary.get(ERROR) == API_RATE_LIMIT:\n raise APIRateLimitError\n break\n sortpubdate = datetime.strptime(db_dict.get(PUB_DATE), '%Y/%m/%d %H:%M')\n db_dict[PUB_DATE] = sortpubdate\n return db_dict",
"def searchPubmed(searchFields, sortby='relevance', num=\"10\", resultsFormat='json'):\n pubmedQueryUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=TERM&retmode=json&retmax=NUM'\n if len(searchFields) > 1:\n query = \" [MeSH Terms] AND \".join(searchFields)\n else:\n query = searchFields[0] + \" [MeSH Terms] AND\"\n try:\n url = pubmedQueryUrl.replace('TERMS', query).replace('NUM', num)\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n response = http.request(\"GET\", urllib.parse.quote(url))\n jsonResponse = response.read()\n resultDict = json.loads(jsonResponse)\n except urllib3.exceptions.InvalidHeader:\n raise urllib3.exceptions.InvalidHeader(\"Invalid HTTP header provided. {}.\\nURL:{}\".format(err,url))\n except urllib3.exceptions.ConnectTimeoutError:\n raise urllib3.exceptions.ConnectTimeoutError(\"Connection timeout requesting URL. {}.\\nURL:{}\".format(err,url))\n except urllib3.exceptions.ConnectionError:\n raise urllib3.exceptions.ConnectionError(\"Protocol error when downloading. {}.\\nURL:{}\".format(err,url))\n except urllib3.exceptions.DecodeError:\n raise urllib3.exceptions.DecodeError(\"Decoder error when downloading. {}.\\nURL:{}\".format(err,url))\n except urllib3.exceptions.SecurityWarning:\n raise urllib3.exceptions.SecurityWarning(\"Security warning when downloading. {}.\\nURL:{}\".format(err,url))\n except urllib3.exceptions.ProtocolError:\n raise urllib3.exceptions.ProtocolError(\"Protocol error when downloading. {}.\\nURL:{}\".format(err,url))\n except ftplib.error_reply as err:\n raise ftplib.error_reply(\"Exception raised when an unexpected reply is received from the server. {}.\\nURL:{}\".format(err,url))\n except ftplib.error_temp as err:\n raise ftplib.error_temp(\"Exception raised when an error code signifying a temporary error. {}.\\nURL:{}\".format(err,url))\n except ftplib.error_perm as err:\n raise ftplib.error_perm(\"Exception raised when an error code signifying a permanent error. {}.\\nURL:{}\".format(err,url))\n except ftplib.error_proto:\n raise ftplib.error_proto(\"Exception raised when a reply is received from the server that does not fit the response specifications of the File Transfer Protocol. {}.\\nURL:{}\".format(err,url))\n except Exception as err:\n raise Exception(\"Something went wrong. {}.\\nURL:{}\".format(err,url))\n\n result = []\n if 'esearchresult' in resultDict:\n result = resultDict['esearchresult']\n\n return result",
"def imdb_metadata_search(content):\n\n name = content.simple_name().encode('utf-8')\n title, year = common.detect_title_year(name)\n\n logging.info(\"Finding IMDB ID for content named '%s'\" % name)\n\n if year is None:\n logging.info(\"Couldn't split '%s' into title/year, skipping IMDb ID detection.\" % name)\n return None\n\n year = int(year)\n\n years = \"%d,%d\" % (year - 1, year + 1)\n\n url = u'http://www.imdb.com/List'\n url = u'http://www.imdb.com/search/title?'\n\n data = {'title': title,\n 'release_date': years}\n\n try:\n url = url + urllib.urlencode(data)\n except Exception, e:\n logging.error(\"Could not URL encode %s\" % str(data))\n return None\n data = None\n _, page = common.get_page(url, data)\n\n if page is None:\n logging.info(\"Couldn't get IMDb search page for '%s'\" % name)\n return None\n\n # Cleanup dumbass IMDB stuff\n page = page.replace('rate\"\"', 'rate\"').replace('\"src', '\" src')\n\n document = B(page)\n\n results = document.findAll('tr', attrs={'class': re.compile('detailed')})\n\n\n for result_node in results:\n extras = {}\n\n link = result_node.findChild('a')\n if link is None:\n logging.error(\"Could not get link node of result for '%s', skipping.\" % name)\n continue\n\n extras['imdb_uri'] = imdb_uri = link.get('href')\n imdb_id_match = re.match('/title/(?P<imdb_id>tt[0-9]+)/*', imdb_uri)\n if not imdb_id_match:\n continue\n\n extras['imdb_id'] = imdb_id_match.groupdict()['imdb_id']\n\n imdb_name = link.get('title')\n imdb_title, imdb_year = common.detect_title_year(imdb_name)\n imdb_title = imdb_title.encode('utf-8')\n\n extras['imdb_canonical_title'] = imdb_name\n extras['imdb_title'] = imdb_name\n if imdb_year is not None:\n extras['imdb_year'] = imdb_year\n\n if not common.title_match(title, imdb_title):\n logging.info(\"Skipping IMDB title '%s' because it didn't match '%s'\" % (imdb_title, title))\n continue\n\n thumb_node = result_node.findChild('td', attrs={'class':'image'})\n thumb_image = thumb_node.findChild('img') if thumb_node is not None else None\n if thumb_image:\n extras['imdb_thumb_uri'] = thumb_image.get('src')\n extras['imdb_thumb_width'] = thumb_image.get('width')\n extras['imdb_thumb_height'] = thumb_image.get('height')\n\n runtime_node = result_node.findChild('span', attrs={'class': 'runtime'})\n if runtime_node:\n runtime_match = re.match(\"(?P<length>\\d+) mins.\", runtime_node.string)\n if runtime_match:\n extras['imdb_length'] = int(runtime_match.groupdict()['length'])\n\n outline_node = result_node.findChild('span', attrs={'class': 'outline'})\n if outline_node and outline_node.string:\n extras['imdb_outline'] = outline_node.string.strip()\n\n genre_node = result_node.findChild('span', attrs={'class': 'genre'})\n if genre_node:\n extras['imdb_genres'] = [genre.string for genre in genre_node.findAll('a')]\n\n rating_list = result_node.findChild('div', attrs={'class': re.compile('rating-list')})\n if rating_list is not None:\n rating_id = rating_list.get('id')\n # rating_id is formatted like:\n # tt0463854|imdb|7.1|7.1|advsearch\n elts = rating_id.split('|')\n\n if len(elts) < 2 or elts[1] != 'imdb':\n logging.warning(\"IMDB has changed. 
Got rating-list that is '%s' for '%s'\" % (rating_id, name))\n continue\n\n imdb_id = elts[0]\n if imdb_id != extras['imdb_id']:\n logging.warning(\"WARNING: Rating IMDB ID does not match the one we got from the link node.\")\n\n if len(elts) > 2:\n imdb_rating = elts[3]\n try:\n imdb_rating = float(imdb_rating)\n extras['imdb_rating'] = imdb_rating\n except Exception, e:\n logging.error(\"Exception while parsing rating: %s\" % e)\n # If we got here, we have an ID and we should return all the metadata we\n # found.\n return extras\n return None",
"def getPubMedXML(pmid):\n link = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&format=xml&id='+str(pmid)\n r_text = makeRequest(link)\n return r_text",
"def from_pubmed_id(cls, pubmed_id):\n def get_doi(ref):\n for art_id in ref['articleids']:\n if art_id['idtype'] == 'doi':\n return enc(art_id['value'])\n\n def get_page_range(ref):\n rng = enc(ref['pages']).split('-')\n if len(rng) == 2 and len(rng[1]) < len(rng[0]):\n # map ranges like \"2730-43\" to 2730,2743 not 2730, 43\n rng[1] = rng[0][:len(rng[0]) - len(rng[1])] + rng[1]\n # Handle one page or empty page range\n if len(rng) == 1:\n rng = rng[0]\n if rng == '':\n rng = None\n return rng\n # JSON values are always Unicode, but on Python 2 we want non-Unicode\n # strings, so convert to ASCII\n if sys.version_info[0] < 3: # pragma: no cover\n def enc(s):\n return s.encode('ascii')\n else:\n def enc(s):\n return s\n\n url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'\n '?db=pubmed&retmode=json&rettype=abstract&id=%s' % pubmed_id)\n fh = urllib2.urlopen(url)\n j = json.load(fh)\n fh.close()\n ref = j['result'][str(pubmed_id)]\n authors = [enc(x['name']) for x in ref['authors']\n if x['authtype'] == 'Author']\n\n # PubMed authors are usually of the form \"Lastname AB\" but PDB uses\n # \"Lastname, A.B.\" so map one to the other if possible\n r = re.compile(r'(^\\w+.*?)\\s+(\\w+)$')\n\n def auth_sub(m):\n return m.group(1) + \", \" + \"\".join(initial + \".\"\n for initial in m.group(2))\n authors = [r.sub(auth_sub, auth) for auth in authors]\n\n return cls(pmid=pubmed_id, title=enc(ref['title']),\n journal=enc(ref['source']),\n volume=enc(ref['volume']) or None,\n page_range=get_page_range(ref),\n year=enc(ref['pubdate']).split()[0],\n authors=authors, doi=get_doi(ref))",
"def doi_to_pubmed(doi):\n \n # convert brackets in DOIs to dashes, this makes the search work\n doi = re.sub(\"\\(|\\)\", \"-\", doi)\n ncbi = \"http://www.ncbi.nlm.nih.gov\"\n url = \"{}/pubmed/?term={}&report=uilist&format=text\".format(ncbi, doi)\n \n headers = {}\n response, status_code, headers = open_url(url, headers)\n \n if status_code != 200:\n sys.exit(\"failed to access the pubmed search function at {}, \" \\\n \"returned status code: {}, with response:\".format(url, \\\n status_code, response))\n \n # the response is return as a xml list, there should only be one entry\n response = ET.fromstring(response)\n response = response.text.split(\"\\n\")\n response = [ x for x in response if x != \"\" ]\n \n # What if there are multiple pubmed IDs for an article? I'll handle this\n # when it happens\n assert len(response) == 1\n \n return response[0]",
"def title_search (self, title):\n meta = None\n timing = 0.0\n message = None\n t0 = time.time()\n try:\n query = \"query.bibliographic={}\".format(urllib.parse.quote(title))\n url = self._get_api_url(query)\n\n response = requests.get(url).text\n json_response = json.loads(response)\n\n items = json_response[\"message\"][\"items\"]\n first_item = items[0] if len(items) > 0 else {}\n titles = first_item.get(\"title\", []) \n result_title = titles[0] if len(titles) > 0 else None\n\n if self.title_match(title, result_title):\n raw_meta = first_item\n meta = dict()\n if 'title' in raw_meta:\n meta['title'] = raw_meta[\"title\"]\n else:\n meta['title'] = None\n \n if 'DOI' in raw_meta:\n meta['doi'] = raw_meta[\"DOI\"]\n else:\n meta['doi'] = None\n \n if 'container-title' in raw_meta:\n meta['journal'] = raw_meta[\"container-title\"][0]\n else:\n meta['journal'] = None\n \n if 'ISSN' in raw_meta:\n meta['issn'] = raw_meta[\"ISSN\"][0]\n else:\n meta['issn'] = None\n\n if \"published-print\" in raw_meta:\n meta['year'] = raw_meta[\"published-print\"]['date-parts'][0][0] \n else:\n meta['year'] = None\n \n if 'author' in raw_meta:\n meta['authors'] = raw_meta[\"author\"]\n else:\n meta['authors'] = None\n \n if 'URL' in raw_meta:\n meta['url'] = raw_meta[\"URL\"]\n else:\n meta['url'] = None\n # meta = raw_meta\n if self.parent.logger:\n self.parent.logger.debug(meta)\n except: \n print(traceback.format_exc())\n meta = None\n message = f\"ERROR: {title}\"\n print(message) \n \n timing = self._mark_elapsed_time(t0)\n return _ScholInfraResponse_Crossref(self, meta, timing, message)",
"def find_related(pmid):\n class ResultParser(sgmllib.SGMLParser):\n # Parse the ID's out of the HTML-formatted page that PubMed\n # returns. The format of the page is:\n # [...]\n # <Link>\n # <Id>######</Id>\n # <Score>######</Score>\n # [...]\n # </Link>\n # [...]\n def __init__(self):\n sgmllib.SGMLParser.__init__(self)\n self.ids = []\n self.in_link = 0\n self.in_id = 0\n def start_id(self, attributes):\n self.in_id = 1\n def end_id(self):\n self.in_id = 0\n def start_link(self, attributes):\n self.in_link = 1\n def end_link(self):\n self.in_link = 0\n _not_pmid_re = re.compile(r'\\D')\n def handle_data(self, data):\n if not self.in_link or not self.in_id:\n return\n # Everything here should be a PMID. Check and make sure\n # data really is one. A PMID should be a string consisting\n # of only integers. Should I check to make sure it\n # meets a certain minimum length?\n if self._not_pmid_re.search(data):\n raise ValueError, \\\n \"I expected an ID, but '%s' doesn't look like one.\" % \\\n repr(data)\n self.ids.append(data)\n\n parser = ResultParser()\n if type(pmid) is type([]):\n pmid = string.join(pmid, ',')\n h = NCBI.elink(dbfrom='pubmed', id=pmid)\n parser.feed(h.read())\n return parser.ids",
"def get_wikidata(person):\n eprint(person)\n url = wd + urllib.parse.quote_plus(person.strip(\".\"))\n rsp = requests.get(url)\n meta = rsp.json()\n found = meta[\"search\"]\n if len(found) == 1:\n return found[0][\"id\"]\n else:\n for result in found:\n desc = result.get(\"description\")\n if desc is None:\n continue\n for phrase in (\"writer\", \"novelist\", \"author\"):\n if phrase.lower() in desc.lower():\n return result[\"id\"]\n else:\n # eprint(\"*** \" + desc)\n pass",
"def extract_pubmed_articles(pubmed_ids):\n articles = []\n Entrez.email = my_email\n Entrez.api_key = mike_api_key\n\n for pubmed_id in tqdm(pubmed_ids[4225:]):\n article = {}\n try:\n handle = Entrez.efetch(db='pubmed', rettype='medline', retmode='text', id=pubmed_id)\n pulled_article = [*Medline.parse(handle)]\n article['pubmed_id'] = pubmed_id\n article['title'] = pulled_article[0].get('TI')\n try:\n article['created_date'] = pulled_article[0].get('CRDT')[0]\n except:\n article['created_date'] = np.nan\n article['key_words'] = pulled_article[0].get('OT')\n article['mesh_terms'] = pulled_article[0].get('MH')\n article['abstract'] = pulled_article[0].get('AB')\n articles.append(article)\n\n except:\n next\n\n append_article_to_csv(article, save_filepath='../data', filename='pubmed_articles_cancer_failsafe.csv')\n time.sleep(0.4) # use this to avoid exceeding the PubMed max pull of 3 URL requests per second\n\n return articles",
"def get_searched_publications(WebEnv, QueryKey, ids=None, escape=True):\n if isinstance(ids, str):\n ids = [ids]\n records = []\n query = {\n 'db': 'pubmed',\n 'webenv': WebEnv,\n 'query_key': QueryKey,\n 'retmode': 'xml'\n }\n if ids:\n query['ids'] = ids\n handle = Entrez.efetch(**query)\n try:\n for record in Entrez.parse(handle, escape=escape):\n record = _parse_entrez_record(record, escape)\n if record:\n records.append(record)\n except ValueError: # newer Biopython requires this to be Entrez.read\n handle = Entrez.efetch(**query)\n data = Entrez.read(handle, escape=escape)\n for record in data['PubmedArticle'] + data['PubmedBookArticle']:\n record = _parse_entrez_record(record, escape)\n # Entrez.read does not use the ids query key so we have to do this ourselves\n if record and (ids and record['pmid'] in ids or not ids):\n records.append(record)\n return records",
"def add_pubmed_info(nanopub: Nanopub) -> Nanopub:\n\n # print(json.dumps(nanopub, indent=4))\n\n if \"nanopub\" in nanopub:\n if \"citation\" in nanopub[\"nanopub\"] and nanopub[\"nanopub\"][\"citation\"]:\n if \"database\" in nanopub[\"nanopub\"][\"citation\"]:\n if nanopub[\"nanopub\"][\"citation\"][\"database\"][\"name\"].lower() == \"pubmed\":\n\n pmid = nanopub[\"nanopub\"][\"citation\"][\"database\"][\"id\"]\n if pmid:\n # pubmed = bel.nanopub.pubmed.get_pubmed(pmid)\n pubmed = get_pubmed_json(pmid)\n if pubmed:\n if pubmed[\"article\"].get(\"authors\", False):\n nanopub[\"nanopub\"][\"citation\"][\"authors\"] = pubmed[\"article\"][\n \"authors\"\n ]\n\n if pubmed[\"article\"].get(\"title\", False):\n nanopub[\"nanopub\"][\"citation\"][\"title\"] = pubmed[\"article\"][\"title\"]\n\n if pubmed[\"article\"].get(\"journal_title\", False):\n nanopub[\"nanopub\"][\"citation\"][\"source_name\"] = pubmed[\"article\"][\n \"journal_title\"\n ]\n\n if pubmed[\"article\"].get(\"pub_date\", False):\n nanopub[\"nanopub\"][\"citation\"][\"date_published\"] = pubmed[\n \"article\"\n ][\"pub_date\"]\n\n if pubmed[\"article\"].get(\"abstract\", False):\n nanopub[\"nanopub\"][\"metadata\"][\"gd_abstract\"] = pubmed[\"article\"][\n \"abstract\"\n ]\n import json\n return nanopub",
"def parse(self, pmid, *a, **kw):\n\n def to_medline(article):\n abbr = article.Journal.ISOAbbreviation\n pubdate = u' '.join(list(article.Journal.JournalIssue.PubDate))\n volume = article.Journal.JournalIssue.Volume\n issue = article.Journal.JournalIssue.Issue\n pages = article.Pagination.MedlinePgn\n result = u'%s %s;%s(%s):%s.' % (abbr, pubdate, volume, issue, pages)\n return result\n\n def to_author_list(article):\n author = article.AuthorList.Author\n if not isinstance(author, list):\n author = [author]\n return [u'%s %s' % (a.LastName, a.Initials) for a in author]\n\n def to_ids(id_):\n # assume info and miriam are all valid\n miriam_base = u'urn:miriam:pubmed:%s'\n info_base = u'info:pmid/%s'\n return [miriam_base % id_, info_base % id_]\n\n # XXX need to verify that we already have this item.\n\n try:\n raw = self.service.run_eFetch(db=self.db, id=pmid)\n except SOAPTimeoutError:\n # XXX handle timeout\n raise\n except Error:\n # XXX any SOAP error\n raise\n except:\n # Other errors will just be raised.\n raise\n\n # using info for now\n obj_id = str('pmid-%s' % pmid)\n article = raw.PubmedArticle.MedlineCitation.Article\n\n citation = Citation(obj_id)\n citation.ids = to_ids(pmid)\n citation.title = unicode(article.ArticleTitle)\n citation.creator = to_author_list(article)\n citation.issued = unicode(article.Journal.JournalIssue.PubDate.Year)\n try:\n # XXX sometimes this is not available.\n citation.abstract = unicode(article.Abstract.AbstractText)\n except:\n citation.abstract = u''\n citation.bibliographicCitation = to_medline(article)\n\n return [citation]",
"def __getitem__(self, id):\n # First, check to see if enough time has passed since my\n # last query.\n self.limiter.wait()\n \n try:\n handle = NCBI.efetch(\n db=\"pubmed\", id=id, retmode='text', rettype='medlars')\n except IOError, x:\n # raise a KeyError instead of an IOError\n # XXX I really should distinguish between a real IOError and\n # if the id is not in the database.\n raise KeyError, x\n if self.parser is not None:\n return self.parser.parse(handle)\n return handle.read()",
"def get_wikidata_id_text(wikidata_id, result_rdf=None):\n article_root = \"https://en.wikipedia.org/wiki/\"\n summary_root = \"https://en.wikipedia.org/api/rest_v1/page/summary/\"\n wiki_root = \"https://en.wikipedia.org/\"\n # Get wikidata data\n wikidata_uri, wikidata_url = get_wikidata_uri(wikidata_id)\n print(\"wikidata_uri: %s\"%(wikidata_uri,), file=sys.stderr)\n print(\"wikidata_url: %s\"%(wikidata_url,), file=sys.stderr)\n wikidata_rdf = get_rdf_graph(wikidata_url, format=\"turtle\")\n # print(wikidata_rdf.serialize(format='turtle', indent=4), file=sys.stdout)\n summary_url = None\n summary_data = None\n place_article = None\n if wikidata_rdf:\n # Find reference to english Wikipedia article\n #\n # <https://en.wikipedia.org/wiki/Opole> a schema:Article ;\n # schema:about wd:Q92212 ;\n # schema:inLanguage \"en\" ;\n # schema:isPartOf <https://en.wikipedia.org/> ;\n # schema:name \"Opole\"@en .\n #\n place_articles = list(wikidata_rdf[:RDF.type:SCHEMA.Article])\n for a in place_articles:\n if ( (URIRef(wikidata_uri) in wikidata_rdf[a:SCHEMA.about:]) and\n (URIRef(wiki_root) in wikidata_rdf[a:SCHEMA.isPartOf:]) and\n (Literal(\"en\") in wikidata_rdf[a:SCHEMA.inLanguage:]) ):\n place_article = a\n print(\"place_article: %s\"%(place_article,), file=sys.stderr)\n if place_article:\n # Construct URI of summary page (use path segment from wikipedia page)\n if place_article and place_article.toPython().startswith(article_root):\n article_name = place_article[len(article_root):]\n summary_url = summary_root + article_name\n if summary_url:\n # Read Summary as JSON, extract \n # Content-Type: application/json; charset=utf-8; profile=\"https://www.mediawiki.org/wiki/Specs/Summary/1.4.0\"\n # \"extract\": \"Opole (listen) is a city located in southern Poland on the Oder River and the historical capital of Upper Silesia. With a population of approximately 127,792, it is currently the capital of the Opole Voivodeship and, also the seat of Opole County. With its long history dating back to the 8th century, Opole is one of the oldest cities in Poland.\",\n # \"extract_html\": \"<p><b>Opole</b> <span class=\\\"nowrap\\\" style=\\\"font-size:85%;\\\">(<span class=\\\"unicode haudio\\\"><span class=\\\"fn\\\"><span><figure-inline><span><img src=\\\"//upload.wikimedia.org/wikipedia/commons/thumb/8/8a/Loudspeaker.svg/11px-Loudspeaker.svg.png\\\" height=\\\"11\\\" width=\\\"11\\\" srcset=\\\"//upload.wikimedia.org/wikipedia/commons/thumb/8/8a/Loudspeaker.svg/22px-Loudspeaker.svg.png 2x, //upload.wikimedia.org/wikipedia/commons/thumb/8/8a/Loudspeaker.svg/17px-Loudspeaker.svg.png 1.5x\\\" /></span></figure-inline></span>listen</span></span>)</span> is a city located in southern Poland on the Oder River and the historical capital of Upper Silesia. With a population of approximately 127,792, it is currently the capital of the Opole Voivodeship and, also the seat of Opole County. 
With its long history dating back to the 8th century, Opole is one of the oldest cities in Poland.</p>\"\n req_headers = (\n { \"accept\": \"application/json\" \n })\n response = requests.get(summary_url, headers=req_headers)\n response.raise_for_status() # raise an error on unsuccessful status codes\n summary_data = json.loads(response.text)\n if summary_data:\n # Assemble result graph (using EMPlaces structure)\n emp_id_wikidata, emp_uri_wikidata, emp_node_wikidata = get_emplaces_uri_node(wikidata_id, suffix=\"_wikidata\")\n if result_rdf is None:\n result_rdf = Graph()\n result_rdf.bind(\"em\", EM.term(\"\"))\n result_rdf.bind(\"place\", PLACE.term(\"\"))\n summary_text = summary_data[\"extract\"]\n result_rdf.add((emp_node_wikidata, EM.description, Literal(summary_text)))\n return result_rdf"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates dictionary of plant names with appropriate values
|
def appendPlantDict(plantDict, speciesName, Accession_num, bioproject_num, pubdate, title, pubmed_id):
    key = speciesName #sets the dictionary key to the species name
    values = [Accession_num, bioproject_num, pubdate, title, pubmed_id] #sets dictionary values to appropriate information
    plantDict.update({key : values}) #updates existing plantDict for every entry into dictionary
    return plantDict #returns completed dictionary
|
[
"def create_supervisor_to_projects_map():\n\n mode = MAX_SUPERVISOR_PROJECTS * 0.75\n\n supervisor_project_numbers = (\n np.random.triangular(\n left=1,\n mode=mode,\n right=MAX_SUPERVISOR_PROJECTS,\n size=len(supervisor_names),\n )\n .round()\n .astype(int)\n )\n\n supervisor_to_projects = {}\n for name, number_of_projects in zip(\n supervisor_names, supervisor_project_numbers\n ):\n supervisor_to_projects[name] = [\n name + str(i) for i in range(number_of_projects)\n ]\n\n return supervisor_to_projects",
"def production_names(self):\n _prodnames = {}\n for _sched in self._associated_schedules_:\n _prodnames[_sched.ID] = (_sched, _sched.production_name)\n return _prodnames",
"def make_name_dicts() -> list[dict[str, str]]:\n names = []\n df = fjc_create.load_file(instructions.NAMES_PATH)\n df['name_perm'] = df['name_perm'].str.upper()\n df['concat_name1'] = convert_judge_name_series(\n df['year'], \n df['court_num'], \n df['name_perm'], \n dict_type = 1)\n df['concat_name2'] = convert_judge_name_series(\n df['year'], \n df['circuit_num'], \n df['name_perm'], \n dict_type = 1)\n df['concat_name3'] = convert_judge_name_series(\n df['year'], \n df['court_num'], \n df['name_perm'], \n dict_type = 2)\n df['concat_name4'] = convert_judge_name_series(\n df['year'], \n df['circuit_num'], \n df['name_perm'], \n dict_type = 2)\n names.append(df.set_index('concat_name1').to_dict()['judge_name'])\n names.append(df.set_index('concat_name2').to_dict()['judge_name'])\n names.append(df.set_index('concat_name3').to_dict()['judge_name'])\n names.append(df.set_index('concat_name4').to_dict()['judge_name'])\n names.append(df.set_index('name_perm').to_dict()['judge_name'])\n return names",
"def __create_info_dict(self):\n d = ['mtype', 'stype', 'sval']\n keys = ['_'.join(i) for n in range(5) for i in itertools.permutations(d, n) if not len(i) == 0]\n out = {i: {} for i in keys}\n return out",
"def plant_fields(plant):\n values = (\n plant.name,\n plant.org_type,\n plant.planted,\n plant.get_level(\"impact\"),\n plant.get_level(\"prevalence\"),\n plant.get_level(\"trend\"),\n plant.status.get(),\n )\n return [summary_field_format(value) for value in values]",
"def get_plant_name(self):\n if not self.plant_name:\n self.plant_name = self._search('botanische naam')\n return self.plant_name",
"def new_plant(cls):\n # ENHANCEMENT choose randomly\n variety = PLANT_SPECIES['Pisum Sativum']\n plant = Plant(name=variety['name'],\n common_name=variety['common_name'],\n look=\"It's a %s seed\" % variety['common_name'])\n plant._update_look()\n plant.put()\n logging.debug('plant', plant)\n return plant",
"def construct_ingredient_dict(self, scale_factor):\n ingredient_dict = {}\n for item in self.ingredients_list:\n quantity_string = \"\"\n item_name_string = \"\"\n for token in item.split(' '):\n if token in Recipe.measurement_set or Recipe.is_int(token):\n if Recipe.is_int(token):\n token = str(int(token) * scale_factor)\n quantity_string += token + ' '\n else:\n item_name_string += token + ' '\n ingredient_dict[item_name_string.strip()] = quantity_string.strip()\n return ingredient_dict",
"def primer_dict( db, plates ):\n dd = defaultdict(list)\n for well, primer in MASTER_MIX_TEMPLATE.items():\n dd[primer].append(well)\n for plate in plates:\n primers, _ = plate_to_custom_primers( db, plate )\n primer_counts = [ (primer.name, primers[pp])\n for pp in sorted(primers)\n for primer in (pp.fwd_primer, pp.rev_primer) ]\n for primer_name, lines in rows_for_custom_primers( primer_counts ):\n for wells in lines:\n dd[primer_name].extend( wells )\n return dict( (primer_name, sorted(l)) for primer_name, l in dd.items() )",
"def create_patients():\n xchallenge_directory = r\"/home/andrewg/PycharmProjects/assignments/data/PROSTATEx\"\n xchallenge_directory_contents = os.listdir(xchallenge_directory)\n patient_dict = dict()\n for patient_directory in xchallenge_directory_contents:\n patient = \"{}/{}\".format(xchallenge_directory, patient_directory)\n patient_number = int(patient[-4:])\n patient = \"{}/{}\".format(patient, os.listdir(patient)[0]) # There is always one directory in the patient folder\n t2, adc, bval = get_nrrd_files(patient) # Gets three different modalities for the patient\n patient_dict[patient_number] = {}\n current_patient = patient_dict[patient_number]\n current_patient[\"t2\"] = t2\n current_patient[\"adc\"] = adc\n current_patient[\"bval\"] = bval\n return patient_dict",
"def products_infos(self):\n product_dict = {}\n for product in self.products_with_words():\n name_brand_string = product.formatted_name\n name_brand_string += \" \"\n name_brand_string += product.formatted_brands\n list_name_brand_string = name_brand_string.split()\n product_dict[product.id] = list_name_brand_string\n return product_dict",
"def plant_me():\n plants = COMPLIMENTS[\"plant\"]\n print(\"Check out this house plant to make you smile: \" \n + plants.sample().to_string(index = False))",
"def _generate_chip_name_table(table):\n result = {}\n for chip_id in table:\n name = table[chip_id].name\n lookup_name = name.lower().replace('-', '_')\n result[lookup_name] = table[chip_id]\n return result",
"def create_krayt_dragon() -> dict:\r\n monster_dictionary = {'Name': 'Krayt Dragon', 'Class': 'Monster', 'HP': 5,\r\n 'Strength': 18, 'Dexterity': 8,\r\n 'Constitution:': 16, 'Intelligence': 6, 'Wisdom': 8,\r\n 'Charisma': 3, 'XP': 0, 'Inventory': []}\r\n\r\n return monster_dictionary",
"def create_tusken_raider() -> dict:\r\n monster_dictionary = {'Name': 'Tusken Raider', 'Class': 'Monster', 'HP': 5,\r\n 'Strength': 5, 'Dexterity': 10,\r\n 'Constitution:': 12, 'Intelligence': 12, 'Wisdom': 5,\r\n 'Charisma': 2, 'XP': 0, 'Inventory': []}\r\n\r\n return monster_dictionary",
"def create_rename_dict(dataset):\n rename_dict = {}\n for target_name, source_names in standard_names_mapping.items():\n for sn in source_names:\n if (sn in dataset) or (sn in dataset.dims):\n rename_dict[sn] = target_name\n break\n return rename_dict",
"def init_name_maps(self):\n map_1 = {}\n with open(self.organisms_code_names_path) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n for code in content:\n s = code.split('\t')\n map_1[s[0]] = s[1]\n self.short_name_to_full_name_map = map_1\n\n map_2 = {}\n # tree_str = self.newick\n # tree_names = re.split('[\\s+\\n+\\\"\\'\\:\\)\\(\\,\\:\\'\\']', tree_str)\n # tree_names = list(filter(lambda x: x != \"\" and x != ';', tree_names))\n for short_name in self.short_name_to_full_name_map.keys():\n full_name = self.short_name_to_full_name_map[short_name]\n map_2[full_name] = short_name\n\n self.full_name_to_short_name_map = map_2",
"def build_well_dict(self, data, boreID='BoreID'):\n data.loc[:, 'name'] = data.loc[:, boreID].values.astype(str)\n wells = data.name.unique()\n well_dict = {}\n for well in wells:\n well_dict[\"{0}\".format(well)] = data[data.name == well]\n return well_dict",
"def muslim_names() -> Dict[str, List[str]]:\n return load_names(ethnicity=\"muslim\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes plantDict, prints dictionary to .csv file
|
def printFile(plantDict):
    Comma = ','
    Results = open("plantInfo.csv", 'a') #creates or opens existing csv file, appends data to file
    #Results.write("%s%c%s%c%s%c%s%c%s%c%s\n" % ("Species Name", Comma, "Accession Number", Comma,
    #"Bioproject Number", Comma, "Publication Year", Comma, "Article Title", Comma, "Pubmed ID")) #creates headings in csv
    #start for
    for key in plantDict.keys():
        Results.write("%s, %s\n" % (key, plantDict[key])) #writes dictionary to csv file
    #end for
    Results.close() #closes csv file
|
[
"def print_customers(self, dict):\n cust_dict = dict\n for customer_id, loc in cust_dict:\n result = self.data[self.data['customer_no']==customer_id]['timestamp']\n return pd.row_to_csv('result.csv', index=False)",
"def write_data(df_dict, gps_trips_dir):\n for key, value in df_dict.items():\n value.to_csv(gps_trips_dir + key + \".csv\", index=False) # do not output the dataframe index",
"def make_csv(self, filename, freq_dict):\n if filename.endswith('.csv'):\n file = filename\n else:\n file = str(filename)+'.csv'\n fout = open(file, 'w')\n freq_dict = sorted(freq_dict.items())\n for i, v in freq_dict:\n x = ''\n for j in str(i)[0:3]:\n x += j\n x+='.'\n for j in str(i)[4:7]:\n x += j\n fout.write(str(x)+','+str(v)+'\\n')\n fout.close()\n return True",
"def dict2csv(dictionary, filename):\r\n dict = dictionary;\r\n w = csv.writer(open(filename, \"w\"))\r\n for key, val in dict.items():\r\n w.writerow([key, val])",
"def __output_to_file(self):\n\n fn = self.out_dir + self.output_file\n map_keys = self.ordered_keys\n row_count = len(self.output_map[map_keys[0]])\n\n with open(fn, 'w') as csvfile:\n wr = writer(csvfile)\n wr.writerow(map_keys)\n\n for row in range(row_count):\n temp = []\n for col in map_keys:\n temp.append(self.output_map[col][row])\n\n wr.writerow(temp)",
"def single_valued_dict_csv(dic, file):\n keys = list(dic.keys())\n vals = [str(dic[k]) for k in keys]\n with open(file, 'w') as out:\n out.write(', '.join(keys) + '\\n')\n out.write(', '.join(vals) + '\\n')",
"def writeToCSV(dict_with_des, destination_dir):\n\n for label in dict_with_des:\n csv_filename = \"{0}/{1}.csv\".format(destination_dir, label)\n with open(csv_filename, 'w') as csvfile:\n filewriter = csv.writer(csvfile, delimiter='|')\n\n for photoId, description in d[label]:\n photo_for_csv = getPhotoForCsv(photoId)\n filewriter.writerow([photo_for_csv, description])",
"def writePolicyToCSV(policy_dict, policy_keys_to_print=None, file_name='policy'):\n if policy_keys_to_print is not None:\n csv_dict = {key: policy_dict[key] for key in policy_keys_to_print}\n else:\n csv_dict = policy_dict\n with open(file_name + '.csv', 'w+') as csv_file:\n writer = csv.writer(csv_file)\n for key, value in csv_dict.items():\n for subkey, sub_value in value.items():\n writer.writerow([key,subkey,sub_value])",
"def asteroids_csv(self, payload):\n csv_file=open(f\"/tmp/asteroids_{self.today}.csv\",'w', newline='\\n')\n fields=list(payload[0].keys())\n writer=csv.DictWriter(csv_file, fieldnames=fields)\n writer.writeheader()\n writer.writerows(payload)\n csv_file.close()",
"def write_output_csv(file, data):\r\n with open(file, \"w\") as f:\r\n f.write(\"Datum;Sunny Hours\\n\")\r\n for key, value in data.items():\r\n f.write(key + \";\" + \"{:f}\".format(value) + \"\\n\")",
"def addDictionaryToCSV(self, outfilename, out_dict, tool_name):\n\n # write to a CSV\n with open(outfilename, 'a', newline='') as csvfile:\n fieldnames = self.headernames\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n if not (os.path.getsize(outfilename) > 0):\n writer.writeheader()\n {\"tool name\" : tool_name}\n writer.writerow(out_dict)",
"def write_csv(featurized_files):\n with open(TEST_FEAT_DEST,'w',encoding='utf8',newline='') as dest:\n writer = csv.DictWriter(dest, fieldnames=FEATURE_NAMES)\n writer.writeheader()\n for featurized_file in featurized_files:\n for featurized_word in featurized_file:\n row = dict()\n for feature_name in FEATURE_NAMES:\n feature = featurized_word[feature_name]\n if type(feature) is list:\n feature = ''.join(feature)\n row[feature_name] = feature\n writer.writerow(row)",
"def write(self, nodes_stats_dict):\n for node_ip, snapshot in nodes_stats_dict.iteritems():\n with self._prepare_file(node_ip) as csv_file:\n row = converter.stats_to_list(snapshot, self._include_lists)\n csv.writer(csv_file).writerow(row)",
"def _writePlanting(self, fout, pdt):\n fout.write(\"*PLANTING DETAILS\\r\\n\")\n fout.write(\" {0} -99 75.0 25.0 T H 20. 0. 2.0 0. 23. 26.0 3.0 0.0\\r\\n\".format(pdt.strftime(\"%Y%j\")))",
"def csv_export(self, f):\n fieldnames = [\n \"uuid\", \"name\", \"start_time\", \"end_time\",\n \"description\", \"tags\"\n ]\n writer = csv.DictWriter(f, fieldnames)\n writer.writeheader()\n for p in self.projects:\n writer.writerow(p.dict())",
"def printBedDict(bedDict, chromSizesLocation, outputBedFileName, out=\"bed\", separator=\"\\t\"):\n if(out == \"bed\"): outFile = open(outputBedFileName,\"w\")\n elif(out == \"bb\"): outFile = open(outputBedFileName+\"temp\",\"w\")\n for k in constants.getChromList(reference=[bedDict]):\n for e in bedDict[k]: outFile.write(\"\\t\".join([k]+[str(m) for m in e])+\"\\n\")\n outFile.close()\n if(out == \"bb\"): bedToBigBed(outputBedFileName+\"temp\", chromSizesLocation, outputBedFileName, removeBed=True)\n return 0",
"def exportcsvfile(self, callsign, Headers=True):\n csvdata = None\n log = self.parseLog(callsign)\n \"\"\"\n print (log.keys())\n print(log['QSOSUM'].keys())\n print(log['MULTS'])\n print(log['MOQPCAT']['MOQPCAT'])\n \"\"\"\n if (log):\n \n if (Headers): \n csvdata = COLUMNHEADERS\n \n else:\n csvdata = ''\n\n csvdata += ('%s\\t'%(log['HEADER']['CALLSIGN']))\n csvdata += ('%s\\t'%(log['HEADER']['OPERATORS']))\n csvdata += ('%s\\t'%(log['HEADER']['CATEGORY-STATION']))\n csvdata += ('%s\\t'%(log['HEADER']['CATEGORY-OPERATOR']))\n csvdata += ('%s\\t'%(log['HEADER']['CATEGORY-POWER']))\n csvdata += ('%s\\t'%(log['HEADER']['CATEGORY-MODE']))\n csvdata += ('%s\\t'%(log['HEADER']['LOCATION']))\n csvdata += ('%s\\t'%(log['HEADER']['CATEGORY-OVERLAY']))\n csvdata += ('%s\\t'%(log['QSOSUM']['CW']))\n csvdata += ('%s\\t'%(log['QSOSUM']['PH']))\n csvdata += ('%s\\t'%(log['QSOSUM']['DG']))\n csvdata += ('%s\\t'%(log['QSOSUM']['QSOS']))\n csvdata += ('%s\\t'%(log['QSOSUM']['VHF']))\n csvdata += ('%s\\t'%(log['MULTS'])) \n csvdata += ('%s\\t'%(log['SCORE']['SCORE'])) \n csvdata += ('%s\\t'%(log['SCORE']['W0MA'])) \n csvdata += ('%s\\t'%(log['SCORE']['K0GQ'])) \n csvdata += ('%s\\t'%(log['SCORE']['CABRILLO'])) \n csvdata += ('%s\\t'%(log['SCORE']['TOTAL'])) \n csvdata += ('%s\\t'%(log['MOQPCAT']['MOQPCAT']))\n csvdata += ('%s\\t'%(log['MOQPCAT']['DIGITAL']))\n csvdata += ('%s\\t'%(log['MOQPCAT']['VHF']))\n csvdata += ('%s'%(log['MOQPCAT']['ROOKIE']))\n\n for err in log['ERRORS']:\n if ( err != [] ):\n csvdata += err\n \n else:\n csvdata = 'No log data in databas for %s.'%(callsign)\n return csvdata",
"def csv():\n\n print \"HOST,GUEST,PERSISTENT,ACTIVE,LUN,LV,MASK,SNAP,POOL,SIZE\"\n\n for host in config.HOSTS:\n doms = guests(host, alldoms=True)\n\n for dom in doms:\n printcsv(host, dom)",
"def generate_csv_report(config, trial_results):\n\n with open(config['CSV_REPORT_PATH'], 'w', newline='') as file:\n writer = csv.writer(file)\n\n writer.writerow([\"Test Number\", \"Days Survived\", \"Max Vegetation\"])\n\n for trial in trial_results:\n writer.writerow(trial_results[trial].values())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simple helper to prepare data generators factories used to train model
|
def get_data_generators_factories(config):
    categories = config["categories"]
    indices_to_colors_map, void_color = net.data.get_colors_info(len(categories))
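    # Training data combines the VOC and Hariharan datasets; directories and split paths are read from the config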
    voc_train_config = {
        "data_directory": config["voc"]["data_directory"],
        "data_set_path": config["voc"]["train_set_path"],
    }
    hariharan_train_config = {
        "data_directory": config["hariharan"]["data_directory"],
        "data_set_path": config["hariharan"]["train_set_path"],
    }
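    # Combined training samples are wrapped in a generator yielding (image, segmentation labels) batches with augmentation enabled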
    training_data_segmentation_samples_generator_factory = net.data.CombinedPASCALDatasetsGeneratorFactory(
        voc_train_config, hariharan_train_config, config["size_factor"],
        len(config["categories"]))
    training_data_generator_factory = net.data.VOCSegmentationsLabelsSamplesGeneratorFactory(
        training_data_segmentation_samples_generator_factory, indices_to_colors_map, void_color,
        config["train"]["batch_size"], use_augmentation=True)
    validation_data_segmentation_samples_generator_factory = net.data.VOCSamplesGeneratorFactory(
        config["voc"]["data_directory"], config["voc"]["validation_set_path"], config["size_factor"])
    validation_data_generator_factory = net.data.VOCSegmentationsLabelsSamplesGeneratorFactory(
        validation_data_segmentation_samples_generator_factory, indices_to_colors_map, void_color,
        config["train"]["batch_size"], use_augmentation=False)
    return training_data_generator_factory, validation_data_generator_factory
|
[
"def create_generators(args):\r\n common_args = {\r\n 'batch_size': args.batch_size,\r\n 'config': args.config,\r\n 'image_min_side': args.image_min_side,\r\n 'image_max_side': args.image_max_side,\r\n # 'preprocess_image': preprocess_image,\r\n }\r\n\r\n # create random transform generator for augmenting training data\r\n # if args.random_transform:\r\n # transform_generator = random_transform_generator(\r\n # min_rotation=-0.1,\r\n # max_rotation=0.1,\r\n # min_translation=(-0.1, -0.1),\r\n # max_translation=(0.1, 0.1),\r\n # min_shear=-0.1,\r\n # max_shear=0.1,\r\n # min_scaling=(0.9, 0.9),\r\n # max_scaling=(1.1, 1.1),\r\n # flip_x_chance=0.5,\r\n # flip_y_chance=0.5,\r\n # )\r\n # else:\r\n # transform_generator = random_transform_generator(flip_x_chance=0.5)\r\n\r\n if args.dataset_type == 'csv':\r\n train_generator = DataGenerator(\r\n args.annotations,\r\n shuffle=True,\r\n is_train=True,\r\n # args.classes,\r\n # transform_generator=transform_generator,\r\n **common_args\r\n )\r\n if args.val_annotations:\r\n validation_generator = DataGenerator(\r\n args.val_annotations,\r\n shuffle=True,\r\n is_train=False,\r\n **common_args\r\n )\r\n else:\r\n validation_generator = None\r\n else:\r\n raise ValueError(\r\n 'Invalid data type received: {}'.format(args.dataset_type))\r\n\r\n return train_generator, validation_generator\r\n # return train_generator\r",
"def create_generators(args, preprocess_image):\n common_args = {\n 'batch_size': args.batch_size,\n 'config': args.config,\n 'image_min_side': args.image_min_side,\n 'image_max_side': args.image_max_side,\n 'preprocess_image': preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n 'train2017',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'trainval',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n\n args.pascal_path,\n 'test',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n shuffle_groups=False,\n **common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n shuffle_groups=False,\n **common_args\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, validation_generator",
"def init_generator(dataset):\n d = data.to_dataset(dataset)\n if dataset in ('mnist', 'fashion', 'svhn'):\n return models.ImageGenerator(d.ny, d.nc)\n else:\n return models.DenseGenerator(d.ny, d.nx, n_layers=2)",
"def build_models(self):\n self.models, self.optimizers, self.lr_schedulers = generator_init(self.config)",
"def _produce_train_dataset(self):\r\n pass",
"def create_test_generators(self):\n \n test_datagen = ImageDataGenerator( \n preprocessing_function = self.preprocess) \n\n\n test_generator = test_datagen.flow_from_directory(\n self.test_path,\n target_size=(self.image_size,self.image_size),\n batch_size= self.batch_size,\n class_mode='categorical', \n shuffle=False) # keep data in same order as labels\n \n return test_generator",
"def _generator(folder_path =None, is_train_set=True):\n if is_train_set:\n if folder_path is None:\n folder_path = './datasets/training_set'\n return train_datagen.flow_from_directory(folder_path,target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\n\n \n if folder_path is None:\n folder_path = './datasets/test_set'\n return test_datagen.flow_from_directory(folder_path,target_size=(64, 64),\n batch_size=32,\n class_mode='binary')",
"def get_dataset():\n dataset = DatasetGenerator({\n 'num_rows': 100,\n 'output': 'list',\n 'schema': {'name': 'faker.name',\n 'phone_number': 'faker.phone_number',\n 'group_id': range(2, 5),\n 'called_by': ['robo', 'associate', 'manager']},\n 'start_time': datetime(2017, 1, 1, 23, 22),\n 'end_time': datetime(2017, 7, 1, 22, 14),\n 'increments': 'hours'})\n dataset.generate()\n yield from dataset.to_output()",
"def create_loaders(self):\n self.spam_data.text_to_tensors()\n print('creating dataloaders')\n train_data = TensorDataset(self.spam_data.train_inputs, \n self.spam_data.train_masks, \n self.spam_data.train_labels)\n train_sampler = RandomSampler(train_data)\n self.train_dataloader = DataLoader(train_data, \n sampler=train_sampler, \n batch_size=self.batch_size)\n\n validation_data = TensorDataset(self.spam_data.validation_inputs, \n self.spam_data.validation_masks, \n self.spam_data.validation_labels)\n validation_sampler = SequentialSampler(validation_data)\n self.validation_dataloader = DataLoader(validation_data, \n sampler=validation_sampler, \n batch_size=self.batch_size)\n \n test_data = TensorDataset(self.spam_data.test_inputs, \n self.spam_data.test_masks, \n self.spam_data.test_labels)\n test_sampler = SequentialSampler(test_data)\n self.test_dataloader = DataLoader(test_data, \n sampler=test_sampler, \n batch_size=self.batch_size)\n print('finished creating dataloaders')",
"def create_data_loaders(self, symbol, start_date, end_date, seq_len, batch_size):\n # Save the parameters to use in other functions\n self.start_date = start_date\n self.end_date = end_date\n self.symbol = symbol\n\n # Dataloaders\n train_data = StockData(seq_len, \"train\", symbol=symbol, start_date = start_date, end_date= end_date)\n self.train_loader = data.DataLoader(train_data, batch_size=batch_size, shuffle=False)\n val_data = StockData(seq_len, \"val\", symbol=symbol, start_date = start_date, end_date= end_date)\n self.val_loader = data.DataLoader(val_data, batch_size=batch_size, shuffle=False)\n test_data = StockData(seq_len, \"test\", symbol=symbol, start_date = start_date, end_date= end_date)\n self.test_loader = data.DataLoader(test_data, batch_size=batch_size, shuffle=False)\n\n # We will use this scaler to inverse scale of model outputs.\n self.scaler = train_data.scaler",
"def train_generator():\n train_gen = ImageDataGenerator(\n rotation_range=30,\n shear_range=0.1,\n zoom_range=0.1,\n width_shift_range=0.2,\n height_shift_range=0.2,\n )\n\n val_gen = ImageDataGenerator()\n return train_gen, val_gen",
"def prepare_example_generator(self):\n generator = self.example_iterator_type()\n generator.configure(self)\n return generator;",
"def test_source_dataset_factory_create_batch(self):\n source_datasets = factories.SourceDatasetFactory.create_batch(100)\n for one in source_datasets:\n self.assertIsInstance(one, models.SourceDataset)",
"def prepare_data(self):\n self.tokenizer = custom_tokenizer_from_pretrained(\n self.tokenizer_name_or_path, self.cache_dir\n )\n try:\n self.train_examples = ExamplesBuilder(\n self.data_dir,\n Split.train,\n delimiter=self.delimiter,\n ).examples\n self.val_examples = ExamplesBuilder(\n self.data_dir,\n Split.dev,\n delimiter=self.delimiter,\n ).examples\n self.test_examples = ExamplesBuilder(\n self.data_dir,\n Split.test,\n delimiter=self.delimiter,\n ).examples\n\n if self.num_samples > 0:\n self.train_examples = self.train_examples[: self.num_samples]\n self.val_examples = self.val_examples[: self.num_samples]\n self.test_examples = self.test_examples[: self.num_samples]\n\n # create label vocabulary from dataset\n all_examples = self.train_examples + self.val_examples + self.test_examples\n all_labels = sorted(\n {\n tag.label\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n self.label_list = [BIO.O.value] + sorted(all_labels)\n label_types = sorted(\n {\n tag.tagtype.value\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n with open(self.labels_path, \"w\") as fp:\n for l in label_types:\n fp.write(l)\n fp.write(\"\\n\")\n\n self.label_to_id = {l: i for i, l in enumerate(self.label_list)}\n self.id_to_label = self.label_list\n\n start = time.time()\n self.train_dataset = self.create_dataset(\n self.train_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(train): {read_time}\")\n\n start = time.time()\n self.val_dataset = self.create_dataset(\n self.val_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(val): {read_time}\")\n\n start = time.time()\n self.test_dataset = self.create_dataset(\n self.test_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(test): {read_time}\")\n\n self.dataset_size = len(self.train_dataset)\n\n logger.info(self.val_examples[:3])\n logger.info(self.val_dataset[:3])\n\n except NoLocalFileError as e:\n logger.error(e)\n exit(1)",
"def myCustomGen(data_gen = None,dff = None,train = True,test=False,batch_size=None,img_size=None,embeddings=32,color='grayscale'):\n flow = create_flow(data_gen,dff,batch_size,img_size,train,test,color) \n for x, y in flow:\n indices, filenames = get_indices_from_keras_generator(flow,batch_size)\n # boneages = my_val.loc[my_val['id'].isin(filenames)].values\n # boneages = reduce(pd.DataFrame.append, map(lambda i: dff[dff.id == i], filenames)).boneage.values\n genders = reduce(pd.DataFrame.append, map(lambda i: dff[dff.id == i], filenames)).gender_01.values\n genders = create_embeddings2(genders,embeddings)\n # if next_print:\n # print(boneages,y)\n # next_print = True\n\n if len(x) != len(genders):\n yield [x,genders[-len(y):]],y\n else:\n yield [x,genders],y",
"def instantiate_trainers(self):\n self.base_task_times, self.old_task_times, self.all_task_times, self.exemp_task_times = [], [], [], []\n if self.args.average_over == 'tp':\n for tp in TRAIN_PERCENTAGES:\n for seed in SEEDS:\n handler = self.get_dataset_handler(self.args.dataset, self.args.base_classes,\n self.args.new_classes,\n tp, seed, self.args.vis, self.args.corr_vis,\n keep_val=any([x in self.args.method for x in ['bic']]))\n yield handler, tp, seed\n elif self.args.average_over == 'holdout':\n for holout_size in HOLDOUT_SIZES:\n for seed in SEEDS:\n handler = self.get_dataset_handler(self.args.dataset, self.args.base_classes, self.args.new_classes,\n self.args.tp, seed, self.args.vis, self.args.corr_vis,\n keep_val=any([x in self.args.method for x in ['bic']]))\n yield handler, holout_size, seed",
"def test_source_dataset_factory_build_batch(self):\n source_datasets = factories.SourceDatasetFactory.build_batch(10)\n for one in source_datasets:\n self.assertIsInstance(one, models.SourceDataset)",
"def create_datasets(self, text_placeholder, audio_placeholder, label_placeholder, test_text_data, test_audio_data,\n test_labels, val_text_data, val_audio_data, val_labels, batch_size, num_epochs):\n with tf.name_scope('dataset'):\n # creating the training dataset\n train_dataset = tf.data.Dataset.from_tensor_slices((text_placeholder, audio_placeholder, label_placeholder))\n train_dataset = train_dataset.repeat(num_epochs)\n train_dataset = train_dataset.batch(batch_size)\n\n # creating the test dataset\n test_dataset = tf.data.Dataset.from_tensor_slices((test_text_data, test_audio_data, test_labels))\n test_dataset = test_dataset.batch(test_labels.shape[0])\n\n # creating the validation dataset\n val_dataset = tf.data.Dataset.from_tensor_slices((val_text_data, val_audio_data, val_labels))\n val_dataset = val_dataset.batch(1)\n\n # creating the iterators from the datasets\n train_iterator = train_dataset.make_initializable_iterator()\n test_iterator = test_dataset.make_initializable_iterator()\n val_iterator = val_dataset.make_initializable_iterator()\n\n # creating the handle\n handle = tf.placeholder(tf.string, shape=[], name='handle')\n\n # creating iterator\n iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types,\n train_dataset.output_shapes)\n\n # getting the next element\n text_input, audio_input, label_batch = iterator.get_next()\n\n return train_iterator, test_iterator, val_iterator, text_input, audio_input, label_batch, handle",
"def __init__(self, feature_generators=[], instances=[], num_processes = 1):\n self.feature_generators = feature_generators\n self.instances = instances\n self.num_processes = num_processes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Should return a render_template of event_list
|
def event_list():
    return render_template("event_list.html", user=current_user)
|
[
"def event(request, index_id, event_id):\n context = {\"index_id\": index_id, \"event_id\": event_id}\n return render(request, 'event.html', context)",
"def news_and_events(request):\n return render(request, 'mysite/news_and_events.html')",
"def event_list(request, category_slug=None):\n category = None\n categories = Category.objects.all()\n if category_slug:\n category = get_object_or_404(Category, slug=category_slug)\n events = Event.objects.filter(category=category).select_related('host')\n else:\n events = Event.objects.all()\n\n return render(request,\n 'event_list.html',\n {'category':category,\n 'categories':categories,\n 'events':events,})",
"def render_timeline(context, user_project):\n events = UserProjectEvent.objects.filter(user_project=user_project).order_by('created')\n events_with_template = []\n for event in events:\n events_with_template.append({\n 'obj': event,\n 'tpl': 'projects/templatetags/timeline/type_{}.html'.format(event.event_type)\n })\n return {\n 'user_project': user_project,\n 'events': events_with_template,\n 'user': context.request.user,\n }",
"def viewAllEvents(request):\n\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT * FROM posts\")\n data = dictfetchall(cursor)\n\n # Get context to be displayed in template\n pic_url = getProfile(request)\n context = {'data': data, 'user_id': user_id,\n 'pfp': pic_url}\n\n return render(request, 'showevents.html', context)",
"def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['next_events_list'] = Event.objects.filter(end_date__gte = timezone.now()).order_by('-end_date')[:3]\n return context",
"def rnd_evnt(request):\n assert isinstance(request, HttpRequest)\n\n all_event = models.GetAllEvent()\n\n from random import randint\n ent_indx = randint(0, len(all_event) - 1)\n rnd_event = all_event[ent_indx]\n\n meet_event_date_max = models.GetMaxEvtDateFromEvent(rnd_event.eventid)\n\n return render(\n request,\n 'app/event.html',\n {\n 'title': 'Випадкова подія',\n 'message': 'Your application description page.',\n 'view_decript': 'Найвипадковіша подія у світі, зустрічайте',\n 'year': datetime.now().year,\n 'event': rnd_event,\n 'date_now': datetime.now(),\n 'meet_event_date_max':meet_event_date_max\n }\n )",
"def list_event(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_event\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EventList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def return_event_profile():\n\n # eventname = session['event']\n\n event = crud.return_dance_event(eventname)\n\n\n return render_template('event.html', event=event)",
"def get(self, request):\n context = {}\n form = EventForm(request.POST or None)\n context['form'] = form\n return render(request,'event/create_event.html', context)",
"def calendar(request):\n context = get_events_context_from_request(request)\n context[\"str_start\"] = context[\"start\"].strftime('%Y-%m-%dT%H:%M')\n context[\"str_end\"] = context[\"end\"].strftime('%Y-%m-%dT%H:%M')\n return render(request, 'calendar.html', context)",
"def my_events(request):\n events = Event.objects.filter(host=request.user)\n\n return render(request, 'my_events.html', {'events':events})",
"def watch_event_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_event_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/watch/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get(self):\n return render_template(\"appointments.html\",\n apps=get_db().get_all_apps())",
"def committee_list():\n\n committees = Committee.query.all()\n return render_template(\"committee_list.html\", committees=committees)",
"def suggested_events(request):\n might_like_evs= dbSearch.get_might_like_events(request.user) if request.user.is_authenticated else False\n context={\n 'suggested_events': might_like_evs,\n 'user': request.user,\n 'http_protocol': getHTTP_Protocol()\n }\n return render(request, 'users/suggested_events.html', context=context)",
"def viewGroups(request):\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table and save them into a dictionary,\n # pass to the showevents template\n\n context = getViewGroupsData(request)\n return render(request, 'showgroups.html', context)",
"def get_events():\n req = request\n start_date = request.args.get(\"start_date\")\n end_date = request.args.get(\"end_date\")\n desc = request.args.get(\"event_desc\")\n sqlx, sqlx_count = DBAccess.bld_query_sql(start_date, end_date, desc)\n \n list_result = DBAccess.get_events(sqlx, sqlx_count)\n if list_result[0] == 'error':\n sj = jsonify({\"events_error\": list_result[1]})\n else:\n sj = jsonify({\"events_details\": list_result[1]})\n return sj",
"def tag_listing():\n\n return render_template(\"tag_listing.html\", tags=Tag.query.all())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a directory item showing a particular season in a series. Seasons contain episodes, so this passes responsibility on to SeasonMenu() to construct that list.
|
def makeSeasonItem(season):
    art = R(CRUNCHYROLL_ART)
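    # Prefer TVDB season artwork when the series has a tvdbId mapping, otherwise keep the default Crunchyroll art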
    if Dict['series'][str(season['seriesId'])]['tvdbId'] is not None:
        artUrl = getSeasonThumb(Dict['series'][str(season['seriesId'])]['tvdbId'], season['seasonnum'])
        #Log.Debug("arturl: %s"%artUrl)
        if artUrl is not None:
            art = Function(GetArt,url=artUrl)
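    # Wrap the DirectoryItem in a Function item so SeasonMenu() receives the seriesId and season number it needs to list episodes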
    seasonItem = Function(
        DirectoryItem(
            SeasonMenu,
            season['title'],
            summary=season['description'].encode("utf-8"),
            #thumb=Function(getThumb,url=season['thumb']),
            art=art
        ),
        seriesId=season['seriesId'],
        season=season['seasonnum']
    )
    return seasonItem
|
[
"def SeasonMenu(sender,seriesId=None,season=None):\n\tdir = MediaContainer(disabledViewModes=[\"Coverflow\"], title1=sender.title1, title2=\"Series\")\n\tepList = getSeasonEpisodeListFromFeed(seriesId, season)\n\tfor episode in epList:\n\t\tdir.Append(makeEpisodeItem(episode))\n\treturn dir",
"def add_new_season(form):\n new_season = Seasons(name=form['name'],\n short=form['short'],\n description=form['description'])\n session.add(new_season)\n session.commit()\n return",
"def get_season(self, season: int) -> Season:\n if self.lang == \"de\":\n url = f\"https://www.southpark.de/feeds/carousel/video/e3748950-6c2a-4201-8e45-89e255c06df1/30/1/json/!airdate/season-{season}\"\n elif self.lang == \"se\" and season < 23: # SE doesn't have the 23rd season.\n url = f\"https://www.southparkstudios.nu/feeds/carousel/video/9bbbbea3-a853-4f1c-b5cf-dc6edb9d4c00/30/1/json/!airdate/season-{season}\"\n elif self.lang == \"uk\":\n url = f\"https://www.southparkstudios.co.uk/feeds/carousel/video/02ea1fb4-2e7c-45e2-ad42-ec8a04778e64/30/1/json/!airdate/season-{season}\"\n # cc.com is the ony one with jsons so descriptions will be in english\n else:\n url = f\"https://southpark.cc.com/feeds/carousel/video/06bb4aa7-9917-4b6a-ae93-5ed7be79556a/30/1/json/!airdate/season-{season}?lang={self.lang}\"\n\n season_data = json.loads(http_get(url))\n\n episodes = []\n for e in season_data[\"results\"]:\n episodes.append(Episode(\n id=e.get(\"itemId\").strip(),\n title=e.get(\"title\").strip(),\n description=e.get(\"description\").strip(),\n short_description=e.get(\"shortDescription\").strip(),\n thumbnail=e.get(\"images\").strip(),\n date=int(e.get(\"originalAirDate\", 0).strip()),\n episode_number=e.get(\"episodeNumber\").strip(),\n episode_number_in_season=e.get(\"episodeNumber\", \"0\")[-2:].strip(),\n season=e.get(\"episodeNumber\", \"0\")[:2].strip(),\n _lang=self.lang\n ))\n\n return Season(season, episodes)",
"def _add_episode(self, title, show_dir, season, episode, video_id, build_url):\n season = int(season)\n episode = int(episode)\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n\n self.log('Adding S{}E{} (id={}) of {} (dest={})'\n .format(season, episode, video_id, title.encode('utf-8'),\n show_dir))\n\n # add season\n if self.season_exists(title=title, season=season) is False:\n self.log(\n 'Season {} does not exist, adding entry to internal library.'\n .format(season))\n self.db[self.series_label][title]['seasons'].append(season)\n\n # add episode\n episode_meta = 'S%02dE%02d' % (season, episode)\n episode_exists = self.episode_exists(\n title=title,\n season=season,\n episode=episode)\n if episode_exists is False:\n self.log(\n 'S{}E{} does not exist, adding entry to internal library.'\n .format(season, episode))\n self.db[self.series_label][title]['episodes'].append(episode_meta)\n\n # create strm file\n filename = episode_meta + '.strm'\n filepath = os.path.join(show_dir, filename)\n if xbmcvfs.exists(filepath):\n self.log('strm file {} already exists, not writing it'\n .format(filepath))\n return\n url = build_url({'action': 'play_video', 'video_id': video_id})\n self.write_strm_file(\n path=filepath,\n url=url,\n title_player=title + ' - ' + episode_meta)",
"def insert_season(self, tvshowid, seasonid):\n if not self.season_id_exists(tvshowid, seasonid):\n insert_query = ('INSERT INTO video_lib_seasons (TvShowID, SeasonID) '\n 'VALUES (?, ?)')\n self._execute_non_query(insert_query, (tvshowid, seasonid))",
"def remove_season(self, title, season):\n title = re.sub(r'[?|$|!|:|#]', r'', title.encode('utf-8'))\n season = int(season)\n season_list = []\n episodes_list = []\n show_meta = '%s' % (title)\n for season_entry in self.db[self.series_label][show_meta]['seasons']:\n if season_entry != season:\n season_list.append(season_entry)\n self.db[self.series_label][show_meta]['seasons'] = season_list\n alt_title = self.db[self.series_label][show_meta]['alt_title']\n show_dir = self.nx_common.check_folder_path(\n path=os.path.join(self.tvshow_path, alt_title))\n if xbmcvfs.exists(show_dir):\n show_files = [f for f in xbmcvfs.listdir(show_dir) if xbmcvfs.exists(os.path.join(show_dir, f))]\n for filename in show_files:\n if 'S%02dE' % (season) in filename:\n xbmcvfs.delete(os.path.join(show_dir, filename))\n else:\n episodes_list.append(filename.replace('.strm', ''))\n self.db[self.series_label][show_meta]['episodes'] = episodes_list\n self._update_local_db(filename=self.db_filepath, db=self.db)\n return True",
"def test_create_show_with_embed_season_url(self):\n show = Show(show_id=1, embed_url='?embed=seasons')\n self.assertIsInstance(show.seasons[0], Season)\n self.assertTrue(show.seasons[0].episodes)\n self.assertEqual(1, show.seasons[0].number)",
"def add_to_season(self, *games):\n for item in games:\n item = item.convert_dict()\n self.season.append(item)\n self._gather_stats()",
"def season_exists(self, title, season):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n if self.show_exists(title) is False:\n return False\n show_entry = self.db[self.series_label][title]\n return season in show_entry['seasons']",
"def load_episodes(self):\n self.episode_menu.clear_items()\n for episode in get_episode_list(self.show['pk']):\n self.episode_menu.add_item(MenuItem(\n \"%d x %d : %s \" % (\n episode['season_number'],\n episode['episode_number'],\n episode['name'],\n ), \n episode))\n self.episode_menu.select_item(0)",
"def uploadSeasons(self, container, label, event):\n if self.model.faclist.dataframe is None:\n messagebox.showinfo(\"Facilities List Option File Missing\",\n \"Please upload a Facilities List Options file before selecting\"+\n \" a particle file.\")\n\n fullpath = self.openFile(askopenfilename())\n if fullpath is not None: \n self.uploader.uploadDependent(\"seasons\", fullpath, \n self.model.gasdryfacs)\n \n if self.model.seasons.dataframe.empty == False:\n\n\n # Update the UI\n [self.nav.nav.log.scr.insert(tk.INSERT, msg) for msg in self.model.seasons.log]\n # container.configure(bg='light green')\n \n self.seasonlbl.set('')\n self.seasonlbl.set(fullpath.split(\"\\\\\")[-1])",
"def add_season_to_rower(rower, season_id):\n for season in rower.season:\n if season.id == season_id:\n return\n rower.season.append(get_season_from_season_id(season_id))\n return",
"def fill_season(self, change):\n\n months, years = self.get_months_years(change[\"new\"])\n\n month_items = [\n {\"text\": text, \"value\": value}\n for value, text in param.MONTHS_DICT.items()\n if value in months\n ]\n\n self.date_selector.months_items = month_items\n self.date_selector.years_items = years\n\n self.date_selector.selected_months = month_items\n self.date_selector.selected_years = years",
"def test_season_with_episodes(self):\n season = Season(season_id=1, with_episodes=True)\n for episode in season.episodes:\n self.assertEqual(1, episode.season)",
"def setyearseasons(connection):\n connection.execute(\"\"\" INSERT INTO yearseason (yearseasonid, season) VALUES (0,\"Winter\"),(1,\"Spring\"),(2,\"Summer\"),(3,\"Fall\");\"\"\")",
"def add_season(self, date_column=None):\n\n # Select the date column to use\n date_column = date_column if date_column is not None else self.date_column\n\n # Get the years\n years = self.data[date_column].dt.year.unique()\n\n # Put the season by default on winter\n self.data['season'] = 'winter'\n\n # Check for each year if the season should be summer\n for year in years:\n # Get the start dates for the two seasons and check which dates match the summer season\n after_start_summer = self.data[date_column] >= start_summer_season(year)\n before_start_winter = self.data[date_column] < start_winter_season(year)\n\n # Update the season for the matches\n self.data.at[np.logical_and(after_start_summer, before_start_winter), 'season'] = 'summer'",
"def is_season_dir(p):\n if not os.path.isdir(p):\n return False\n\n try:\n get_season_number(p)\n except ValueError:\n return False\n\n return True",
"def __init__(self, json_node, series=None, build_full_object=False):\n self.series_id = 0\n self.series_name = None\n self.anidb_aid = 0\n self.anidb_eid = 0\n self.actors = []\n self.url = None\n self.item_type = 'episode'\n if series is not None:\n self.series_id = series.id\n self.series_name = series.name\n self.actors = series.actors\n self.anidb_aid = series.anidb_aid\n if series.is_movie:\n self.item_type = 'movie'\n\n Directory.__init__(self, json_node, True)\n # don't redownload info on an okay object\n if build_full_object and self.size < 0:\n json_node = self.get_full_object()\n Directory.__init__(self, json_node)\n # check again, as we might have replaced it above\n if isinstance(json_node, int) or pyproxy.is_unicode_or_string(json_node):\n eh.spam(self)\n return\n\n self.episode_number = pyproxy.safe_int(json_node.get('epnumber', ''))\n self.episode_type = json_node.get('eptype', 'Other')\n if self.anidb_aid == 0:\n self.anidb_aid = pyproxy.safe_int(json_node.get('aid', 0))\n self.anidb_eid = pyproxy.safe_int(json_node.get('eid', 0))\n self.date = model_utils.get_airdate(json_node)\n self.tvdb_episode = json_node.get('season', '0x0')\n self.update_date = None\n self.hash_content = None\n\n self.process_children(json_node)\n\n if self.name is None:\n self.name = 'Episode ' + str(self.episode_number)\n self.alternate_name = model_utils.get_title(json_node, 'x-jat', 'main')\n\n self.watched = pyproxy.safe_int(json_node.get('view', 0)) != 0\n self.watched_date = str(json_node.get('view_date', ''))\n self.year = pyproxy.safe_int(json_node.get('year', ''))\n\n self.rating = float(str(json_node.get('rating', '0')).replace(',', '.'))\n self.user_rating = float(str(json_node.get('userrating', '0')).replace(',', '.'))\n self.overview = model_utils.make_text_nice(pyproxy.decode(json_node.get('summary', '')))\n self.votes = pyproxy.safe_int(json_node.get('votes', ''))\n self.outline = \" \".join(self.overview.split(\".\", 3)[:2]) # first 3 sentence\n self.tags = model_utils.get_tags(json_node.get('tags', {}))\n\n if self.episode_type != 'Special':\n season = str(json_node.get('season', '1'))\n if 'x' in season:\n season = season.split('x')[0]\n else:\n season = '0'\n self.season = pyproxy.safe_int(season)\n\n eh.spam(self)",
"def create_playlist(client, url, public=False):\n domain, html = retrieve_episode(url)\n parser = parse_episode(domain, html)\n create_playlist_from_parser(client, parser, public=public)"
]
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}