query stringlengths 9–9.05k | document stringlengths 10–222k | negatives listlengths 19–20 | metadata dict |
---|---|---|---|
Set the next handler of the chain
|
def set_next(self, handler):
    self.next = handler
    return handler
|
[
"def set_handler(self, handler):\n self.next_handler = handler",
"def _handler_changed(self, handler):\n if self.next is not None:\n self.next.handler = handler",
"def handle_next(self, payload: str) -> None:\n data = json_loads(payload)\n data['hashes'] = [bytes.fromhex(h) for h in data['hashes']]\n args = NextPayload(**data)\n\n key = 'next'\n deferred = self.deferred_by_key.pop(key, None)\n if deferred:\n deferred.callback(args)",
"def create_next():\n counter = 0\n\n def next_func(request, response, next_):\n nonlocal counter\n func = HANDLERS[counter]\n counter += 1\n return func(request, response, next_)\n return next_func",
"def chain(self, chain):\n\n self._chain = chain",
"def set_next(self,next):\n if isinstance(next,Pype) :\n self.tail.set_next(next.head)\n elif isinstance(next,Segment) :\n self.tail.set_next(next)",
"def _wrapped_handler_ref_changed(self, wrapped_handler_ref):\n if self.next is not None:\n self.next.wrapped_handler_ref = wrapped_handler_ref",
"def send_next(self):\n event = next(self)\n self.send(event)\n return event",
"def handler(self, handler):\n self._handler = handler",
"def handler(self, handler):\n\n self._handler = handler",
"def set_handler (self, handler_class):\n h = self.handler\n self.handler = handler_class(h.parent, h.prefix, h.args)",
"def add_handler(self, path, handler):\n if path: # guard against Null path, we assume handler could be Null\n path_list = self.split_path(path)\n self.trie.insert(step_list=path_list, handler=handler)",
"def set_chain(self, val):\n self.__chain = val",
"def add_handler(self, handler):\r\n pass",
"def next(self, event):\n self.result = 1",
"def set_next(self, new_next):\n self.next = new_next",
"def _set_link(self, value, handler):\n self._mapping[value] = handler",
"def add_handler(self, handler):\n pass",
"def set_handler(self, handler):\n self._handler = handler"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
G_in: y data of the input function [1 us steps] G_input_x: x axis of the input function (ms) GIRF: y data of the transfer function (complex) GIRF_x: frequency axis (kHz)
|
def GIRF_Interpolation(G_in, G_input_x, GIRF, GIRF_x):
    """
    Extend G_in (sampled in 1 us steps) so that it matches the GIRF; the
    duration is taken as the reciprocal of the GIRF frequency resolution.

    Returns:
    G_input_long: the input function extended to the time T given by the
        reciprocal of the GIRF frequency resolution
    G_input_x_long: its time axis (x axis) [ms]
    GIRF_interp: the GIRF interpolated onto the frequency axis of the
        input function
    fG_input_x: its frequency axis (x axis) [kHz]
    """
    T_GIRF = 1/(GIRF_x[1]-GIRF_x[0])  # [ms]
    # N_long = int(T/1e-3)
    T_G_input = G_input_x[len(G_input_x)-1]
    if T_GIRF >= T_G_input:
        G_input_x_long = np.arange(0, T_GIRF, 1e-3)
        G_input_long = np.zeros(len(G_input_x_long))
        G_input_long[0:len(G_in)] = G_in
    else:
        G_input_long = G_in
        G_input_x_long = G_input_x
    N = len(G_input_x_long)
    dt = G_input_x_long[1]-G_input_x_long[0]
    # Frequency axis of the Fourier transform of the input function [kHz]
    fG_input_x = np.fft.fftshift(np.fft.fftfreq(N, dt))
    GIRF_real = np.real(GIRF)
    GIRF_imag = np.imag(GIRF)
    GIRF_interp_real = np.interp(fG_input_x, GIRF_x, GIRF_real)
    GIRF_interp_imag = np.interp(fG_input_x, GIRF_x, GIRF_imag)
    GIRF_interp = np.vectorize(complex)(GIRF_interp_real, GIRF_interp_imag)
    # Set values outside the range of GIRF_x to zero
    GIRF_interp = np.where(abs(fG_input_x) > GIRF_x.max(), 0, GIRF_interp)
    return G_input_long, G_input_x_long, GIRF_interp, fG_input_x
|
[
"def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F",
"def fft_gr_to_fq(g, rstep, rmin):\n if g is None:\n return g\n padrmin = int(round(rmin / rstep))\n npad1 = padrmin + len(g)\n\n # pad to the next power of 2 for fast Fourier transformation\n npad2 = (1 << int(math.ceil(math.log(npad1, 2)))) * 2\n # sine transformations needs an odd extension\n\n npad4 = 4 * npad2\n # gpadc array has to be doubled for complex coefficients\n gpadc = np.zeros(npad4)\n # gpadc2 = np.zeros(npad4)\n # copy the original g signal\n # ilo = 0\n # ilo = padrmin\n # ilo = len(g)\n gpadc[:2 * len(g):2] = g[:]\n # gpadc2[:2 * len(g):2] = g[:]\n '''\n for i in range(len(g)):\n gpadc[2 * ilo] = g[i]\n ilo += 1\n # '''\n # assert_allclose(gpadc2, gpadc)\n # copy the odd part of g skipping the first point,\n # because it is periodic image of gpadc[0]\n gpadc[-2:-2 * len(g) + 1:-2] = -1 * g[1:]\n # gpadc2[-2:-2 * len(g) + 1:-2] = -1 * g[1:]\n '''\n ihi = 2 * npad2 - 1\n for ilo in range(1, npad2):\n gpadc[2 * ihi] = -1 * gpadc[2 * ilo]\n ihi -= 1\n '''\n # assert_allclose(gpadc2, gpadc)\n # plt.plot(gpadc)\n # plt.show()\n\n # gpadcfft = np.fft.ihfft(gpadc)\n gpadcfft = np.fft.ifft(gpadc)\n # plt.plot(gpadcfft.imag)\n # plt.show()\n\n f = np.zeros(npad2, dtype=complex)\n # f2 = np.zeros(npad2, dtype=complex)\n f[:] = gpadcfft[:npad2 * 2:2] * npad2 * rstep\n # f2[:] = gpadcfft[:npad2 * 2:2] * npad2 * rstep\n '''\n for i in range(npad2):\n # f[i] = gpadcfft[2 * i + 1] * npad2 * rstep\n f[i] = gpadcfft[2 * i] * npad2 * rstep\n assert_allclose(f2, f)\n # '''\n return f.imag",
"def gyroHF2(self, GYRO, PFC):\n print(\"Calculating gyro orbit heat loads\")\n log.info(\"Calculating gyro orbit heat loads\")\n #get divertor HF\n qDiv = PFC.qDiv[PFC.PFC_GYROmap] / self.elecFrac\n Pdiv = qDiv * PFC.areas[PFC.PFC_GYROmap]\n #Get fractional multipliers for each helical trace\n gyroFrac = 1.0/GYRO.N_gyroPhase\n vPhaseFrac = 1.0/GYRO.N_vPhase\n vSliceFrac = GYRO.energyFracs\n #qMatrix = np.zeros((GYRO.N_gyroPhase,GYRO.N_vPhase,GYRO.N_vSlice,len(q)))\n Pgyro = np.zeros((GYRO.Nt))\n PNaN = 0.0\n sum=0\n sum1=0\n #loop through intersect record and redistribute power using multipliers\n for gyroPhase in range(GYRO.N_gyroPhase):\n for vPhase in range(GYRO.N_vPhase):\n for vSlice in range(GYRO.N_vSlice):\n idx = GYRO.intersectRecord[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap]\n hdotn = np.abs(GYRO.hdotn[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap])\n isNanFrom = np.where(np.isnan(idx)==True)[0] #include NaNs (NaNs = no intersection) index we map from\n notNanFrom = np.where(np.isnan(idx)==False)[0] #dont include NaNs (NaNs = no intersection) index we map from\n notNanTo = idx[~np.isnan(idx)] #indices we map power to\n notNanTo = notNanTo.astype(int) #cast as integer\n isNanTo = idx[np.isnan(idx)] #indices we map power to\n isNanTo = isNanTo.astype(int) #cast as integer\n\n if len(notNanFrom)>0:\n #multiple sources can load the same target face, so we loop\n for i in range(len(notNanFrom)):\n Pgyro[notNanTo[i]] += Pdiv[notNanFrom[i]]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[notNanFrom[i],vSlice]\n\n if len(isNanFrom)>0:\n PNaN += np.sum(Pdiv[isNanFrom]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[isNanFrom,vSlice])\n\n\n GYRO.gyroPowMatrix += Pgyro\n GYRO.gyroNanPower += PNaN\n return",
"def g0_hz(sp,cal_Veff = 5e-3, V_pi = None, askFit = True,T = 293,PM_calib_file = \"M:\\\\phaseModulationCalibrations\\\\V_pi1550.spe\"):#V_pi = 7.1\n \n cal_Veff = addDefaultUnit(cal_Veff,V)\n T = addDefaultUnit(T,K)\n f = sp.fitter\n if not isinstance(sp.fitter,fit.FitterLorentzAndGauss):\n if askFit:\n yn = \"dummy\"\n while yn is not \"y\":\n yn = raw_input(\"spectrum %s was fitted with model %s. refit it with model \\\"LorentzGauss\\\"(y/n)?\"%(sp.name,f.ID_STR))\n if yn ==\"n\":\n raise ValueError(\"spectrum should be fitted with LorentzGauss for determining effective mass\")\n sp.fit(model = \"LorentzGauss\")\n\n\n if V_pi == None:\n f = load(PM_calib_file)\n V_pi = utils.misc.interpFromPlottable(f,sp[\"x0_2_hz\"])\n else:\n V_pi = 7.1\n V_pi = addDefaultUnit(V_pi,V)\n print \"value of V_pi used is \" + str(V_pi)\n\n ratio = sp[\"area_2\"]/sp[\"area\"]\n \n phi0 = np.pi*cal_Veff*sqrt(2)/V_pi\n \n# omega = 2.0*np.pi*cst.c/(lambda_nm*1e-9)\n\n Omega = 2.0*np.pi*sp[\"x0_2_hz\"]*Hz\n\n nbar = k*T/(hbar*Omega)\n g0 = ((Omega**2*phi0**2)/(4*nbar*ratio))**.5/(2*pi)\n yn = raw_input(\"would you like results to be pasted in Origin to be copied in clipboard? [y]/n\")\n if yn is not \"n\":\n #import utils\n utils.misc.copyToClipboard(str(sp[\"x0_hz\"]*1e-6) +\"\\t\"+str(sp[\"gamma_hz\"]) + \"\\t\"+\"0\" +\"\\t\"+ str(g0.asNumber()))\n return g0",
"def _g(self,pp,p,k):\n \n # define prefact \n # get the corresponding legendre polynomial \n Pk = legendre(k)\n # define momentum transfer dependent on angles \n qval=np.sqrt(p**2+pp**2-2*p*pp*self.xp)\n \n # build integral of regularized OBE \n return float(np.sum(Pk(self.xp)/((qval**2+self.mpi**2))*self.xw*np.exp(-(qval**2+self.mpi**2)/self.cutoff**2)))",
"def _irfft2d(f_x) :",
"def gyroHF(self, GYRO, PFC):\n print(\"Calculating gyro orbit heat loads\")\n log.info(\"Calculating gyro orbit heat loads\")\n #get divertor HF\n qDiv = PFC.qDiv[PFC.PFC_GYROmap] / self.elecFrac\n Pdiv = qDiv * PFC.areas[PFC.PFC_GYROmap]\n #Get fractional multipliers for each helical trace\n gyroFrac = 1.0/GYRO.N_gyroPhase\n vPhaseFrac = 1.0/GYRO.N_vPhase\n vSliceFrac = GYRO.energyFracs\n #qMatrix = np.zeros((GYRO.N_gyroPhase,GYRO.N_vPhase,GYRO.N_vSlice,len(q)))\n Pgyro = np.zeros((GYRO.Nt))\n PNaN = 0.0\n sum=0\n sum1=0\n #loop through intersect record and redistribute power using multipliers\n for gyroPhase in range(GYRO.N_gyroPhase):\n for vPhase in range(GYRO.N_vPhase):\n for vSlice in range(GYRO.N_vSlice):\n idx = GYRO.intersectRecord[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap]\n isNanFrom = np.where(np.isnan(idx)==True)[0] #include NaNs (NaNs = no intersection) index we map from\n notNanFrom = np.where(np.isnan(idx)==False)[0] #dont include NaNs (NaNs = no intersection) index we map from\n notNanTo = idx[~np.isnan(idx)] #indices we map power to\n notNanTo = notNanTo.astype(int) #cast as integer\n isNanTo = idx[np.isnan(idx)] #indices we map power to\n isNanTo = isNanTo.astype(int) #cast as integer\n\n if len(notNanFrom)>0:\n #multiple Froms can light up the same To, so we loop\n for i in range(len(notNanFrom)):\n Pgyro[notNanTo[i]] += Pdiv[notNanFrom[i]]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[notNanFrom[i],vSlice]\n\n if len(isNanFrom)>0:\n PNaN += np.sum(Pdiv[isNanFrom]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[isNanFrom,vSlice])\n\n #print(\"\\nTEST2\")\n #print(GYRO.intersectRecord[0,0,0,1711])\n #print(Pgyro[1711])\n\n GYRO.gyroPowMatrix += Pgyro\n GYRO.gyroNanPower += PNaN\n return",
"def get_g2F(self, ikpt, iband, jband):\n\n nkpt = self.nkpt\n nband = self.nband\n nmode = self.nmode\n\n nomegase = self.nomegase\n omega_se = self.omegase\n\n g2F = zeros((nomegase), dtype=np.float)\n\n omega_q = self.ddb.omega[:].real\n\n # nkpt, nband, nband, nmode\n fan_g2, ddw_g2 = self.get_fan_ddw_gkk2_active()\n\n g2 = fan_g2[ikpt, iband, jband, :]\n\n for imode in range(nmode):\n\n domega = omegase - omega_q[imode]\n F = delta_lorentzian(domega, self.smearing)\n g2F += g2[imode] * F\n\n g2F *= self.wtq\n\n return g2F",
"def pyin(x, Fs=22050, N=2048, H=256, F_min=55.0, F_max=1760.0, R=10, thresholds=np.arange(0.01, 1, 0.01),\n beta_params=[1, 18], absolute_min_prob=0.01, voicing_prob=0.5):\n\n if F_min > F_max:\n raise Exception(\"F_min must be smaller than F_max!\")\n\n if F_min < Fs/N: \n raise Exception(f\"The condition (F_min >= Fs/N) was not met. With Fs = {Fs}, N = {N} and F_min = {F_min} you have the following options: \\n1) Set F_min >= {np.ceil(Fs/N)} Hz. \\n2) Set N >= {np.ceil(Fs/F_min).astype(int)}. \\n3) Set Fs <= {np.floor(F_min * N)} Hz.\")\n\n x_pad = np.concatenate((np.zeros(N // 2), x, np.zeros(N // 2))) # Add zeros for centered estimates\n\n # Compute Beta distribution\n thr_idxs = np.arange(len(thresholds))\n beta_distr = comb(len(thresholds), thr_idxs) * beta(thr_idxs+beta_params[0],\n len(thresholds)-thr_idxs+beta_params[1]) / beta(beta_params[0],\n beta_params[1])\n\n # YIN with multiple thresholds, yielding observation matrix\n B = int(np.log2(F_max / F_min) * (1200 / R))\n F_axis = F_min * np.power(2, np.arange(B) * R / 1200) # for quantizing the estimated F0s\n O, rms, p_orig, val_orig = yin_multi_thr(x_pad, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, thresholds=thresholds,\n beta_distr=beta_distr, absolute_min_prob=absolute_min_prob, F_axis=F_axis,\n voicing_prob=voicing_prob)\n\n # Transition matrix, using triangular distribution used for pitch transition probabilities\n max_step_cents = 50 # Pitch jump can be at most 50 cents from frame to frame\n max_step = int(max_step_cents / R)\n triang_distr = triang.pdf(np.arange(-max_step, max_step+1), 0.5, scale=2*max_step, loc=-max_step)\n A = compute_transition_matrix(B, triang_distr)\n \n # HMM smoothing\n C = np.ones((2*B, 1)) / (2*B) # uniform initialization\n f0_idxs = viterbi_log_likelihood(A, C.flatten(), O) # libfmp Viterbi implementation\n \n # Obtain F0-trajectory\n F_axis_extended = np.concatenate((F_axis, np.zeros(len(F_axis))))\n f0 = F_axis_extended[f0_idxs]\n\n # Suppress low power estimates\n f0[0] = 0 # due to algorithmic reasons, we set the first value unvoiced\n f0[rms < 0.01] = 0\n\n # confidence\n O_norm = O[:, np.arange(O.shape[1])]/np.max(O, axis=0)\n conf = O_norm[f0_idxs, np.arange(O.shape[1])]\n\n # Refine estimates by choosing the closest original YIN estimate\n refine_estimates = True\n if refine_estimates:\n f0 = refine_estimates_yin(f0, p_orig, val_orig, Fs, R)\n\n t = np.arange(O.shape[1]) * H / Fs # Time axis\n \n return f0, t, conf",
"def FIR_estimate(self):\r\n raise NotImplementedError",
"def init_igrf():\n\n global igrf, nmn,mns, nyear,years,yruts\n\n # print('Load IGRF coefficients ...')\n\n bfn = 'igrf13coeffs.txt'\n locffn = os.path.join(os.path.dirname(__file__), bfn)\n\n nheader = 3\n with open(locffn, 'r') as file:\n for i in range(nheader):\n next(file)\n header = file.readline().rstrip()\n cols = header.split()\n # first 3 columns are g/h flag, n, m.\n years = np.array([np.int32(j[0:4]) for j in cols[3:]])\n nyear = len(years)\n lines = file.read().splitlines()\n\n cols = lines[-1].split()\n k = np.int32(cols[1]) + 1\n nmn = np.int32((k + 1) * k * 0.5)\n igrf = np.zeros((nmn, nyear, 2), dtype=float)\n mns = np.empty((nmn, 2), dtype=np.int32)\n l = 0\n for i in range(k):\n for j in range(i + 1):\n mns[l, :] = [i, j]\n l += 1\n\n for line in lines:\n cols = line.split()\n if cols[0] == 'g':\n i = 0\n else:\n i = 1\n n, m = np.int32(cols[1:3])\n mn = np.int32(n * (n + 1) * 0.5 + m)\n igrf[mn, :, i] = [np.float32(j) for j in cols[3:]]\n\n # treat the last column\n years[-1] += 5\n igrf[:, -1, :] = igrf[:, -2, :] + igrf[:, -1, :] * 5\n yruts = np.empty(nyear, dtype=float)\n t0_datetime = datetime.datetime(1970,1,1)\n for i in range(nyear):\n yruts[i] = (datetime.datetime(years[i],1,1)-t0_datetime).total_seconds()\n # separate g/h\n igrf = {'g': igrf[:,:,0], 'h': igrf[:,:,1]}",
"def read_input():\n \n argv = sys.argv\n\n # Read file names from sd input\n f_dy = argv[1] # matdyn.modes\n f_pat = argv[2] # path.out (should be in crystal coords)\n f_ph = argv[3] # ph.x output (Gamma point)\n\n # Read input card\n f_inp = open(\"input.dat\",'r')\n l1 = f_inp.readline()\n l2 = f_inp.readline()\n l3 = f_inp.readline().split()\n f_inp.close()\n\n # Open files\n\n f = open(f_dy,'r') # matdyn.modes \n f_dyn = f.readlines()\n f.close()\n\n f = open(f_pat,'r') # path.out\n f_path = f.readlines()\n f.close()\n\n f = open(f_ph,'r') # ph.x output\n f_zs = f.readlines()\n f.close()\n\n # Assign values to a0, nat, M, nqp\n a0, vol = float(l1.split()[0]), float(l1.split()[1])\n nat = int(l2) \n mass = np.zeros(nat)\n for iat in range(nat):\n mass[iat] = float(l3[iat])\n\n # Assign values to G (reciprocal lattice vec)\n ig = 0 ; i = 0\n for line in f_zs:\n if \"reciprocal axes:\" in line:\n ig = i + 1 \n break\n i += 1 \n\n rG = np.zeros((3,3))\n for ic in range(3):\n rGtext = f_zs[ig+ic][23:48].split()\n rG[ic,:] = np.array([float(rGtext[0]), float(rGtext[1]), float(rGtext[2])])\n\n # Read Z* tensor from f_zs\n i = 0\n iz = 0\n zstart = []\n for line in f_zs:\n if \"(d P / du)\" in line:\n iz = i + 3\n if \"Px\" in line:\n zstart.append(i)\n\n i += 1\n\n # Read the dielectric tensor from f_zs\n i = 0\n ie = 0\n for line in f_zs:\n if \"Dielectric constant in cartesian axis\" in line:\n ie = i + 2\n break\n\n i += 1\n\n # Assign Z* values\n zs = np.zeros((nat,3,3)) # initialize Z*\n\n for iat in range(nat):\n for ic in range(3):\n ztext = f_zs[zstart[iat]+ic][19:56].split()\n for jc in range(3):\n zs[iat][ic][jc] = float(ztext[jc])\n\n # Assing the dielectric tensor\n eps = np.zeros((3,3))\n\n for ic in range(3):\n epstext = f_zs[ie+ic][16:66].split()\n for jc in range(3):\n eps[ic][jc] = float(epstext[jc])\n\n # Number of modes and q-points\n nmodes = 3 * nat\n nqpt = int(f_path[0].split()[0])\n\n # Read the q-points\n q = np.zeros((nqpt,4)) # 4th dimension is lenght for q-points on a line, weights for q-points on a grid \n for iq in range(1,nqpt+1):\n q[iq-1,] = np.array([float(f_path[iq].split()[0]),float(f_path[iq].split()[1]), \\\n float(f_path[iq].split()[2]),float(f_path[iq].split()[3])])\n\n # Read the eigenvalues(om) and eigenvectors(eig) \n # Initiate first\n om = np.zeros((nmodes,nqpt))\n eig = np.zeros((nmodes,nqpt,nat,3), dtype=complex) \n\n # Get the starting lines for each q-pt\n i = 0\n i_q = []\n for line in f_dyn:\n if \"q =\" in line:\n i_q.append(i+2)\n i += 1\n\n #Assign values to om and eig\n for iq in range(nqpt):\n for imod in range(nmodes):\n omtext = f_dyn[i_q[iq]+imod*(nat+1)][43:55]\n om[imod][iq] = float(omtext)\n for iat in range(nat):\n etext = f_dyn[i_q[iq]+imod*(nat+1)+iat+1][2:72].split()\n for ic in range(3):\n eig.real[imod][iq][iat][ic]=float(etext[2*ic])*np.sqrt(mass[iat])\n eig.imag[imod][iq][iat][ic]=float(etext[2*ic+1])*np.sqrt(mass[iat])\n\n #Normalize the eigenvectors\n t1 = eig[imod,iq,:,:]\n t_nu = np.sum(np.sum(np.conjugate(t1)*t1,axis=0))\n eig[imod,iq,:,:] = eig[imod,iq,:,:]/np.sqrt(np.abs(t_nu))\n\n # Check normalization\n delta = np.zeros((nmodes,nmodes), dtype=complex)\n for iat in range(nat):\n for ic in range(3):\n t2 = eig[:,iq,iat,ic]\n delta += np.outer(np.conjugate(t2),t2)\n\n unit = np.diag(np.diag(np.ones((nmodes,nmodes)))) # Unit vector\n test = np.abs( (delta-unit) )\n if ( np.max(test) > 1e-3):\n print \"Non-orthonormal eigenvector at iq=\", q[iq,:]\n\n return om, eig, q, zs, eps, mass, a0, vol, rG, nmodes, nqpt, nat",
"def fractalTransformationCG(F,G,M=256,N=50,its=16,\n deBruijn=True,return_Q=False):\n assert isinstance(F,DynamicalSystem) and isinstance(G,DynamicalSystem)\n assert F.check_validity(True,False) and G.check_validity(True,False)\n if deBruijn:\n its = int(its)\n if its>32:\n print(\"fractalTransformationCG: Warning: A very long sequence \"+\n \"length has been requested! (2**\",its,\")\")\n else:\n if its<=30:\n its = int(2.0**its)\n else:\n its = int(its)\n rho = F.get_rho()\n tau_L = F.tau(rho,N+1)\n tau_R = F.tau_plus(rho,N+1)\n sigma = np.zeros(N+1,dtype=np.int8)\n X = np.linspace(0.0,1.0,M+1)\n H = X.copy()\n Q = np.zeros(M+1,dtype=np.int)\n Q[0],Q[M] = N,N # since the end points are always correct\n q,x,y = 0,1.0,1.0\n def address_distance(alpha,beta):\n k = np.argmin(alpha==beta)\n return (beta[k]-alpha[k])*0.5**k\n if deBruijn:\n db_2 = DeBruijnGenerator(2,its)\n #for _ in range(db_2.length()): # beware of overflow!\n while not db_2.is_complete(): # this is better\n sigma = np.roll(sigma,1)\n sigma[0] = db_2()\n if sigma[0]==0:\n x = F.if0(x)\n y = G.if0(y)\n else:\n x = F.if1(x)\n y = G.if1(y)\n if sigma[0]==0:\n if address_distance(sigma,tau_L)<0:\n q = 0\n else:\n if address_distance(tau_R,sigma)<0:\n q = 0\n k = int(0.5+x*M)\n # Should really check k is in the right range (i.e. 0,1,...,M)\n # but this shouldn't happen and is somewhat expensive to check\n if Q[k] < q:\n H[k] = y\n Q[k] = q\n q += 1\n # end while\n else:\n for _ in range(its):\n sigma = np.roll(sigma,1)\n sigma[0] = np.random.randint(2)\n if sigma[0]==0:\n x = F.if0(x)\n y = G.if0(y)\n else:\n x = F.if1(x)\n y = G.if1(y)\n if sigma[0]==0:\n if address_distance(sigma,tau_L)<0:\n q = 0\n else:\n if address_distance(tau_R,sigma)<0:\n q = 0\n k = int(0.5+x*M)\n # Should really check k is in the right range (i.e. 0,1,...,M)\n # but this shouldn't happen and is somewhat expensive to check\n if Q[k] < q:\n H[k] = y\n Q[k] = q\n q += 1\n # end for\n # end if/else\n if return_Q:\n return X,H,Q\n return X,H",
"def get_integration(self, intg):\n return self.data[intg, :, :, :]",
"def integrand(x):\n return np.exp(x * x) * special.erfc(x)",
"def SetInput(self, , , p_float_6):\n ...",
"def convert_gsc2rfin_to_jhk(data, output_mag):\n if isinstance(data, pd.Series):\n r_f_mag = data['FpgMag']\n r_f_err = data['FpgMagErr']\n i_n_mag = data['NpgMag']\n i_n_err = data['NpgMagErr']\n elif isinstance(data, tuple):\n r_f_mag = data[0]\n r_f_err = data[1]\n i_n_mag = data[2]\n i_n_err = data[3]\n else:\n raise TypeError(f\"{type(data)} is not a valid type for data. Must be a tuple (mag, mag_err) \"\n f\"or a pd.Series output from the Guide Star Catalog\")\n\n if output_mag.upper() == 'J':\n def calc_j(r_f, i_n): return r_f + 0.01 * (r_f - i_n) ** 2 - 1.56 * (r_f - i_n) - 0.44\n j = calc_j(r_f_mag, i_n_mag)\n err_j_r_f = calc_j(r_f_mag + r_f_err, i_n_mag) - j\n err_j_i_n = calc_j(r_f_mag, i_n_mag + i_n_err) - j\n sigma_j_eqn = 0.246\n j_err = np.sqrt(err_j_r_f**2 + err_j_i_n**2 + sigma_j_eqn**2)\n return j, j_err\n elif output_mag.upper() == 'H':\n def calc_h(r_f, i_n): return r_f + 0.25 * (r_f - i_n) ** 2 - 2.17 * (r_f - i_n) - 0.67\n h = calc_h(r_f_mag, i_n_mag)\n err_h_r_f = calc_h(r_f_mag + r_f_err, i_n_mag) - h\n err_h_i_n = calc_h(r_f_mag, i_n_mag + i_n_err) - h\n sigma_h_eqn = 0.321\n h_err = np.sqrt(err_h_r_f**2 + err_h_i_n**2 + sigma_h_eqn**2)\n return h, h_err\n elif output_mag.upper() == 'K':\n def calc_k(r_f, i_n): return r_f + 0.28 * (r_f - i_n) ** 2 - 2.35 * (r_f - i_n) - 0.73\n k = calc_k(r_f_mag, i_n_mag)\n err_k_r_f = calc_k(r_f_mag + r_f_err, i_n_mag) - k\n err_k_i_n = calc_k(r_f_mag, i_n_mag + i_n_err) - k\n sigma_k_eqn = 0.374\n k_err = np.sqrt(err_k_r_f**2 + err_k_i_n**2 + sigma_k_eqn**2)\n return k, k_err\n else:\n raise ValueError(\"output_mag must be set to either J, H, or K\")",
"def show_gf(self, x):\n g = np.zeros((len(x[0]), self._num_fu), dtype=np.float64)\n for j in range(self._num_fu):\n x1 = self._gf[j*5]\n x2 = self._gf[j*5+1]\n x3 = self._gf[j*5+2]\n w = self._gf[j*5+3]\n a = self._gf[j*5+4]\n r1 = pow((x[0]-x1), 2)+pow((x[1]-x2), 2)+pow((x[2]-x3), 2)\n g[:, j] = a*np.exp(-r1/abs(w))\n\n return g",
"def bergman_input(self):\n return self.m_t"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Look for and return any unexplored point including the given seed. Calling map.find_above(MSS) after map.block_down(MSS) will thus find strict supersets of the MSS, as the MSS itself has been blocked.
|
def find_above(self, seed):
    superset_exists = self.solver.solve((i + 1) for i in seed)
    if superset_exists:
        return self.get_seed()
    else:
        return None
|
[
"def solve_seed(seed: int):\n return solve(Position.deal(seed), GOAL)",
"def get_species_saddle_point(self):\n energies = [self.species[i].energy for i in range(self.n_points)]\n\n if any(energy is None for energy in energies):\n raise FitFailed\n\n # Peaks have lower energies both sides of them\n peaks = [i for i in range(1, self.n_points - 1) if energies[i-1] < energies[i] and energies[i+1] < energies[i]]\n\n # Yield the peak with the highest energy first\n for peak in sorted(peaks, key=lambda p: -self.species[p].energy):\n yield self.species[peak]\n\n return None",
"def Step1(self):\n import random\n print('get mask for seedpoints NELLIX is used')\n # Check if we can go\n if self._vol is None or self._params is None:\n raise ValueError('Data or params not yet given.')\n \n t0 = time.time()\n \n # Detect points\n th = self._params.seed_threshold\n pp = get_stent_likely_positions(self._vol, th) # call below\n \n # Create nodes object from found points\n nodes = stentgraph.StentGraph()\n for p in pp:\n p_as_tuple = tuple(p.flat) # todo: perhaps seed detector should just yield list of tuples.\n nodes.add_node(p_as_tuple)\n \n t1 = time.time()\n if self._verbose:\n print()\n print('Found %i seed points, which took %1.2f s.' % (len(nodes), t1-t0))\n \n # Store the nodes\n self._nodes1 = nodes\n \n # Draw?\n if self._draw:\n self.Draw(1)\n \n return nodes",
"def get_stent_likely_positions(data, th):\n \n # Get mask\n mask = get_mask_with_stent_likely_positions(data, th)\n \n # Convert mask to points\n indices = np.where(mask==2) # Tuple of 1D arrays\n pp = PointSet( np.column_stack(reversed(indices)), dtype=np.float32)\n \n # Correct for anisotropy and offset\n if hasattr(data, 'sampling'):\n pp *= PointSet( list(reversed(data.sampling)) ) \n if hasattr(data, 'origin'):\n pp += PointSet( list(reversed(data.origin)) ) \n \n return pp",
"def _findNearest(self, point):\n def distance(x, y):\n return math.sqrt(Numeric.sum((y - x)**2))\n\n # Initialize the distance to infinity\n dist = 1e300000\n winner = None\n\n for coord, frame in self.mapping.iteritems():\n delta = distance(coord, point)\n if delta < dist:\n dist = delta\n winner = frame\n\n return winner",
"def find_closest(self, p, ps):\n # drop the point if in points\n points = self.remove_point(p, ps)\n dist_2 = np.sum((points - p)**2, axis=1)\n return points[np.argmin(dist_2), :], np.min(dist_2)",
"def get_seed_points(img,seed_values):\n\n m,n = img.shape\n coordinates = [(i,j) for i,j in it.product(range(m),range(n)) if img[i,j] in seed_values]\n\n return coordinates",
"def test_find_best_W_mers_2(self):\n self.options.min_num_sites = self.options.max_num_sites = num_to_find = 2\n \n # load data and create STEME object\n fasta_file = os.path.normpath(get_fasta_file('T00759-small.fa'))\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n for seed in (\n 'ATGCAGAAAAATTAAG',\n 'TTTAAAATACTTTAAA',\n ):\n # create and seed a model\n W = len(seed)\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n \n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_to_find)\n best_w_mer_finder()\n avg_Z = 0.\n for _eval in best_w_mer_finder.best_w_mers:\n logging.info(\n 'Seed: %s; Site: %s; p(binding): %.2e; p(not binding): %.2e',\n seed, data.get_W_mer(W, _eval.global_pos), _eval.Z, 1.-_eval.Z\n )\n avg_Z += _eval.Z\n logging.info('Seed: %s; Average Z: %.6f', seed, avg_Z / len(best_w_mer_finder.best_w_mers))\n \n #\n # Check we found the seed\n #\n for _eval in best_w_mer_finder.best_w_mers:\n if data.get_W_mer(W, _eval.global_pos) == seed:\n break\n else:\n raise RuntimeError('Could not find seed in best W-mers')\n \n #\n # Log the product of p-values\n #\n best_w_mer_finder.update_model(num_to_find, use_pseudo_counts=False)\n logging.info('Seed: %s; log PoP: %.6f', seed, algorithm.significance.log_product_p_values(model))",
"def getSeedPositions(self):\n return self._pointList[:-1] if self.isValid() else []",
"def get_nearest_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n track_pin = self.convert_track_to_pin(coord)\n min_dist = pin.distance(track_pin)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])",
"def find_S_and_E(self, color):\n E = {} # For each one point eye, record the blocks it connects\n S = {} # Each block is indexed by its anchor, which is the \n # smallest point in the block\n S_eyes = {} # For each block, record one point eyes it connects\n \n # find E\n empty_points = self.get_empty_points()\n for point in empty_points:\n if self.is_eye(point, color):\n E[point] = set()\n \n # find S\n anchor_dic = {}\n for x in range(1, self.size+1):\n for y in range(1, self.size+1):\n point = self._coord_to_point(x,y)\n if self.get_color(point) != color:\n continue\n if point in anchor_dic:\n continue\n stack_points = [point]\n block_points = [point]\n min_index = point\n one_point_eyes = set()\n while stack_points:\n current_point = stack_points.pop()\n neighbors = self._neighbors(current_point)\n for n in neighbors :\n if n not in block_points:\n if self.get_color(n) == BORDER:\n continue\n if self.get_color(n) == color:\n stack_points.append(n)\n block_points.append(n)\n if n < min_index:\n min_index = n\n if n in E:\n one_point_eyes.add(n)\n for p in block_points:\n anchor_dic[p] = min_index\n S_eyes[min_index] = one_point_eyes\n for e in one_point_eyes:\n assert e in E\n E[e].add(min_index)\n S[min_index] = block_points\n return S, E, S_eyes",
"def find_min_energy_region(mfcc, fs, hop, frame_size=32, randomize=False, rand_frame=5):\n\n # windowed [r x m x f]\n x_win = np.squeeze(view_as_windows(mfcc[36, :], frame_size, step=1))\n\n # best onset position\n bon_pos = np.argmin(np.sum(x_win, axis=1))\n\n # randomize a bit\n if randomize:\n bon_pos += np.random.randint(-rand_frame, rand_frame)\n if bon_pos > x_win.shape[0]-1:\n bon_pos = x_win.shape[0]-1\n elif bon_pos < 0:\n bon_pos = 0\n\n return frames_to_time(bon_pos, fs, hop), bon_pos",
"def nearest_bee(self, beehive):\n # BEGIN Problem 3 and 4\n loc = self.place\n distance = 0\n\n while loc.name != 'Hive':\n if random_or_none(loc.bees) == None:\n loc = loc.entrance\n distance +=1\n else:\n if distance >= self.min_range and distance <= self.max_range:\n return random_or_none(loc.bees)\n else:\n loc = loc.entrance\n distance += 1\n return None\n # END Problem 3 and 4",
"def find_sandwich_top_below(blk):\n if blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return blk\n # Always follow the main branch of a flow: the last connection.\n _blk = blk.connections[len(blk.connections) - 1]\n while _blk is not None:\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return _blk\n _blk = _blk.connections[len(_blk.connections) - 1]\n return None",
"def ClosestPrecedingFinger(self, id):\r\n for i in range(M_BITS, 0, -1):\r\n if self.IsInRange(self.fingerTable[i].Node.HashValue, self.nodeInfo.HashValue, False, id, False):\r\n return self.fingerTable[i].Node\r\n return self.nodeInfo",
"def selectBestDefenisblePosition(self):\n minDistance=min(self.defendCoordinates[x] for x in self.defendCoordinates.keys())\n bestDefensivePosition = filter(lambda x: self.defendCoordinates[x] == minDistance, self.defendCoordinates.keys())\n #print (\"Best Defensive Position\",bestDefensivePosition)\n return random.choice(bestDefensivePosition)",
"def MyDBSCAN(D, eps, MinPts):\n\n # This list will hold the final cluster assignment for each point in D.\n # There are two reserved values:\n # -1 - Indicates a noise point\n # 0 - Means the point hasn't been considered yet.\n # Initially all labels are 0.\n labels = [0] * len(D)\n\n # C is the ID of the current cluster.\n C = 0\n\n # This outer loop is just responsible for picking new seed points--a point\n # from which to grow a new cluster.\n # Once a valid seed point is found, a new cluster is created, and the\n # cluster growth is all handled by the 'expandCluster' routine.\n\n # For each point P in the Dataset D...\n # ('P' is the index of the datapoint, rather than the datapoint itself.)\n for P in range(0, len(D)):\n\n # Only points that have not already been claimed can be picked as new\n # seed points.\n # If the point's label is not 0, continue to the next point.\n if not (labels[P] == 0):\n continue\n\n # Find all of P's neighboring points.\n NeighborPts = regionQuery(D, P, eps)\n\n # If the number is below MinPts, this point is noise.\n # This is the only condition under which a point is labeled\n # NOISE--when it's not a valid seed point. A NOISE point may later\n # be picked up by another cluster as a boundary point (this is the only\n # condition under which a cluster label can change--from NOISE to\n # something else).\n if len(NeighborPts) < MinPts:\n labels[P] = -1\n # Otherwise, if there are at least MinPts nearby, use this point as the\n # seed for a new cluster.\n else:\n C += 1\n growCluster(D, labels, P, NeighborPts, C, eps, MinPts)\n\n # All data has been clustered!\n return labels",
"def _block_of(self, stone):\n marker = np.full(self.maxpoint, False, dtype = bool)\n pointstack = [stone]\n color = self.get_color(stone)\n assert is_black_white(color)\n marker[stone] = True\n while pointstack:\n p = pointstack.pop()\n neighbors = self.neighbors_of_color(p, color)\n for nb in neighbors:\n if not marker[nb]:\n marker[nb] = True\n pointstack.append(nb)\n return marker",
"def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Maximize a given seed within the current set of constraints. The Boolean direction parameter specifies up (True) or down (False)
|
def maximize_seed(self, seed, direction):
    while True:
        comp = self.complement(seed)
        x = self.solver.new_var() + 1
        if direction:
            # search for a solution w/ all of the current seed plus at
            # least one from the current complement.
            self.solver.add_clause([-x] + [i + 1 for i in comp])  # temporary clause
            # activate the temporary clause and all seed clauses
            havenew = self.solver.solve([x] + [i + 1 for i in seed])
        else:
            # search for a solution w/ none of current complement and at
            # least one from the current seed removed.
            self.solver.add_clause([-x] + [-(i + 1) for i in seed])  # temporary clause
            # activate the temporary clause and deactivate complement clauses
            havenew = self.solver.solve([x] + [-(i + 1) for i in comp])
        self.solver.add_clause([-x])  # remove the temporary clause
        if havenew:
            seed = self.get_seed()
        else:
            return seed
|
[
"def minimax_agent(board, d, model):\n max = -9999998\n for move in list(board.legal_moves):\n board.push(chess.Move.from_uci(str(move)))\n value_i = -negaMax(board, d - 1, model)\n board.pop()\n if value_i > max:\n max = value_i\n best_move = move\n print('###############\\nAI moves:\\n', board.lan(best_move))\n return max, chess.Move.from_uci(str(best_move))",
"def maximize_init(self):\n repeat = True\n while repeat:\n repeat = False\n\n for n in self.neighbors:\n b = len(set(self.G.neighbors(n)) & self.state_set)\n self.tau[n] = b\n # If we can add n to state set\n if not b:\n # add it and repeat from the begining\n self.add({n})\n repeat = True\n break",
"def maximize(self, problem, state, current_depth):\n \n \"*** YOUR CODE GOES HERE ***\"\n\n ##make sure the current player is a maximize player\n if problem.get_maximizing_player() == 0:\n \n ##if search to terminal node or to the end of search depth, stop search \n if problem.terminal_test(state) or current_depth == self.depth:\n \n ##make the value and action to be pair and return\n return (self.evaluation(problem, state),'no action')\n\n else:\n ##if the current state is not the terminal state, make value is -infinity\n max_utility = - float(\"inf\")\n \n ##evaluate the next depth based on current state\n for nextmove in problem.get_successors(state):\n\n ##get the value in the following search result from minimize method based on 'move' state\n value = self.minimize(problem, nextmove[0], (current_depth + 1))\n \n ##compare with value and max_utility, then choose the big one\n if value > max_utility:\n max_utility = value\n max_action = nextmove[-2]\n \n ##make max_action and max_utility to become a pair and return\n \n return (max_utility, max_action)",
"def toMaximize(self):\n self.minimax = minimaxType['maximize']",
"def maximizing(state):\n # type: (State) -> bool\n return state.whose_turn() == 1",
"def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass",
"def simulationBasedMinimization(self):\n\n # 1. Compute a bottom-up ordering of all states\n (bottomUpOrder,reverseBottomUpOrder) = self.getBottomUpAndReverseBottomUpOrder()\n\n # 2. Compute simulation relation\n simulationRelation = self.computeSimulationRelation(bottomUpOrder,reverseBottomUpOrder)\n\n # 3. Print Simulation relation\n # print(\"Simulation:\")\n # for orderPosA in range(0,len(bottomUpOrder)):\n # for orderPosB in range(0,len(bottomUpOrder)):\n # if (simulationRelation[orderPosA][orderPosB]):\n # print(\"States \"+str(bottomUpOrder[orderPosA])+\" \"+str(bottomUpOrder[orderPosB])+\" sim\\n\")\n\n # 4. Minimize using bisimulation\n # -> prepare state mapper\n stateMapper = {}\n for j in range(0,len(bottomUpOrder)):\n found = False\n for i in range(0,j):\n if simulationRelation[i][j] and simulationRelation[j][i]:\n if not found:\n stateMapper[bottomUpOrder[j]] = bottomUpOrder[i]\n found = True\n if not found:\n stateMapper[bottomUpOrder[j]] = bottomUpOrder[j]\n\n # Renumber according to state wrapper\n for i in range(0,len(bottomUpOrder)):\n newTransitions = []\n for (a,b) in self.transitions[i]:\n newTransitions.append((stateMapper[a],b))\n self.transitions[i] = newTransitions\n self.initialStates = [stateMapper[a] for a in self.initialStates]",
"def solve(self):\n\n self.remove_impossible_targets()\n random.shuffle(self.targets)\n best_move = list(self.targets)\n best_perf = self.compute_performance()\n for i in range(settings.MAX_RANDOM_PLANNER_ITERATION):\n random.shuffle(self.state)\n perf = self.compute_performance()\n if perf < best_perf:\n best_move = list(self.state)\n\n self.state = best_move",
"def collapse(self, direction=\"top-left\"):\n parts = direction.split(\"-\")\n # top and bottom are exclusive\n if \"top\" in parts:\n self.max_row_p = self.min_row_p\n self.max_row = self.min_row\n elif \"bottom\" in parts:\n self.min_row_p = self.max_row_p\n self.min_row = self.max_row\n # left and right are exclusive\n if \"left\" in parts:\n self.max_col_p = self.min_col_p\n self.max_col = self.min_col\n elif \"right\" in parts:\n self.min_col_p = self.max_col_p\n self.min_col = self.max_col",
"def _move_satisfy_random_constraint(self):\n secure_random = random.SystemRandom()\n done = False\n while not done:\n c = secure_random.choice(self.constraints)\n if self._is_constraint_violated(c):\n done = True\n # swap 2 wizards to move closer\n self._swap_wizards(c[random.randint(0, 1)], c[2])\n # with probability 0.5, swap the two border wizards\n if random.randint(0, 1) == 1:\n self._swap_wizards(c[0], c[1])\n if not done: print(\"Nothing to do...\")",
"def optimize(self, epsilon = 0.001, maxiters = 250, verbose = 1):\n\t\tif verbose != 0: verbose = 1\n\t\tif self._optimized: \n\t\t\tprint(\"Optimal alignment already determined.\")\n\t\t\treturn\n\t\tnm=2*self.hdr['nimg']\n\t\tguess=[np.random.randint(self._min,self._max) for i in range(nm)]\n\t\tsm=Simplex(self._compares,guess,[5]*nm,data=self)\n\t\tmn=sm.minimize(epsilon = epsilon, maxiters = maxiters, monitor = verbose) \n\t\tprint(\"\\n\\nBest Parameters: {}\\nError: {}\\nIterations: {}\\n\".format(mn[0],mn[1],mn[2]))\n\t\tself._optimized = True",
"def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)",
"def setMaintainUpDirection(self, flag): \n\t\tLogging.info(\"Will maintain up direction: %s\" % (flag != 0), kw = \"animator\")\n\t\tself.maintainUpDirection = flag",
"def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n target_function = functools.partial(target_function, **args)\n\n # initialize the optimizer\n value = target_function()\n value_best, state_best = value, self.sens_mat.copy()\n \n if ret_info:\n # store extra information\n start_time = time.time()\n info = {'values': {}}\n values_count = self.parameters['optimizer_values_count']\n values_step = max(1, steps // values_count)\n \n if multiprocessing:\n # run the calculations in multiple processes\n pool_size = self.get_number_of_cores()\n pool = mp.Pool(processes=pool_size)\n if ret_info:\n values_step = max(1, values_step // pool_size)\n \n # iterate for given number of steps\n for step in range(int(steps) // pool_size):\n joblist = []\n init_arguments = self.init_arguments\n for _ in range(pool_size):\n # modify the current state and add it to the job list\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n params = init_arguments['parameters'] \n params['sensitivity_matrix'] = self.sens_mat\n params['initialize_state']['sensitivity'] = 'exact'\n \n joblist.append((copy.deepcopy(init_arguments), target))\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # run all the jobs\n results = pool.map(_run_job, joblist)\n \n # find the best result \n if direction == 'max':\n res_best = np.argmax(results)\n if results[res_best] > value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n elif direction == 'min':\n res_best = np.argmin(results)\n if results[res_best] < value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n else:\n raise ValueError('Unsupported direction `%s`' % direction)\n \n if ret_info and step % values_step == 0:\n info['values'][step * pool_size] = results[res_best]\n \n else:\n # run the calculations in this process\n for step in range(int(steps)):\n # modify the current state\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # get the value of the new state\n value = target_function()\n \n improved = ((direction == 'max' and value > value_best) or\n (direction == 'min' and value < value_best))\n if improved:\n # save the state as the new best value\n value_best, state_best = value, self.sens_mat.copy()\n else:\n # undo last change\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n if ret_info and step % values_step == 0:\n info['values'][step] = value_best\n\n # sort the best state and store it in the current object\n state_best = self.sort_sensitivity_matrix(state_best)\n self.sens_mat = state_best.copy()\n\n if ret_info:\n info['total_time'] = time.time() - start_time \n info['states_considered'] = steps\n info['performance'] = steps / info['total_time']\n return value_best, state_best, info\n else:\n return value_best, state_best",
"def optimize_library_anneal(self, target, direction='max', steps=100,\n ret_info=False, args=None):\n # lazy import\n from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport\n \n # prepare the class that manages the simulated annealing\n annealer = ReceptorOptimizerAnnealer(self, target, direction, args,\n ret_info=ret_info)\n annealer.steps = int(steps)\n annealer.Tmax = self.parameters['anneal_Tmax']\n annealer.Tmin = self.parameters['anneal_Tmin']\n if self.parameters['verbosity'] == 0:\n annealer.updates = 0\n\n # do the optimization\n MI, state = annealer.optimize()\n\n # sort the best state and store it in the current object\n state = self.sort_sensitivity_matrix(state)\n self.sens_mat = state.copy()\n \n if ret_info:\n return MI, state, annealer.info\n else:\n return MI, state",
"def gen_heuristic(state, problem):\n # Enter your code here and remove the pass statement below\n sammy, medal = state\n if medal:\n return max(manhattan_distance_cost(sammy, medl, problem) for medl in medal)\n else:\n return 0",
"def default_fitness(maximise):\n if maximise:\n return -100000.0\n else:\n return 100000.0",
"def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p",
"def greedyOptimize(self, cpoints):\n # the currently best known energy is the current energy\n best_energy = self.totalEnergy(cpoints.values())\n best_before = best_energy\n cpoints_ = cpoints.copy()\n # iterate over each control point in order to find the movement\n # that improves it i.e. the snakes overall energy best\n cv = cpoints_.values()\n for i in range(len(cpoints_)):\n best_step = None \n # test all possible steps\n for step in self.step_directions:\n c1 = cpoints_[i]\n # only check a step if it ends within the image bounds\n if self.inImageBound(cpoints_[i] + step):\n # apply the step to the control point\n cpoints_[i] = cpoints_[i] + step\n # compute the new energy\n new = self.totalEnergy(cpoints_.values())\n # check wether it is a true improvement\n if new < best_energy:\n assert new < best_energy\n # update the currently best known energy\n best_energy = new\n best_step = step\n cv = cpoints_.values()\n cpoints_[i] = cpoints_[i] - step\n assert (c1[0], c1[1]) == (cpoints_[i][0], cpoints_[i][1])\n \n # apply the best step to the control point\n if best_step != None:\n cpoints_[i] = cpoints_[i] + best_step\n \n # ensure saneness\n assert np.array_equal(cv, cpoints_.values())\n self.bestenergy_debug = best_energy\n assert best_before >= best_energy, '(%s !>= %s) the optimized energy is not euqal-smaller than the energy before' % (best_before, best_energy)\n assert self.totalEnergy(cpoints_.values()) == best_energy, '(%s != %s) the new calculated energy does not equal the best calculated energy' % (self.totalEnergy(cpoints_.values()), best_energy)\n return cpoints_"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the complement of a given set w.r.t. the set of mapped constraints.
|
def complement(self, aset):
    return self.all_n.difference(aset)
|
[
"def get_complement(seta):\n complement_set = set()\n\n for elem in seta:\n new_elem_tuple = (elem[0], 1 - elem[1])\n complement_set.add(new_elem_tuple)\n\n return complement_set",
"def complement(self) -> 'RangeSet':\n return RangeSet(Range()) - self",
"def get_complement(seta):\n\n complement_set = set()\n\n for elem in seta:\n new_elem_tuple = (elem[0], float(D('1.0') - D(str(elem[1]))))\n complement_set.add(new_elem_tuple)\n\n return complement_set",
"def _complement(self, other):\n if isinstance(other, AbstractSet):\n vars1 = self.variables\n vars2 = other.variables\n if not type_match(vars1, vars2):\n return other\n\n vars12 = rename_variables_in(vars1, free_symbols(self) | free_symbols(other))\n expr12 = And(other.expr.xreplace(dict(zip(vars2, vars12))),\n Not(self.expr.xreplace(dict(zip(vars1, vars12)))))\n return AbstractSet(vars12, expr12)",
"def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)",
"def complement(self):\n n = self.n\n ri_staircase = RootIdeals().init_staircase(n)\n ri_complement_set = set(ri_staircase) - set(self)\n ri_complement = RootIdeal(ri_complement_set, n)\n return ri_complement",
"def complement(self):\n N = self._size + 1\n new_covers = [[N - i[0], N - i[1]] for i in self._poset.cover_relations_iterator()]\n return TamariIntervalPoset(N - 1, new_covers)",
"def complement(self) -> 'FuzzySet':\n\n return FuzzySet(self.logic.complement(self.membership_function),\n self.logic)",
"def complement(COMPLEMENT, set,segment_size=None):\n from Redist.pyfuzzy.set.Polygon import Polygon\n ret = Polygon()\n\n prev_x,prev_y = None,None\n for x,y in _complement_generator(COMPLEMENT,set):\n if (segment_size is not None) and (prev_x is not None) and (abs(y-prev_y)>0.01):\n diff = x-prev_x\n if diff > 2.*segment_size:\n n = int(diff/segment_size)\n dx = diff/n\n for i in range(1,n):\n x_ = prev_x+i*dx\n ret.add(x_,COMPLEMENT(set(x_)))\n ret.add(x,y)\n prev_x,prev_y = x,y\n\n return ret",
"def negated_constraints(self):\n ms = self.members\n result = BoundTuple()\n for m in ms:\n mc = m.negated_constraints()\n if mc: result = result + mc\n return result",
"def negated_constraints(self):\n ms = self.members\n result = ms.negated_constraints()\n for m in ms[1:]:\n if not result: return result\n mc = m.negated_constraints()\n if not mc: return mc\n result = result & mc\n return result",
"def test_constraints_eliminate_all_solutions(self):\n b = be('(A xor B) and C')\n with b.constrain(A=True, B=True):\n res = list(b.sat_all())\n self.assertEqual(0, len(res))",
"def _unused(key_set, tup_set):\n if not tup_set:\n return key_set\n return set((key for key in key_set if not key in zip(*tup_set)[0]))",
"def notList(set):\n returnSet = []\n if len(set) == 9: #assume booleans\n for bit in set:\n returnSet.append(not bit)\n return returnSet\n else:\n for x in range(9):\n if x not in set:\n returnSet.append(x)\n return returnSet",
"def complement(l, universe=None):\n\tif universe is not None:\n\t\tuniverse = set(universe)\n\telse:\n\t\tuniverse = set(range(min(l), max(l)+1))\n\treturn sorted(universe - set(l))",
"def complement_map(cls):\n comp_map = {\n 'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',\n 'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',\n 'H': 'D', 'V': 'B', 'N': 'N', 'a': 'u', 'u': 'a', 'g': 'c',\n 'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',\n 'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'\n }\n\n comp_map.update({c: c for c in cls.gap_alphabet()})\n return comp_map",
"def complement_map(cls):\n comp_map = {\n 'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',\n 'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',\n 'H': 'D', 'V': 'B', 'N': 'N', 'a': 't', 't': 'a', 'g': 'c',\n 'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',\n 'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'\n }\n\n comp_map.update({c: c for c in cls.gap_alphabet()})\n return comp_map",
"def minimal_do(G: CausalDiagram, Y: str, Xs: AbstractSet[str]) -> FrozenSet[str]:\n return frozenset(Xs & G.do(Xs).An(Y))",
"def get_complemented(self, *args):\n def getcomp(begin, end=self.get_num() - 1):\n return CPX_PROC.getindconstr_constant(\n self._env._e, self._cplex._lp, begin, end)[2]\n return apply_freeform_two_args(getcomp, self._conv, args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a given clause to the Map solver.
|
def add_clause(self, clause):
    self.solver.add_clause(clause)
    if self.dump is not None:
        self.dump.write(" ".join(map(str, clause)) + " 0\n")
|
[
"def add_clause(self, clause):\n return self._compile(clause)",
"def add_clause(self, clause):\n self.abstract_clauses.append(clause)",
"def add_clause(self, clause):\n # TODO: Do some simplifications, and check whether clause contains p\n # and -p at the same time.\n\n if not isinstance(clause, Clause):\n clause = Clause(clause, learned=False)\n\n if len(clause) == 0:\n # Clause is guaranteed to be false under the current variable\n # assignments.\n self.status = False\n elif len(clause) == 1:\n # Unit facts are enqueued.\n self.enqueue(clause[0])\n else:\n p, q = clause[:2]\n self.watches[-p].append(clause)\n self.watches[-q].append(clause)\n\n self.clauses.append(clause)",
"def tell (self, clause):\n self.clauses.add(clause)",
"def add_clause(self, clause, soft=False):\n\n # first, map external literals to internal literals\n # introduce new variables if necessary\n cl = list(map(lambda l: self._map_extlit(l), clause))\n\n if not soft:\n # the clause is hard, and so we simply add it to the SAT oracle\n self.oracle.add_clause(cl)\n else:\n self.soft.append(cl)\n\n # soft clauses should be augmented with a selector\n sel = cl[0]\n if len(cl) > 1 or cl[0] < 0:\n self.topv += 1\n sel = self.topv\n\n self.oracle.add_clause(cl + [-sel])\n\n self.sels.append(sel)",
"def add_clause(self, *vars):\n\n self._clauses.append(set(vars))",
"def AddUseClause(self, use_clause):\n assert _IsValidUseClause(use_clause), use_clause\n self.use_clauses.append(use_clause)",
"def add_match_clause_restriction(self, sqlvar, expression, clause=None):\n self.get_match_clause_restrictions(clause=clause).add((sqlvar, expression))",
"def add_where_clause(self, clause):\r\n if not isinstance(clause, WhereClause):\r\n raise StatementException(\"only instances of WhereClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.where_clauses.append(clause)",
"def add_statement(rq_dict, statement, result_data_contents=\"graph\"):\n rq_dict[\"statements\"].append({\"statement\": statement})\n rq_dict[\"statements\"][-1][\"resultDataContents\"] = [result_data_contents]",
"def add(self, stmt, params=()):\n self._query_buffer[stmt] = params",
"def add_facet(self, name, q):\r\n \r\n self.queries.append((name, q))\r\n self.map = None",
"def parse_and_add_clause(self, line):\n clause = list()\n for literal in line.split():\n negated = 1 if literal.startswith('!') else 0\n variable = literal[negated:]\n if variable not in self.variable_table:\n self.variable_table[variable] = len(self.variables)\n self.variables.append(variable)\n encoded_literal = self.variable_table[variable] << 1 | negated\n clause.append(encoded_literal)\n self.clauses.append(tuple(clause))",
"def add(self, condition):\n\t\tself.conditions.append(condition)",
"def set_match_clause(self, clause):\n self.match_clause = clause",
"def add_change_clause(sv, nod, tree, vlu):\r\n clau=((Change, tree, None), vlu) \r\n if not clau in nod.clauses: nod.clauses+=[clau] # avoid duplicates\r",
"def add(condition):",
"def add_match_clause_join(self, var1, var2, clause=None):\n self.get_match_clause_joins(clause=clause).add((var1, var2))",
"def add_phrase(self, phrase: Phrase) -> None:\n self.phrase_string_map[phrase.phrase_string] = phrase\n self.phrase_type[phrase.phrase_string].add(\"phrase\")\n self.phrase_index[phrase.phrase_string] = phrase\n self.phrase_length_index[len(phrase.phrase_string)].add(phrase.phrase_string)\n self._index_phrase_words(phrase)\n self._index_phrase_tokens(phrase)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sends a POST request containing `data` to `url`. `auth` should be a tuple containing (username, password).
|
import urllib
import urllib2
from base64 import b64encode

def post(url, data, auth=None, retries=10):
    # NOTE: `retries` is accepted but not used anywhere in the body.
    if not url.startswith('http://'):
        url = 'http://' + url
    request = urllib2.Request(url)
    if auth:
        # Build a Basic auth header from the (username, password) tuple.
        request.add_header('Authorization', 'Basic %s' % b64encode('%s:%s' % auth))
    # Supplying a body to urlopen() makes urllib2 issue a POST request.
    params = urllib.urlencode(data)
    response = urllib2.urlopen(request, params)
    return response.read()
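# A minimal usage sketch. The endpoint, payload, and credentials below are
# hypothetical (not from the original), and `retries` is currently ignored:
# body = post('example.com/api/items', {'name': 'widget'}, auth=('alice', 'secret'))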
|
[
"async def post(self, url, data, header, auth: Union[BasicAuth, Dict], timeout: int) \\\n -> Union[ClientBackendResponse, Dict]:\n if isinstance(auth, Dict):\n login = auth.get(\"username\", \"\")\n password = auth.get(\"password\", \"\")\n auth_method = BasicAuth(login, password)\n else:\n auth_method = auth\n\n return await self.request_http(\n method=\"post\",\n url=url,\n data=data,\n headers=header,\n auth=auth_method,\n timeout=ClientTimeout(total=timeout),\n )",
"def post(self, data):\n req = self._new_http_request()\n req.add_data(self._prepare_data(data))\n\n return self._urllib2.urlopen(req)",
"def post(self, url, data=None, **kwargs):\n return self.oauth_request(url, 'post', data=data, **kwargs)",
"def execute_post_request_as_authorized_user(url, data=None, json_data=None):\n response = requests.post(url, json=json_data, data=data, headers=get_authorization_headers())\n\n return {\"text\": response.text, \"code\": response.status_code}",
"def post(url, headers=None, data=None, io_loop=None):\n\n # TODO: Add authentication parameters\n return AsyncHTTPClient(io_loop=io_loop).fetch(\n url,\n method='POST',\n body=urlencode(data),\n headers=headers)",
"def _submit(self, endpoint, data):\n full_url = self._prepare_url(endpoint)\n req = self._request(full_url, self._username, self._apikey)\n req.post(data)",
"def sign_in_authdata(self, authdata):\n body = self.SIGNIN_AUTHDATA_BODY % base64.encodestring(authdata)\n response = requests.post(self.signin_url, data=body)\n return self._process_sign_in_result(response)",
"def post(self, url, headers=None, data=None):\n pass",
"def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)",
"def post_req(self, endpoint : str, data : dict):\r\n return self.session.post('https://127.0.0.1:' + self.app_port + endpoint, data = data, verify = False)",
"def send_outbound_request(self, auth, data, method=RequestType.POST):\n\n url = reverse(\"outbound_sms\")\n return self._send_request(url, method, auth, data)",
"async def post_req(self, endpoint : str, data : dict):\r\n return self.session.post('https://127.0.0.1:' + self.app_port + endpoint, data = data, verify = False)",
"def post_action(self, path, data=None):\n response = self._request(\"POST\", path, urllib.urlencode(data) if data else None)\n return self._handle_response(response)",
"async def send(self, url, data, header, auth: Union[BasicAuth, Dict], timeout: int):\n raise NotImplementedError",
"def token_request(post_data, auth_config, conformance):\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n\n uris = fhir.get_oauth_uris(conformance)\n\n response = requests.post(uris['token'],\n data=post_data,\n allow_redirects=False,\n auth=auth,\n timeout=5)\n\n return response",
"async def _post(self, data: dict = {}) -> dict:\n async with httpx.AsyncClient(timeout=10 * 60.0) as client:\n response = await client.post(self.url, params=self._params, data=data)\n logger.debug(\n \"made POST to {url} with parameters {params} and payload\\n{payload}\",\n url=self.url,\n params=self._params,\n payload=data,\n )\n return response.json()",
"def post(self, command, data={}):\n\t\treturn self.__open(\"%s%s\" % (self.api, command), headers=self.head, data=data)",
"def do_request(self, data):\n url = '%s:%s%s' % (\n self.server_address,\n self.server_port,\n self.login_route\n )\n\n # get an httprequest with the headers and such using the provided data\n r = UrlAuthenticator.create_request(url, data)\n resp = None\n\n # hit the url and hopefully get a good response\n try:\n with urllib.request.urlopen(r) as f:\n resp = f.read()\n except urllib.error.HTTPError as e:\n # user was not authorized most likely\n return None\n\n return resp",
"def _post(self, url, data=None):\n if self.ajax_post:\n response = self.client.post(\n url, data, HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\")\n else:\n response = self.client.post(url, data)\n return response"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Collect characters while within a source record
|
def characters(self, content):
if self.in_source: self.chars += content
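# A self-contained sketch of how a handler method like this is typically wired up.
# Assumptions (not from the original): the record element is named "source" and the
# enclosing class is an xml.sax.ContentHandler subclass that toggles `in_source`.
import xml.sax

class SourceHandler(xml.sax.ContentHandler):
    """Accumulates character data that appears inside <source> elements."""
    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        self.in_source = False
        self.chars = ""

    def startElement(self, name, attrs):
        if name == "source":
            self.in_source = True
            self.chars = ""

    def endElement(self, name):
        if name == "source":
            self.in_source = False

    def characters(self, content):
        # characters() may fire several times per text node, hence the accumulation.
        if self.in_source: self.chars += content

# Example: xml.sax.parseString(b"<r><source>some text</source></r>", SourceHandler())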
|
[
"def characters(self, data):\n pass",
"def extractCharacters(self):\n \n length, high=self.getSize() ##geting size of LineFrame object - high and length\n vHisto = self.vLinesHistogram()\n spaceLength = findSpaceLength(vHisto,high) ##finding of expected length of Space in line\n position = 0 ##position, from where findChar is serching for character\n Line=[] ##list of words in line\n Word=[] ##list of characters in word\n correction=0\n End = False\n while not End: ##while not End of the line, search for characters\n position, char, correction = self.findChar(position, spaceLength+correction)\n if type(char) == str: #check if returned CharFrame object or repor\n if char == \"Space\": #Space was finded in line, end of word, Word list append to Line list, and new Word list started\n Line.append(Word)\n Word=[]\n elif char == \"Enter\": ##Finden end of line, Wor list closed and appened to Line list, end of method, returned Line list\n Line.append(Word)\n #for i in range(0,len(Line)):\n #for j in range(0, len(Line[i])):\n #Line[i][j].savePicture(str(i)+\"kafel\"+str(j)+\".bmp\",\"BMP\")\n return Line\n else: ## Character finden in line, append CharFrame object to Word list\n Word.append(char)",
"def raw(text):",
"def raw(self, txt_unRaw):",
"def parseC(self, field, data):\r\n return str(data.rstrip(b'\\0 '), self.encoding, errors='replace')",
"def extract_text(self, data):",
"def extract_cle_publc(self):",
"def redact(data, chars):\n result = \"\"\n for d in data:\n if d in chars:\n result += \"x\"\n else:\n result += d\n return result",
"def _get_chars(to_encode):\n return list(to_encode)",
"def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()",
"def read_until(self, chars):\n\n start_index = self.index\n\n while self.index < self.length and self.xtext[self.index] not in chars:\n self.index += 1\n\n assert self.index < self.length\n\n return self.xtext[start_index:self.index]",
"def generate_lines(self):\n buf = self.original_file.read(self.buffer_size)\n line = ''\n while buf:\n for char in buf:\n if line.endswith('\\r') and char == '\\n':\n last_line = line\n line = ''\n yield self.sanitize(last_line)\n elif line.endswith('\\r') or line.endswith('\\n'):\n last_line = line\n line = char\n yield self.sanitize(last_line)\n else:\n line += str(char) if isinstance(char, int) else char\n buf = self.original_file.read(self.buffer_size)\n if not buf and line:\n yield self.sanitize(line)",
"def process_NMEA(self, incoming_data):\n #complete_sentence = \"\"\n data = chr(incoming_data) \n return data",
"def filter_record(self, record):\n if len(record) >= self.max_length:\n return record[:self.max_length]\n else:\n return record",
"def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii",
"def fetch_line(self) -> None:\r\n word = self.reader.copy_to_end_of_line().strip()\r\n draft = word\r\n self.in_concat_mode = word.endswith(b\"+\")\r\n match = self.rus_end_of_line_pattern.match(draft)\r\n if match is not None:\r\n last_rus_letter = match.end(1)\r\n else:\r\n last_rus_letter = -1\r\n plus = draft.rfind(b\"+\")\r\n quote = draft.rfind(b\"'\")\r\n # позиция последнего читаемого символа\r\n end_of_line = max((last_rus_letter, plus, quote))\r\n # расстояние, на которое надо переместить указатель\r\n distance = max(plus + 1, last_rus_letter - 1, quote)\r\n # выделяем разбираемый кусок текста из прочитанной строки\r\n # чтобы избежать попадания в неё скобок\r\n # удаляем плюс в конце, если он есть\r\n draft = draft[:end_of_line].strip()\r\n # перекодируем буферную строку, bytes => utf-8\r\n # если есть закодированные символы, обрабатываем их\r\n # иначе просто переводим байтовую строку в юникод\r\n # print(draft)\r\n if self.check_encoded_russian_letters(draft):\r\n # draft = self.decode_russian_letters(draft)\r\n draft = self.decode_russian_letters_regex(draft)\r\n else:\r\n # удаляем из строки лишние кавычки перед преобразованием в юникод\r\n draft = draft.replace(b\"'\", b\"\").decode(\"utf-8\")\r\n # добавляем строку в буфер для сборки токена\r\n self.concat_strings_buffer.append(draft)\r\n # если в конце стоял '+', ищем следующий токен\r\n # иначе склеиваем новый токен из строк в буфере\r\n if self.in_concat_mode:\r\n self.assignment = False\r\n self.reader.forward(distance)\r\n self.fetch_next_token()\r\n else:\r\n self.current_token = QuotedStringToken(self.mark, \"\".join(self.concat_strings_buffer))\r\n self.concat_strings_buffer = []\r\n self.reader.forward(distance)",
"def _read_char(self):\n if self.read_pos >= len(self.data):\n self.char = \"\"\n else:\n self.char = self.data[self.read_pos]\n\n self.pos = self.read_pos\n self.read_pos += 1",
"def _readChars (self, num, asByteString = False):\n chars = self.file.read (num)\n if asByteString:\n return chars\n chars = re.sub(r'[^ -~]', '', chars.decode ('utf-8'))\n return chars.rstrip ()",
"def preprocess_data(self, dataSrc):\n\t\twith io.open(dataSrc, encoding=\"utf-8\") as f:\n\t\t\traw_data = f.readlines()\n\t\t\tfor x in range(0, len(raw_data)):\n\t\t\t\tfor ch in self.charsToBeRemoved:\n\t\t\t\t\tif (ch in raw_data[x]):\n\t\t\t\t\t\traw_data[x] = raw_data[x].replace(ch, \"\")\n\t\t\t\tself.preprocessedData.append(raw_data[x].lower())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a connection line constraint between the item's handle and the port.
|
def constraint(self, item, handle, glue_item):
start = MatrixProjection(self.start, glue_item.matrix_i2c)
end = MatrixProjection(self.end, glue_item.matrix_i2c)
point = MatrixProjection(handle.pos, item.matrix_i2c)
cx = EqualsConstraint(point.x, start.x)
cy = BetweenConstraint(point.y, start.y, end.y)
return MultiConstraint(start, end, point, cx, cy)
|
[
"def test_line_constraint(self):\n item = Item()\n pos = Variable(1), Variable(2)\n line = (Variable(3), Variable(4)), (Variable(5), Variable(6))\n item.constraint(line=(pos, line))\n self.assertEquals(1, len(item._constraints))\n\n c = item._constraints[0]\n self.assertTrue(isinstance(c, LineConstraint))\n self.assertEquals((1, 2), c._point)\n self.assertEquals(((3, 4), (5, 6)), c._line)",
"def create_fixed_distance_to_line_constraint():\n return FixedDistanceToLineConstraint()",
"def test_item_and_port_glue(self):\n\n ports = self.box1.ports()\n\n # glue to port nw-ne\n sink = self.tool.glue(self.line, self.head, (120, 50))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[0], sink.port)\n\n # glue to port ne-se\n sink = self.tool.glue(self.line, self.head, (140, 70))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[1], sink.port)\n\n # glue to port se-sw\n sink = self.tool.glue(self.line, self.head, (120, 90))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[2], sink.port)\n\n # glue to port sw-nw\n sink = self.tool.glue(self.line, self.head, (100, 70))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[3], sink.port)",
"def add_constraint(self, constraint):",
"def ioLineDrag(self, startItem, pos0, pos1, done=False):\n assert isinstance(startItem, PortItem)\n assert isinstance(pos0, QPointF)\n assert isinstance(pos1, QPointF)\n assert isinstance(done, bool)\n\n if self._draggedLineItem is None:\n self._draggedLineItem = DraggedLineItem(pos0, pos1)\n self.addItem(self._draggedLineItem)\n else:\n self._draggedLineItem.setEndpoint(pos1)\n\n vaildItem = None\n\n if QLineF(pos0, pos1).length() > 5.0:\n # Check if line is over other PortItem\n for item in self.items(pos1):\n if isinstance(item, PortItem):\n vaildItem = item\n print item.name()\n break\n\n self._draggedLineItem.showEndpoint(vaildItem is not None)\n\n if done:\n self.removeItem(self._draggedLineItem)\n self._draggedLineItem = None\n\n if vaildItem is not None:\n # Request connection creation\n name1 = startItem.fullname()\n name2 = vaildItem.fullname()\n self.sigCreateConnection.emit(name1, name2)",
"def net_acl_iptables_rule(item):\n # defaults\n fmt = {\n 'chain': '-A INPUT',\n 'device': '',\n 'protocol': ' -p tcp',\n 'state': '',\n 'identifier': ' -m comment --comment \"20CACL {}\"'.format(item['name']),\n 'target': ' -j ACCEPT',\n }\n\n if item.get('device', None):\n fmt['device'] = ' -i {}'.format(item.device)\n if item.get('protocol', None):\n fmt['protocol'] = ' -p {}'.format(item.protocol)\n # FIXME parse for false\n if item.get('stateful', False) == True:\n fmt['state'] = ' -m state --state NEW'\n if not item.get('ports', None):\n raise ValueError(\"missing ports\")\n else:\n fmt['ports'] = ' -m multiport --dports={}'.format(','.join(map(str, item['ports'])))\n\n line = \"{chain}{device}{protocol}{state}{ports}{identifier}{target}\".format(**fmt)\n\n return line",
"def __create_connection(self, connection_node):\n bsource = str(connection_node.attributes['src_block'].value)\n gsource = str(connection_node.attributes['src_gate'].value)\n bdst = str(connection_node.attributes['dst_block'].value)\n gdst = str(connection_node.attributes['dst_gate'].value)\n self.__connect_blocks(self.__blocks[bsource], gsource, self.__blocks[bdst], gdst)",
"def test_reconnect_same(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n item = cinfo.connected\n port = cinfo.port\n constraint = cinfo.constraint\n\n assert item == self.box1\n assert item != self.box2\n\n # connect to box1 again, handle's connected item and port should be\n # the same but connection constraint will differ\n connected = self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n self.assertEqual(self.box1, cinfo.connected)\n self.assertEqual(self.box1.ports()[0], cinfo.port)\n self.assertNotEqual(constraint, cinfo.constraint)",
"def conn_constraint(self) -> ConnectivityConstraint:\n return self._conn_constraint",
"def slot_constraint(self, item, role_spec):\n return self.kb.slot_value(\n logic.expr(item),\n CONSTRAINT_EXPR,\n logic.expr(role_spec))",
"def __init__(self, srcNode, destNode):\r\n super(NodeConnection, self).__init__()\r\n \r\n self.setSrcNode(srcNode)\r\n self.setDestNode(destNode)\r\n \r\n self._srcPt = None\r\n self._destPt = None\r\n self.setArrowSize(10)\r\n \r\n self.Adjust()\r\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)",
"def _set_constraint(self):\n pass",
"def _build_line(self):\n # Build the line position (consecutive segments):\n nnz_x, nnz_y = np.where(~self._edges.mask)\n indices = np.c_[nnz_x, nnz_y].flatten()\n line_pos = self._pos[indices, :]\n\n # Color either edges or nodes :\n if self._color_by == 'strength':\n nnz_values = self._edges.compressed()\n values = np.c_[nnz_values, nnz_values].flatten()\n elif self._color_by == 'count':\n node_count = Counter(np.ravel([nnz_x, nnz_y]))\n values = np.array([node_count[k] for k in indices])\n self._minmax = (values.min(), values.max())\n if self._clim is None:\n self._clim = self._minmax\n\n # Get the color according to values :\n if isinstance(self._custom_colors, dict): # custom color\n if None in list(self._custom_colors.keys()): # {None : 'color'}\n color = color2vb(self._custom_colors[None], length=len(values))\n else: # black by default\n color = np.zeros((len(values), 4), dtype=np.float32)\n for val, col in self._custom_colors.items():\n color[values == val, :] = color2vb(col)\n else:\n color = array2colormap(values) #, **self.to_kwargs())\n color[:, -1] = self._alpha\n\n # Dynamic color :\n if self._dynamic is not None:\n color[:, 3] = normalize(values.copy(), tomin=self._dynamic[0],\n tomax=self._dynamic[1])\n\n # Send data to the connectivity object :\n self._connect.set_data(pos=line_pos, color=color)",
"def test_create_new_vertex_add_constraint(self):\n constraint1 = PartitionerMaximumSizeConstraint(2)\n constraint2 = PartitionerMaximumSizeConstraint(3)\n constr = list()\n constr.append(constraint1)\n constr.append(constraint2)\n vert = TestVertex(10, \"New AbstractConstrainedVertex\", 256)\n vert.add_constraint(constraint2)\n vert.add_constraint(constraint1)\n self.assertEqual(vert.n_atoms, 10)\n self.assertEqual(len(vert.constraints), 3)\n self.assertEqual(vert.label, \"New AbstractConstrainedVertex\")\n for constraint in constr:\n if constraint not in vert.constraints:\n raise Exception(\"dont exist where should\")",
"def test_horizontal_constraint(self):\n item = Item()\n p1 = Variable(1), Variable(2)\n p2 = Variable(3), Variable(4)\n item.constraint(horizontal=(p1, p2))\n self.assertEquals(1, len(item._constraints))\n\n c = item._constraints[0]\n self.assertTrue(isinstance(c, EqualsConstraint))\n # expect constraint on y-axis\n self.assertEquals(2, c.a)\n self.assertEquals(4, c.b)",
"def addConnection(self, port1Name, port2Name, connItem):\n assert isinstance(connItem, ConnectionItem)\n\n # Ensure port1Name and port2Name are str, not QString\n port1Name = str(port1Name)\n port2Name = str(port2Name)\n\n node1Name = port1Name.split(':')[0]\n node2Name = port2Name.split(':')[0]\n\n if node1Name == node2Name:\n return False\n\n node1 = self.nodeFromName(node1Name)\n node2 = self.nodeFromName(node2Name)\n\n if node1.isConnected(port1Name) or node2.isConnected(port2Name):\n return False\n\n self.addItem(connItem)\n node1.addConnection(port1Name, connItem)\n node2.addConnection(port2Name, connItem)\n\n assert connItem.startPortName() is not None\n assert connItem.endPortName() is not None\n return True",
"def _setup_create_firewall_rule_with_all_params(self, protocol='tcp'):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.CreateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n source_ip = '192.168.1.0/24'\r\n destination_ip = '192.168.2.0/24'\r\n source_port = '0:65535'\r\n destination_port = '0:65535'\r\n action = 'allow'\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--protocol', protocol,\r\n '--source-ip-address', source_ip,\r\n '--destination-ip-address', destination_ip,\r\n '--source-port', source_port,\r\n '--destination-port', destination_port,\r\n '--action', action,\r\n '--enabled',\r\n '--admin-state-up',\r\n '--tenant-id', tenant_id]\r\n position_names = []\r\n position_values = []\r\n if protocol == 'any':\r\n protocol = None\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n protocol=protocol,\r\n source_ip_address=source_ip,\r\n destination_ip_address=destination_ip,\r\n source_port=source_port,\r\n destination_port=destination_port,\r\n action=action, enabled=True,\r\n tenant_id=tenant_id)",
"def build_connection(self, src, tgt) -> NoReturn:\n # If src and tgt are the same node, src not in node_collection or\n # tgt not in node_collection,\n # then skip this edge.\n if src == tgt or src not in self._nodes_collection or tgt not in self._nodes_collection:\n if src.split(':')[0] not in self._nodes_collection:\n warnings.warn(f\"Graph construct a self-loop node {src}. Ignored.\")\n return\n\n if tgt not in self._nodes_collection[src.split(':')[0]].successor_nodes:\n self._nodes_collection[src.split(':')[0]].successor_nodes.append(tgt)\n if src not in self._nodes_collection[tgt].precursor_nodes:\n self._nodes_collection[tgt.split(':')[0]].precursor_nodes.append(src)",
"def test_port_create_with_binding_and_no_subnets(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n\n # No subnets, so no allocation. But, it shouldn't be an error.\n self.assertEqual(0, len(res['port']['fixed_ips']))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draw lifeline. We always draw the lifeline's head. We only draw the lifeline's lifetime when the lifetime is visible.
|
def draw_lifeline(self, box, context, bounding_box):
cr = context.cairo
cr.rectangle(0, 0, self.width, self.height)
stroke(context)
if (
context.hovered
or context.focused
or context.dropzone
or self._lifetime.visible
):
bottom = self._lifetime.bottom
cr = context.cairo
with cairo_state(cr):
cr.set_dash((7.0, 5.0), 0)
x = self._handles[SW].pos.x
top = self._lifetime.top
cr.move_to(top.pos.x - x, top.pos.y)
cr.line_to(bottom.pos.x - x, bottom.pos.y)
stroke(context, dash=False)
# draw destruction event
if self.is_destroyed:
d1 = 8
d2 = d1 * 2
cr.move_to(bottom.pos.x - d1, bottom.pos.y - d2)
cr.line_to(bottom.pos.x + d1, bottom.pos.y)
cr.move_to(bottom.pos.x - d1, bottom.pos.y)
cr.line_to(bottom.pos.x + d1, bottom.pos.y - d2)
cr.stroke()
|
[
"def draw_line():\n\n # Small Size Line\n glLineWidth(0.1)\n glColor3f(0.5, 1.0, 0.9)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n length += 10\n wid += 50\n # Medium Size Line\n glLineWidth(2.0)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n length += 50\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n wid += 50\n # Main Line\n # ordinat\n glLineWidth(1.5)\n glColor3f(0.5, 0.4, 0.8)\n glBegin(GL_LINES)\n glVertex3f(height / 2, 0, 0.0)\n glVertex3f(height / 2, width, 0)\n glEnd()\n # absis\n glBegin(GL_LINES)\n glVertex3f(0, width / 2, 0.0)\n glVertex3f(height, width / 2, 0)\n glEnd()",
"def hline(self, x, y, l, color=0xffffff):\n for i in range(l):\n self.pixel(x+i, y, color)",
"def draw_lines(self):\n if len(self.active_points) > 1:\n pygame.draw.lines(self.screen, (200, 200, 255), True,\n self.active_points, 2)",
"def display_line_map(self):\n lh_count = len(flatten(self.lh_data))\n print('{} horizontal line mapping: {} hline draw calls. {} bytes'.format(\n self.char,\n lh_count,\n len(list(self._stream_lhmap()))\n ))\n print('v' * len(''.join([str(i) for i in range(self.width)])), ' y [(x, length)]')\n for y in range(self.height):\n for x in range(self.width):\n space = ' ' if x < 10 else ' '\n char = space if self.pixels[y * self.width + x] else x\n print(char, end='')\n print(' ', '%2d' % y, self.lh_data[y])\n print()\n\n lv_count = len(flatten(self.lv_data))\n print('{} vertical line mapping: {} vline draw calls. {} bytes'.format(\n self.char,\n lv_count,\n len(list(self._stream_lvmap()))\n ))\n print('>' * len(''.join([str(i) for i in range(self.height)])), ' x [(y, length)]')\n for x in range(self.width)[::-1]:\n for y in range(self.height):\n space = ' ' if y < 10 else ' '\n char = space if self.pixels[y * self.width + x] else y\n print(char, end='')\n print(' ', '%2d' % x, self.lv_data[x])\n print()\n\n print('selecting {} mapping for {} char\\n'.format(\n 'lhmap horizontal' if self.is_char_lhmap() else 'lvmap vertical',\n self.char\n ))",
"def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)",
"def drawLineHorizontal(self, *args):\n return _pyupm_lcd.EBOLED_drawLineHorizontal(self, *args)",
"def draw_lines(self):\n for x_cord in range(0, Dimension.SCREEN_WIDTH.value, Dimension.SQUARE_WIDTH.value):\n pg.draw.line(self.window, Colors.BLACK.value, (x_cord, 0), (x_cord, Dimension.SCREEN_HEIGHT.value))\n\n for y_cord in range(0, Dimension.SCREEN_HEIGHT.value, Dimension.SQUARE_HEIGHT.value):\n pg.draw.line(self.window, Colors.BLACK.value, (0, y_cord), (Dimension.SCREEN_WIDTH.value, y_cord))\n\n pg.display.update()",
"def draw(self):\n self.draw_line = self.top_line\n for item in self.menu.iter_viewable():\n self.draw_item(item)",
"def update_line(self):\n self._draw_line_text()\n self._draw_status()\n self._line_listbox.set_focus(self.model.l_index)",
"def draw_line(tick_length, tick_label='.'):\n global x\n print(tick_length)\n line = '_' * tick_length\n if tick_label:\n line += '' + tick_label\n print(line)\n x += 1\n print('execute draw_line num is ', x)",
"def _draw_line_text(self):\n self._line_text.set_text(self.model.get_current_line())",
"def draw_lines(self, screen):\n line_offset = int(self.TOTAL_WIDTH / 2 - self.LINE_EDGE_OFFSET)\n left_x = int(screen.get_width() / 2 - line_offset)\n right_x = int(screen.get_width() / 2 + line_offset)\n inner_left_x = int(screen.get_width() / 2 - self.LINE_CENTER_OFFSET)\n inner_right_x = int(screen.get_width() / 2 + self.LINE_CENTER_OFFSET)\n left_points = [(left_x, self.LINE_TOP), (left_x, self.LINE_Y), (inner_left_x, self.MINUTE_Y)]\n right_points = [(right_x, self.LINE_TOP), (right_x, self.LINE_Y), (inner_right_x, self.LINE_Y)]\n\n pygame.draw.lines(screen, self.LINE_COLOR, False, left_points, 2)\n pygame.draw.lines(screen, self.LINE_COLOR, False, right_points, 2)",
"def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False",
"def hline(self, x, y, length, color):\n self.fill_rect(x, y, length, 1, color)",
"def doLineDraw(self, sx, sy, ex, ey):\r\n self.parent.addLine(sx, sy, ex, ey, self.set_colour)",
"def drawLines(self):\n for line in self.lines:\n if line[2] > 0:\n if self.pause == 0:\n line[2] -= 1\n weapon = line[0]\n target = line[1]\n color = anwp.func.globals.colors[self.empires[weapon.myShip.empireID].color1]\n (x,y) = weapon.getMyXY()\n anwp.sl.engine.drawLine(x+self.bufferX, y+self.bufferY, target.posX+self.bufferX, target.posY+self.bufferY, color)\n else:\n del line",
"def _redisplayLines(self):\n\n self.stdscr.clear()\n self.paintStatus(self.statusText)\n i = 0\n index = len(self.lines) - 1\n while i < (self.rows - 3) and index >= 0:\n self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index][:self.cols], \n curses.color_pair(2))\n i += 1\n index -= 1\n self.paintDebug()\n self.stdscr.refresh()\n self._pos_cursor()",
"def draw_laser(self):\n pygame.draw.rect(self.screen, self.color, self.rect)",
"def draw_line(self, x0, y0, x1, y1, color=Color['white']):\n pygame.draw.line(self.display, color, (x0, y0), (x1, y1))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load data from CSV files and return them as numpy arrays. The use_labels parameter indicates whether one should read the first column (containing class labels). If False, return all 0s.
|
import numpy as np

def load_data(filename, use_labels=True):
    # load columns 1 to 8 (column 0 holds the labels, the last column is ignored)
    data = np.loadtxt(open(filename), delimiter=',',
                      usecols=range(1, 9), skiprows=1)
    if use_labels:
        # read the class labels from column 0
        labels = np.loadtxt(open(filename), delimiter=',',
                            usecols=[0], skiprows=1)
    else:
        labels = np.zeros(data.shape[0])
    return labels, data
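# Hypothetical usage (file names and column layout are assumptions: a header row,
# a leading label column, eight feature columns, and a trailing column to ignore):
# y_train, X_train = load_data('train.csv', use_labels=True)
# _, X_test = load_data('test.csv', use_labels=False)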
|
[
"def load_labels():\n labels = pd.read_csv(PATH_DATA_RAW / LABEL_PATH)\n labels = labels.iloc[:, 1:4].to_numpy()\n return labels",
"def get_data(data_file,label_file):\r\n with open(data_file,\"r\") as data, open(label_file,\"r\") as label:\r\n data_reader = csv.reader(data)\r\n label_reader = csv.reader(label)\r\n data = []\r\n label = []\r\n for data_line in data_reader:\r\n line_int = [int(i) for i in data_line]\r\n data.append(line_int)\r\n \r\n for label_line in label_reader:\r\n line_int = [int(i) for i in label_line]\r\n label.append(line_int)\r\n \r\n data = np.array(data)\r\n label = np.array(label)\r\n return data,label",
"def load_labeled_data(files):\n\tx = []\n\ty = []\n\tfor filename in files:\n\t\tdata = []\n\t\twith open(filename) as infile:\n\t\t\tlabel = int(infile.readline())\n\t\t\tfor line in infile:\t\n\t\t\t\tdata.append(dna_string_to_array(line.strip()))\n\t\ty += [label]*len(data)\n\t\tx += data\n\n\treturn (np.array(x), np.array(y))",
"def load_data():\n\n contents = []\n with open('train.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',',)\n next(csv_reader)\n for row in csv_reader:\n contents += [row]\n\n cont_np = np.asarray(contents, dtype=np.float64)\n train_x = cont_np[:, :-1]\n train_y = cont_np[:, -1]\n\n contents = []\n with open('test.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',',)\n next(csv_reader)\n for row in csv_reader:\n contents += [row]\n\n test_x = np.asarray(contents, dtype=np.float64)\n\n return train_x, train_y, test_x",
"def process_csv(input_filename, sep=\" \", input_filename_labels=\"\"): \n if input_filename_labels!=\"\":\n data = np.genfromtxt(input_filename, delimiter=sep,dtype='str')\n X = data.astype(float) \n labels = np.genfromtxt(input_labels, delimiter=sep,dtype='str')\n y = labels.astype(int) \n else: # take labels from the last column\n data = np.genfromtxt(input_filename, delimiter=sep,dtype='str')\n X = data[:,:-1].astype(float) # exclude last column --> labels\n y = data[:,-1] # labels\n \n # we may or may not need this later...\n # min_max_scaler = preprocessing.MinMaxScaler()\n # X = min_max_scaler.fit_transform(X) \n return X, y",
"def _csv_reader_numpy(self, mypath):\n onlyfiles = [f for f in listdir(mypath) if f.endswith(\".csv\")]\n all_data = None\n for index, file in enumerate(onlyfiles):\n data = np.loadtxt(file, delimiter=\",\", dtype=float)\n if index == 0:\n all_data = data\n else:\n all_data = np.vstack((all_data, data))\n return all_data",
"def getCSVDataValuesWithLabel(fileName, label):\n dat = pd.read_csv(fileName).dropna()\n Label = dat.loc[:, [x for x in dat.columns.tolist() if x == label]].as_matrix()\n return Label",
"def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)",
"def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]",
"def read_labels(label_paths, use):\n paths_to_use = label_paths[use]\n lbls_raw = [scipy.io.loadmat(path)['labels'] for path in paths_to_use]\n lbls = []\n for lbl_raw in lbls_raw:\n lbl = np.zeros((lbl_raw.shape[0], lbl_raw.shape[1], 3), dtype=np.uint8)\n bmask = lbl_raw == 1 \n gmask = ~bmask\n lbl[:,:,1] = gmask * 255\n lbl[:,:,2] = bmask * 255\n lbls.append(np.copy(lbl))\n return np.array(lbls)",
"def loadCsvData(self, file_path, trainPart=0.9, validationPart=0.1, delimiter=\",\", skip_header=1, labelColumn=-1):\n csv_data = np.genfromtxt(file_path, delimiter=delimiter, skip_header=skip_header)\n train_size = csv_data.shape[0]\n #csv_data = csv_data[0:1000]\n training_data = []\n training_labels = []\n validation_data = []\n validation_labels = []\n # np.random.seed(12345)\n np.random.shuffle(csv_data)\n\n for d in csv_data[0:int(train_size*trainPart)]:\n training_data.append(d[0:-1])\n training_labels.append([d[-1]])\n\n for d in csv_data[int(train_size*trainPart):]:\n validation_data.append(d[0:-1])\n validation_labels.append([d[-1]])\n\n return np.array(training_data), np.array(training_labels), np.array(validation_data), np.array(validation_labels)",
"def _read_labels_csv_file(self, csv_file_path, image_file_paths):\n\n self.__logger.debug('[Get Labels]')\n self.__logger.debug('Read CSV Labels ( %s ) ...' % csv_file_path)\n\n image_file_names = self.get_file_names_from_file_paths(file_paths=image_file_paths)\n\n labels = []\n\n with open(csv_file_path, newline='') as csvfile:\n read_image_files = 0 # numbers of image files read\n rows = csv.reader(csvfile)\n\n for row in rows:\n file_name = row[0]\n # make file name from '00030183_004.png' to '00030183_004'\n file_name = file_name.split('.')\n file_name = file_name[0]\n\n # if csv file name matches image file name, the label of the former will be stored in labels (list)\n if file_name == image_file_names[read_image_files]: # image_file_name has to remove str '.jpg'\n label = row[1].split('|')\n label_id = []\n for i in range(len(label)):\n label_id.append(Xray_class_id[label[i]])\n labels.append(label_id) # store the label\n\n read_image_files += 1\n if read_image_files == len(image_file_names): # if numbers of image files read equals numbers of\n # batch images, then break\n break\n\n self.__logger.debug('Done !')\n\n return labels",
"def get_labels_df():\n labels_df = pd.read_csv('data/train/truth_train.csv', header=None)\n return labels_df",
"def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes",
"def from_csv(filename) -> np.array:\n df = pd.read_csv(r\"{}\".format(filename), header=None)\n out = df.to_numpy()\n return out",
"def load_csv(fichero):\r\n data = np.loadtxt(fichero, delimiter=',')\r\n X = data[:,:-1]\r\n y = data[:,-1]\r\n return X, y",
"def get_labels(self, path_labels):\r\n self.labels_df = pd.read_csv(path_labels, names=['label'])\r\n #Extracting list of labels from the dataframe\r\n self.labels = [str(label[0]) for label in self.labels_df.values]\r\n self.n_labels = len(self.labels)\r\n #Create dictionnaries to convert label to int and backwards\r\n self.label_to_int = dict(zip(self.labels, range(self.n_labels)))\r\n self.int_to_label = dict(enumerate(self.labels))",
"def import_data(path, num_examples):\n data = np.empty((num_examples, 5), dtype=\"float128\")\n y = np.empty((num_examples, 1), dtype=\"float128\")\n\n with open(path, 'r') as f:\n i = 0\n for line in f:\n example = []\n terms = line.strip().split(',')\n for j in range(len(terms)):\n if j == 4:\n y[i] = 2 * float(terms[j]) - 1\n else:\n example.append(float(terms[j]))\n data[i, 1:] = example\n data[i, 0] = 1\n i += 1\n\n data = normalize(np.asmatrix(data), axis=0)\n return [data, np.asmatrix(y)]",
"def load_datasets(folder_path, glob_filter=\"*.csv\", labels_last_column=True, labels_filename=False, custom_func=None, **kwargs):\n # Convert the folder_path to a Path if needed\n if isinstance(folder_path, str):\n folder_path = Path(folder_path)\n elif isinstance(folder_path, Path):\n pass\n else:\n raise TypeError(f\"{type(folder_path)} is not a valid type for the folder_path\")\n # Check that the directory exists\n if not folder_path.is_dir():\n raise ValueError(f\"{folder_path} is not a directory\")\n # If both labels arguments are true, raise error\n if labels_last_column and labels_filename:\n raise ValueError(f\"labels_last_column and labels_filename cannot both be True\")\n # or if both are False\n elif not (labels_last_column or labels_filename):\n raise ValueError(f\"labels_last_column and labels_filename cannot both be False\")\n # Get the files according to the filter provided\n files = list(folder_path.glob(glob_filter))\n # Sort them files (avoids needing leading 0s)\n # https://stackoverflow.com/a/36202926/9963224\n files.sort(key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', str(var))])\n # If no files are found, raise error\n if not files:\n raise ValueError(f\"{folder_path} with {glob_filter} filter had no results\")\n # Run the custom function if provided\n if custom_func is not None:\n filenames, datasets, label_sets = custom_func(files)\n else:\n # Initialize containers\n filenames = []\n datasets = []\n label_sets = []\n # Loop through the files\n for file in files:\n filenames.append(file.stem)\n # If the labels are in separate files, load them\n if labels_filename:\n if \"label\" in file:\n label_sets.append(np.loadtxt(file, **kwargs))\n else:\n datasets.append(np.loadtxt(file, **kwargs))\n # Otherwise the labels are in the last column\n elif labels_last_column:\n # Load the data\n data = np.loadtxt(file, **kwargs)\n # Add the datasets and labels\n label_sets.append(data[:, -1].astype(int))\n datasets.append(data[:, :-1])\n # Return the filenames, datasets, and labels\n return filenames, datasets, label_sets"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a vector of predictions, save results in CSV format.
|
def save_results(predictions, filename):
with open(filename, 'w') as f:
f.write("id,ACTION\n")
for i, pred in enumerate(predictions):
f.write("%d,%f\n" % (i + 1, pred))
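# Hypothetical usage with dummy probabilities (the file name is an assumption):
# import numpy as np
# save_results(np.random.rand(3), 'submission.csv')
# # -> writes a header line "id,ACTION" followed by rows like "1,0.548813"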
|
[
"def export_predictions(self):\n with open('prediction/submission.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n for i in range(len(self.test_predictions)):\n writer.writerow([str(i) + \", \" + self.test_predictions[i]])",
"def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])",
"def export_prediction(prediction):\r\n # Store users, items and ratings in three arrays\r\n \r\n header = 'Id,Prediction\\n'\r\n \r\n N = len(prediction)\r\n users = []\r\n items = []\r\n rat = []\r\n \r\n for j, pred in enumerate(prediction):\r\n users.append(pred.uid)\r\n items.append(pred.iid)\r\n rat.append(pred.est)\r\n \r\n # Format preditions in the kaggle format\r\n data = []\r\n data.append(header) # Add header at the start of the text file\r\n for j in range(N):\r\n data.append('r{u}_c{i},{r}\\n'.format(u=users[j], i=items[j], r = rat[j]))\r\n \r\n # Write predictions in a csv file\r\n fp = open('../data/final_prediction.csv', 'w')\r\n fp.writelines(data)\r\n fp.close()",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()",
"def predictions_to_csv():\n\n p = Path(\".\") / \"data\" / \"predictions\"\n print(\"Looking for prediction files\")\n prediction_files = []\n for i in p.glob('*.npy'):\n if \"prediction\" in i.name:\n print(i.name)\n prediction_files.append(i.name)\n\n test_labels = np.load(\"data/ising/test_labels.npy\")\n\n for i in prediction_files:\n curr = np.load(\"data/predictions/\" + i)\n if \"fmodel\" in i:\n model_result = \"final\"\n elif \"bmodel\" in i:\n model_result = \"best\"\n else:\n warn(\"Model should be final or best, none detected\")\n\n if \"clas\" in i:\n evaluate_models(model_iteration=int(i.split(\"_\")[1]),\n model_type=\"classification\",\n predicted=curr,\n ground_truth=convert_to_categories(test_labels, 10),\n model_result=model_result)\n\n elif \"reg\" in i:\n evaluate_models(model_iteration=int(i.split(\"_\")[1]),\n model_type=\"regression\",\n predicted=curr,\n ground_truth=test_labels,\n model_result=model_result)",
"def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])",
"def write_predictions(predictions,\n output_predict_file):\n with tf.io.gfile.GFile(output_predict_file, \"w\") as write_file:\n header = [\n \"query_id\",\n \"query_rep\",\n \"table_id\",\n \"table_rep\",\n ]\n writer = csv.DictWriter(write_file, fieldnames=header, delimiter=\"\\t\")\n writer.writeheader()\n\n for prediction in predictions:\n query_id = prediction[\"query_id\"]\n table_id = prediction[\"table_id\"]\n query_rep = prediction[\"query_rep\"]\n table_rep = prediction[\"table_rep\"]\n\n prediction_to_write = {\n \"query_id\": query_id[0].decode(\"utf-8\"),\n \"query_rep\": query_rep.tolist(),\n \"table_id\": table_id[0].decode(\"utf-8\"),\n \"table_rep\": table_rep.tolist(),\n }\n writer.writerow(prediction_to_write)",
"def write_predictions_to_file(predictions: list, path: str):\n\n with open(path, 'w') as f_preds:\n f_preds.write(_format_output(predictions))",
"def print_to_csv(results, filename):\n \n for i in results:\n string = str(i)+','\n for j,c in enumerate(results[i]):\n string += str(c)+','\n\n f = open(filename, \"a\")\n f.write(string + '\\n')\n f.close()",
"def write_predictions(all_predictions, output_prediction_file):\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")",
"def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)",
"def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)",
"def save_prediction(self, meta, y_pred, y, filename):\n df = pd.DataFrame(meta)\n df['y_pred'] = y_pred\n df['y'] = y\n print(df)\n df.loc[:, 'id'] = df.index\n self.df_to_csv(df, filename, store_header=False)",
"def write_test_results(results):\n with open(f'test_results.csv', \"a\") as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(results)\n return",
"def export_results_to_csv(test_df: np.array, bit_classification: np.array, export_path: Path) -> None:\n test_df = pd.DataFrame(test_df).rename(columns={0: 'UserID', 1: 'Item1', 2: 'Item2'})\n test_df['bit'] = bit_classification\n test_df.to_csv(export_path, index=False)",
"def predict_submissions(model, test_dataset, questions_list, batch_size, result_dir=\"./\", ):\n predictions = model.predict(x=test_dataset, steps=math.ceil(len(questions_list) / batch_size), verbose=1)\n predicted_class = np.argmax(predictions, axis=1)\n\n question_id_list = [question_dict['question_id'] for question_dict in questions_list]\n\n results = dict(zip(question_id_list, predicted_class))\n create_csv(results, results_dir=result_dir)\n\n print(\"Wrote file csv\")",
"def __write_csv(self, prediction_probs, n, filename):\n d = {'Id': pd.Series([i for i in xrange(1, n + 1)]),\n 'Action': pd.Series(prediction_probs)}\n df = pd.DataFrame(d)\n df = df[['Id', 'Action']]\n df.to_csv(filename, sep=',', encoding='utf-8',\n index=False)",
"def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The following function is used to format numbers. At the start, the ordinal suffixes "th", "st", "nd", and "rd" are removed when they follow a number.
|
def clean_numbers(self, x):
        # remove "th" after a number (keep the number itself; plain words ending in "th" are left alone)
        matches = re.findall(r'\b\d+\s*th\b', x)
        if len(matches) != 0:
            x = re.sub(r'(\b\d+)\s*th\b', r'\1 ', x)
        # remove "rd" after a number
        matches = re.findall(r'\b\d+\s*rd\b', x)
        if len(matches) != 0:
            x = re.sub(r'(\b\d+)\s*rd\b', r'\1 ', x)
        # remove "st" after a number
        matches = re.findall(r'\b\d+\s*st\b', x)
        if len(matches) != 0:
            x = re.sub(r'(\b\d+)\s*st\b', r'\1 ', x)
        # remove "nd" after a number
        matches = re.findall(r'\b\d+\s*nd\b', x)
        if len(matches) != 0:
            x = re.sub(r'(\b\d+)\s*nd\b', r'\1 ', x)
        # replace standalone multi-digit numbers (10 and above) with '#' placeholders matching
        # their digit count (5 or more digits all map to '#####');
        # numbers attached to words like "G-20" are not touched
        matches = re.findall(r'^\d+\s+|\s+\d+\s+|\s+\d+$', x)
        if len(matches) != 0:
            x = re.sub(r'^[0-9]{5,}\s+|\s+[0-9]{5,}\s+|\s+[0-9]{5,}$', ' ##### ', x)
            x = re.sub(r'^[0-9]{4}\s+|\s+[0-9]{4}\s+|\s+[0-9]{4}$', ' #### ', x)
            x = re.sub(r'^[0-9]{3}\s+|\s+[0-9]{3}\s+|\s+[0-9]{3}$', ' ### ', x)
            x = re.sub(r'^[0-9]{2}\s+|\s+[0-9]{2}\s+|\s+[0-9]{2}$', ' ## ', x)
        # single-digit numbers (0-9) are kept, since the word vectors cover them
        # x = re.sub('[0-9]{1}', '#', x)
return x
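# Illustration (assumes `cleaner` is an instance of the class owning this method;
# the output is indicative, since the substitutions can leave extra whitespace behind):
# cleaner.clean_numbers("She finished 2nd in 2019")
# -> roughly "She finished 2 in ####"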
|
[
"def format(number):\n return compact(number)",
"def remove_formatting(value):\n if isinstance(value, (int, float)):\n return value\n\n if not value:\n return 0\n\n formatting = [\",\", \"$\", \"ft\", \"yds\"]\n\n value = value.lower()\n for format_type in formatting:\n value = value.replace(format_type, \"\")\n\n if value.isspace() or value == \"\":\n return 0\n return float(value)",
"def test_dual_formats_of_number(self):\n number = 4508077077058854\n self.assertEqual(formatter.get_format(number), ['visa', 'visa electron'])",
"def format_number(separator, n):\n n_s = str(n)\n if len(n_s) <= 3:\n return n_s\n else:\n upper = n_s[:-3]\n lower = n_s[-3:]\n return format_number(separator, upper) + separator + lower",
"def replace_printf(number):\n if number > 2:\n return \"%0\" + str(number) + \"d\"\n else:\n return \"%d\"",
"def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))",
"def replace_digits(st):\n return re.sub(r\"\\d\", \"(DIGIT)\", st)",
"def suffix(number: int) -> str:\n\n if number != 11 and number % 10 == 1:\n return \"st\"\n if number != 12 and number % 10 == 2:\n return \"nd\"\n if number != 13 and number % 10 == 3:\n return \"rd\"\n return \"th\"",
"def my_formatter(x, pos):\n val_str = '${:g}$'.format(x)\n if np.abs(x) > 0 and np.abs(x) < 1:\n return val_str.replace(\"0\", \"\", 1)\n else:\n return val_str",
"def normalize_number(number):\n strip_chars = '()-.+ '\n for char in strip_chars:\n number = number.replace(char, '')\n if len(number) == 10:\n number = \"1{}\".format(number)\n return number",
"def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return ' '.join(new_words)",
"def formatNumber(number):\n temp = str(number)\n while len(temp) < 4:\n temp = '0' + temp\n return temp",
"def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)",
"def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s",
"def compact(number):\n return clean(number, ' -./,').strip()",
"def compact(number):\n return clean(number, ' -.').strip()",
"def formatOutput(number):\n lenght = len(str(number))\n number = int(number)\n if lenght >= 3:\n return format(number, \"03,d\")\n return number",
"def remove_numbers(tweet):\n words = re.split(r'\\s+', tweet)\n new_words = []\n\n for word in words:\n if bool(re.search(r'\\d', word)):\n new_words.append('number')\n else:\n new_words.append(word)\n \n return ' '.join(new_words)",
"def compact(number):\n return clean(number, ' ').strip()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function is used to replace "yr"/"yrs" with "year" and "hr"/"hrs" with "hour".
|
def year_and_hour(self, text):
# Find matches for "yr", "yrs", "hr", "hrs"
matches_year = re.findall(r'\b\d+\s*yr\b', text)
matches_years = re.findall(r'\b\d+\s*yrs\b', text)
matches_hour = re.findall(r'\b\d+\s*hr\b', text)
matches_hours = re.findall(r'\b\d+\s*hrs\b', text)
# replace all matches accordingly
if len(matches_year) != 0:
text = re.sub(r'\b\d+\s*yr\b', "year", text)
if len(matches_years) != 0:
text = re.sub(r'\b\d+\s*yrs\b', "year", text)
if len(matches_hour) != 0:
text = re.sub(r'\b\d+\s*hr\b', "hour", text)
if len(matches_hours) != 0:
text = re.sub(r'\b\d+\s*hrs\b', "hour", text)
return text
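# Illustration (assumes `obj` is an instance of the class owning this method);
# note that the number in front of the unit is dropped together with the unit:
# obj.year_and_hour("valid for 2 yrs and takes 3 hrs")
# -> "valid for year and takes hour"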
|
[
"def replace_time(text, ori):\n r = ori\n if '**' in text:\n r = 'xxhour'\n else:\n try:\n # handle exceptions with custom rules\n f, s = text.split()\n s = 'am' if s[0] == 'a' else 'pm'\n l, r = f.split(':')\n if l == '' or l == '00':\n if r == '':\n r = str(0).zfill(2)\n l = str(12)\n if int(l) > 12:\n l = str(int(l) % 12)\n f = ':'.join([l, r])\n text = ' '.join([f, s])\n\n d = datetime.strptime(text, '%I:%M %p')\n if d.hour >= 0 and d.hour < 4:\n r = 'xxmidngt'\n elif d.hour >= 4 and d.hour < 8:\n r = 'xxdawn'\n elif d.hour >= 8 and d.hour < 12:\n r = 'xxfore'\n elif d.hour >= 12 and d.hour < 16:\n r = 'xxafter'\n elif d.hour >=16 and d.hour <20:\n r = 'xxdusk'\n else:\n r = 'xxngt'\n except ValueError:\n pass\n return r",
"def convert_time_format(stime, dtype):\n#\n#--- today's ydate and year\n#\n cdate = int(float(time.strftime('%j', time.gmtime())))\n tyear = int(float(time.strftime('%Y', time.gmtime())))\n\n ctime = []\n byear = 0\n schk = 0\n for ent in stime:\n#\n#--- set date in ydate\n#\n if dtype in ['week', 'short', 'year']:\n out = Chandra.Time.DateTime(ent).date\n atemp = re.split(':', out)\n syear = int(atemp[0])\n syday = float(atemp[1])\n hh = float(atemp[2])\n mm = float(atemp[3])\n ss = float(atemp[4])\n\n if schk < 1:\n byear = syear\n if mcf.is_leapyear(byear):\n base = 366\n else:\n base = 365\n schk = 1\n#\n#--- if this is a year long plot, for the first three months, the base year is the previous year\n#--- after three months, the base year is this year\n#\n if dtype == 'year':\n if cdate < 90:\n syday += (syear - byear) * base + hh / 24.0 + mm / 1440.0 + ss / 86400.0\n else:\n if syear == byear:\n syday -= base\n syday += hh / 24.0 + mm / 1440.0 + ss / 86400.0\n schk = 2\n else:\n syday += (syear - byear) * base + hh / 24.0 + mm / 1440.0 + ss / 86400.0\n\n ctime.append(syday)\n#\n#--- set date in fractional year\n#\n else:\n byear = 1999\n ctime.append(float(mcf.chandratime_to_fraq_year(ent)))\n#\n#--- the base year changes, if it is year plot and after third months of the year\n#\n if schk == 2:\n byear = tyear\n\n return [ctime, byear]",
"def _adjust_time(time, quarter, end_of_quarter, league):\n\tnew_time = re.split(\":\", time)\n\tminutes = int(new_time[0])\n\tseconds = int(new_time[1])\n\tif minutes is 0 and not end_of_quarter:\n\t\tend_of_quarter = True\n\telif end_of_quarter and minutes > 1:\n\t\tquarter += 1\n\t\tend_of_quarter = False\n\toverall_time = _calc_overall_time(seconds, minutes, quarter, league)\n\ttime_dict = {}\n\ttime_dict[\"overall_time\"] = overall_time\n\ttime_dict[\"quarter_time\"] = time\n\ttime_dict[\"quarter\"] = quarter\n\treturn time_dict, quarter, end_of_quarter",
"def test_short_format_contains_year(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '%Y-%m-%d',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '%Y-%m-%d %H:%M',\n }\n assert (dt.datetime(2017, 1, 1), dt.datetime(2017, 1, 2), True) == \\\n guessrangefstr('2017-1-1 2017-1-1', locale=locale)",
"def do_ry(self, arg):\n self.do_timesheet('report year')",
"def interpret_time( text ):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try: \n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n as_arrow = as_arrow.replace(year=2016) #HACK see below\n app.logger.debug(\"Succeeded interpreting time\")\n except:\n app.logger.debug(\"Failed to interpret time\")\n flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n .format(text))\n raise\n return as_arrow.isoformat()",
"def hours_in_year(year):\n if is_leapyear(year):\n return 8784\n else:\n return 8760",
"def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x",
"def test_evaluate_year_expression(self):\n for f, r in (\n (\"year\", 2013),\n (\"month\", 9),\n (\"day\", 1),\n (\"hour\", 10),\n (\"minute\", 56),\n (\"second\", 0)):\n value = self.evaluate_common(\"%s(datetime'2013-09-01T10:56')\" % f)\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value == r)\n try:\n value = self.evaluate_common(\n \"%s(datetimeoffset'2013-09-01T10:56:12-05:00')\" % f)\n self.fail(\"datetimeoffset %s\" % f)\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\n \"%s(datetime'2013-09-01T10:56',\"\n \"datetime'2013-09-01T10:57')\" % f)\n self.fail(\"2 parameters\")\n except odata.EvaluationError:\n pass",
"def time_to_year(time):\n\treturn str(time)[0:4]",
"def _fix_vague_date(self, datetime):\n return re.sub(r'^(?P<year>\\d{,4}-)00(?P<remainder>.*?)$',\n r'\\g<year>01\\g<remainder>', datetime)",
"def scaledTime():\n #return (time.gmtime().tm_wday, time.gmtime().tm_hour)\n epoch = time.strptime(\"2013-02-21 11:30:00\", \"%Y-%m-%d %H:%M:%S\")\n timeInSec = time.mktime(time.gmtime()) - time.mktime(epoch)\n hourSince = timeInSec / Predictor.hourScale\n day = int(hourSince / 24 % 7)\n hour = int(hourSince % 24)\n return (day, hour)",
"def timestamp_format_to_redex(time_format):\n\n time_keys = {'%Y': r'\\d{4}',\n '%m': r'\\d{2}',\n '%d': r'\\d{2}',\n '%H': r'\\d{2}',\n '%M': r'\\d{2}',\n '%S': r'\\d{2}'}\n\n redex = time_format\n for key, item in time_keys.items():\n if key in redex:\n redex = redex.replace(key, item)\n\n return redex",
"def convert_to_hour(date):\n return date[11:13]",
"def check_hour_range(self, hour):\n if 0 <= hour <= 5:\n return 'Early Morning'\n if 6 <= hour <= 11:\n return 'Day Time'\n if 12 <= hour <= 17:\n return 'Afternoon'\n if 18 <= hour <= 23:\n return 'Evening'",
"def _gatime(yyyy,jjj,hh=None,nn=None):\n t = date((yyyy,1,1)) + (jjj - 1)*DAY\n if hh is None:\n return '%02d%s%4d'%(t.day,t.ctime()[4:7],yyyy)\n else:\n return '%02d:%02dZ%02d%s%4d'%(hh,nn,t.day,t.ctime()[4:7],yyyy)",
"def ecalendar_input_format(term_year,course_code):\n course_code = course_code.lower()\n term_year = convert_ecalendar_term(term_year)\n\n codes = course_code.split(\"-\")\n code_pt1 = codes[0]\n code_pt2 = codes[1]\n # if \"-\" in code_pt2:\n # code_pt2 = code_pt2[ : code_pt2.find('-') ]\n if code_pt1.isalpha():\n return (term_year, code_pt1+\"-\"+code_pt2)\n else:\n raise ValueError('Input must be valid values in the format yyyy-yyyy, cccc-xxx ex. 2018-2019, comp-206')",
"def test_date_by_yr(self):\n spi_search = \"find date 2002\"\n inv_search = \"year:2002\"\n self._compare_searches(inv_search, spi_search)",
"def sanitize(time_string): # Fix non-uniformity in the athletes data to enable sorting\n if '-' in time_string:\n splitter = '-'\n (mins, secs) = time_string.split(splitter)\n elif ':' in time_string:\n splitter = ':'\n (mins, secs) = time_string.split(splitter)\n else:\n return time_string\n return '{0}.{1}'.format(mins, secs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Performs an HTTP request using the verb given in 'method' and returns the requests response object. The method tries to catch typical errors and gather error messages from the New Relic API. Each known error has a corresponding exception, and all exceptions inherit from the generic NewRelicException. If the HTTP return code is not recognized, a generic NewRelicException is raised.
|
def _request(self, method, *args, **kwargs):
try:
r = getattr(requests, method)(*args, **kwargs)
except AttributeError:
raise NewRelicException(
'Method {} is unsupported by requests module'
.format(method)
)
except requests.exceptions.Timeout:
raise Timeout('Request timed out after {} seconds'
.format(self.timeout))
if r.status_code < 200 or r.status_code > 299:
            # Try to map all known error codes to their dedicated exceptions
if r.status_code == 401:
try:
error_message = r.json()['error']['title']
except (KeyError, ValueError):
raise UnathorizedError(
'User is not authorized to perform requested operation'
)
else:
raise UnathorizedError(error_message)
            elif r.status_code == 402:
raise ChecksLimitExceeded(
"Creating the monitor will increase your scheduled checks "
"past your account's purchased check limit."
)
elif r.status_code == 404:
try:
error_message = r.json()['error']['title']
except (KeyError, ValueError):
raise ItemNotFoundError(
'Requested item not found. '
'No error message was provided by server.'
)
else:
raise ItemNotFoundError(error_message)
else:
                # If we don't know what to do with a specific error code
                # (most likely it's 400), we at least try to get an error
                # message from the response.
try:
response_errors = r.json()['errors']
raise NewRelicException(
"The following errors were returned by server:\n{}"
.format('\n'
.join(
[x['error'] for x in response_errors]
))
)
                # Sometimes the API does not return any useful information;
                # in that case the body is just an HTML page reporting 400
                # instead of JSON, so we only report the status code.
except ValueError:
raise NewRelicException(
'Got unexpected response code {}. '
'No additional information provided by server.'
.format(r.status_code)
)
return r
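A hedged usage sketch of the error handling above; the `client` instance and the monitor-list URL are illustrative assumptions, and the exception names (including the `UnathorizedError` spelling) are the ones raised in this method.

# Illustrative only: 'client' is an instance of the class that defines _request.
try:
    r = client._request('get', 'https://synthetics.newrelic.com/synthetics/api/v3/monitors',
                        headers={'X-Api-Key': 'REDACTED'})
except Timeout:
    print('The request timed out.')
except UnathorizedError:
    print('The API key is missing or lacks the required permissions.')
except ItemNotFoundError:
    print('The requested item does not exist.')
except NewRelicException as exc:
    print('Request failed: {}'.format(exc))
else:
    print(r.status_code, r.json())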
|
[
"def _make_request(self):\n try:\n self.response = requests.request(\n method=self.method,\n url=self.url,\n params=self.params,\n data=self.data,\n )\n\n logger.debug(f\"Request URL: {self.response.url}\")\n\n self.response.raise_for_status()\n\n # wrap all `requests` library error and serve as custom application error\n except RequestException as e:\n logger.error(e.__str__(), exc_info=True)\n raise ExternalAPIError(\n \"Error while communication with External API\"\n )",
"def _send_request_safe_mode(self, method, url, **kwargs):\n try:\n return super().request(method, url, **kwargs)\n except (MissingSchema, InvalidSchema, InvalidURL):\n raise\n except RequestException as e:\n r = LocustResponse()\n r.error = e\n r.status_code = 0 # with this status_code, content returns None\n r.request = Request(method, url).prepare()\n return r",
"def request(self, method, **params):\n supported_methods = (requests.get, requests.post)\n if method not in supported_methods:\n raise TypeError(\"Unexpected or unsupported method provided\")\n\n params[\"headers\"] = self.HEADERS\n\n params[\"timeout\"] = self.TIMEOUT_SECONDS\n\n try:\n response = method(**params)\n except Exception as e:\n if self.DEBUG:\n self.errors.append((method, params, e))\n raise\n\n if self.DEBUG:\n self.responses.append(response)\n\n return response",
"def _request(self, method: str, url: str, **kwargs) -> requests.Response:\n self.last_request = None\n if method in (\"PUT\", \"POST\", \"DELETE\"):\n kwargs[\n \"allow_redirects\"\n ] = False # to prevent redirects on write action. Better check your URL first.\n self.last_response = self.session.request(\n method, url, auth=self.auth, headers=self.headers, **kwargs\n )\n self.last_request = self.last_response.request\n self.last_url = self.last_response.url\n\n if self.last_response.status_code == requests.codes.forbidden:\n raise ForbiddenError(self.last_response.json()[\"results\"][0])\n\n return self.last_response",
"def get_http(self, method=\"GET\") -> http.HttpRequest:\n if method == \"GET\":\n return http.HttpRequest\n elif method == \"POST\":\n return http.HttpPostRequest\n raise UnknownMethodException(\"Unknown http method: {}\".format(method))",
"def make_request(method, url, headers=None, data=None, retries=3):\n no_retry_status = [404, 401, 403]\n may_retry_status = [408, 500, 502, 503]\n\n if not retries:\n return requests.request(method=method,\n url=url,\n headers=headers,\n data=data)\n\n while retries:\n r = requests.request(method=method,\n url=url,\n headers=headers,\n data=data)\n if r.status_code in no_retry_status:\n return r\n\n elif r.status_code in may_retry_status:\n retries -= 1\n time.sleep(1)\n\n if retries == 0:\n return r\n continue\n\n else:\n return r",
"def _send_request(self, url: str, method: str, headers: dict, data: dict):\n common_kwargs = {\n 'url': url,\n 'headers': headers,\n 'verify': self.requests_ssl_verify\n }\n\n if method == 'get':\n api_response = requests.get(**common_kwargs)\n elif method == 'post':\n api_response = requests.post(**common_kwargs, data=json.dumps(data))\n elif method == 'delete':\n api_response = requests.delete(**common_kwargs, data=json.dumps(data))\n elif method == 'patch':\n api_response = requests.patch(**common_kwargs, data=json.dumps(data))\n elif method == 'put':\n api_response = requests.put(**common_kwargs, data=json.dumps(data))\n else:\n logger.debug('ERROR : Wrong requests, please only do [get, post, put, patch, delete] method')\n raise TypeError('Unknown method to requests %s', method)\n return api_response",
"def _RequestAndProcessHttpErrors(url, **kwargs):\n http = ServiceAccountHttp(timeout=60)\n\n response, content = http.request(url, **kwargs)\n\n if response['status'] == '404':\n raise NotFoundError(\n 'HTTP status code %s: %s' % (response['status'], repr(content[0:200])),\n response, content)\n if not response['status'].startswith('2'):\n raise RequestError(\n 'Failure in request for `%s`; HTTP status code %s: %s' %\n (url, response['status'], repr(content[0:200])), response, content)\n return content",
"def __http_request(self, url, method = 'GET', body = None, request_timeout = 30):\n\n # Encode and prepare the request URL\n url = urllib.quote(url)\n full_url = 'http://{0}:{1}/api{2}'.format(self.server, self.port, url)\n\n # Fix missing 'body' in POST and PUT requests\n if method in ['POST', 'PUT'] and body is None:\n body = ''\n\n # Try to send the request\n try:\n response = self.client.fetch(full_url, method = method, body = body, request_timeout = request_timeout)\n except HTTPError as error:\n # In case the response indicates an error, try to transform it into an AerialException instance. Fails if\n # the error response is not a standard aerial API error.\n try:\n aerial_error = json.loads(error.response.body)['error']\n error = AerialException(aerial_error['type'], aerial_error['message'])\n finally:\n # Nevertheless, wrapped or not, raise the HTTP error.\n raise error\n else:\n # If everything went smoothly, try to parse the JSON response and return the result.\n if response.body is not None and response.body != '':\n try:\n return json.loads(response.body)\n except ValueError:\n # If the response is not a JSON document, something unexpected has happened. Raise an appropriate\n # exception.\n raise AerialException('malformed_response',\n 'An unexpected response has been received from the server.')",
"def make(self, *args, **kwargs):\n try:\n return self.methods[args[0]](url=args[1], client=self.client).request()\n except KeyError as e:\n print(e)",
"def request_with_retry(\n attempts: int = 3, status_codes_to_retry: Optional[List[int]] = None, **kwargs\n) -> requests.Response:\n\n if status_codes_to_retry is None:\n status_codes_to_retry = [408, 418, 429, 503]\n\n @retry(\n reraise=True,\n wait=wait_exponential(),\n retry=retry_if_exception_type((requests.HTTPError, TimeoutError)),\n stop=stop_after_attempt(attempts),\n before=before_log(logger, logging.DEBUG),\n after=after_log(logger, logging.DEBUG),\n )\n def run():\n timeout = kwargs.pop(\"timeout\", 10)\n res = requests.request(**kwargs, timeout=timeout)\n\n if res.status_code in status_codes_to_retry:\n # We raise only for the status codes that must trigger a retry\n res.raise_for_status()\n\n return res\n\n res = run()\n # We raise here too in case the request failed with a status code that\n # won't trigger a retry, this way the call will still cause an explicit exception\n res.raise_for_status()\n return res",
"def dispatch_request(self, *args, **kwargs):\n try:\n return super().dispatch_request(*args, **kwargs)\n except HTTPException as e:\n logger.error(\"HTTP Error on APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": e.code,\n \"message\": e.description\n }, e.code)\n except BaseException as e:\n logger.error(\"Error occurred in APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": 500,\n \"message\": str(e)\n }, 500)",
"def retry_or_raise(self, http_class, request, first_try):\n # When an exception occurs, we give back the original\n # Traceback or the bugs are hard to diagnose.\n exc_type, exc_val, exc_tb = sys.exc_info()\n if exc_type == socket.gaierror:\n # No need to retry, that will not help\n raise errors.ConnectionError(\"Couldn't resolve host '%s'\"\n % request.get_origin_req_host(),\n orig_error=exc_val)\n elif isinstance(exc_val, httplib.ImproperConnectionState):\n # The httplib pipeline is in incorrect state, it's a bug in our\n # implementation.\n raise exc_type, exc_val, exc_tb\n else:\n if first_try:\n if self._debuglevel >= 2:\n print 'Received exception: [%r]' % exc_val\n print ' On connection: [%r]' % request.connection\n method = request.get_method()\n url = request.get_full_url()\n print ' Will retry, %s %r' % (method, url)\n request.connection.close()\n response = self.do_open(http_class, request, False)\n else:\n if self._debuglevel >= 2:\n print 'Received second exception: [%r]' % exc_val\n print ' On connection: [%r]' % request.connection\n if exc_type in (httplib.BadStatusLine, httplib.UnknownProtocol):\n # httplib.BadStatusLine and\n # httplib.UnknownProtocol indicates that a\n # bogus server was encountered or a bad\n # connection (i.e. transient errors) is\n # experimented, we have already retried once\n # for that request so we raise the exception.\n my_exception = errors.InvalidHttpResponse(\n request.get_full_url(),\n 'Bad status line received',\n orig_error=exc_val)\n elif (isinstance(exc_val, socket.error) and len(exc_val.args)\n and exc_val.args[0] in (errno.ECONNRESET, 10053, 10054)):\n # 10053 == WSAECONNABORTED\n # 10054 == WSAECONNRESET\n raise errors.ConnectionReset(\n \"Connection lost while sending request.\")\n else:\n # All other exception are considered connection related.\n\n # socket errors generally occurs for reasons\n # far outside our scope, so closing the\n # connection and retrying is the best we can\n # do.\n\n my_exception = errors.ConnectionError(\n msg= 'while sending %s %s:' % (request.get_method(),\n request.get_selector()),\n orig_error=exc_val)\n\n if self._debuglevel >= 2:\n print 'On connection: [%r]' % request.connection\n method = request.get_method()\n url = request.get_full_url()\n print ' Failed again, %s %r' % (method, url)\n print ' Will raise: [%r]' % my_exception\n raise my_exception, None, exc_tb\n return response",
"def api_get(self, *args, **kwargs):\n for _ in range(3):\n try:\n response = super(DiveSailthruClient, self).api_get(*args, **kwargs)\n break\n except SailthruClientError as e:\n if 'ConnectTimeoutError' in str(e):\n # We want to retry connection timeout errors only. Sailthru client\n # smushes the original exception from Requests into a string arg\n # so we need to test for it with string matching here.\n pass\n else:\n # If it wasn't a ConnectTimeoutError than don't retry\n raise\n else:\n # If we got here we exceeded the max number of retries\n raise\n\n # At this point we have a response from the server but we still need to check\n # if the response itself is marked as an error\n self.raise_exception_if_error(response)\n\n return response",
"def _request(self, request_method, url, *args, **kwargs):\n\n full_url = self.get_full_url(url)\n\n self.logger.info('Calling %s url: %s', request_method, full_url)\n\n request_args = self.get_request_args(kwargs)\n\n request = NapRequest(request_method, full_url, *args, **request_args)\n\n for mw in self.model._meta['middleware']:\n request = mw.handle_request(request)\n\n resource_response = request.send()\n response = NapResponse(\n url=request.url,\n status_code=resource_response.status_code,\n headers=resource_response.headers,\n content=resource_response.content,\n request_method=request_method,\n )\n\n for mw in reversed(self.model._meta['middleware']):\n response = mw.handle_response(request, response)\n\n return response",
"def issueException():\r\n return requests.exceptions.RequestException",
"def retry(num=5):\n s = requests.Session()\n retries = Retry(total=num, backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n s.mount('http://', HTTPAdapter(max_retries=retries))\n\n return s",
"def retry_request(self, method, action, body=None,\r\n headers=None, params=None):\r\n max_attempts = self.retries + 1\r\n for i in range(max_attempts):\r\n try:\r\n return self.do_request(method, action, body=body,\r\n headers=headers, params=params)\r\n except exceptions.ConnectionFailed:\r\n # Exception has already been logged by do_request()\r\n if i < self.retries:\r\n _logger.debug(_('Retrying connection to Neutron service'))\r\n time.sleep(self.retry_interval)\r\n\r\n raise exceptions.ConnectionFailed(reason=_(\"Maximum attempts reached\"))",
"def _http_request(self, method, url_suffix='', **kwargs):\n try:\n res = super()._http_request(method, url_suffix, error_handler=exception_handler, **kwargs)\n except Exception as e:\n if 'Expired Token' in e.__str__():\n self.generate_new_token()\n res = super()._http_request(method, url_suffix, error_handler=exception_handler, **kwargs)\n else:\n raise e\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper for requests GET method
|
def _get(self, *args, **kwargs):
return self._request('get', *args, **kwargs)
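The same one-line delegation is used by the POST, PUT, and DELETE wrappers below; a hedged sketch of calling the wrappers from inside the client (the endpoint, headers attribute, and payload are illustrative assumptions):

# Illustrative only: any keyword arguments are passed straight through to requests.
monitors = self._get('https://synthetics.newrelic.com/synthetics/api/v3/monitors',
                     headers=self.headers, timeout=self.timeout)
created = self._post('https://synthetics.newrelic.com/synthetics/api/v3/monitors',
                     headers=self.headers, json={'name': 'example-monitor'},
                     timeout=self.timeout)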
|
[
"def _get(self, *args, **kwargs):\n return self._request(requests.get, *args, **kwargs)",
"def _get(self, url, **data):\n return self._request('GET', url, **data)",
"def http_method_get():\n return 'GET'",
"def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)",
"def get(self, *args, **kw):\n kw['method'] = 'GET'\n return self.open(*args, **kw)",
"def get(request_class):",
"def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)",
"def get(self, url, headers=None):\n pass",
"def get(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['GET'])], **kwargs)",
"def do_GET(self):\n self.parameters = self.__parse_request()\n self.request_path = self.parameters.get(\"path\")\n self.GET()",
"def perform_get_request():\n url = 'https://httpbin.org/get'\n response = requests.get(url)\n return response",
"def http_GET(self, request):\n return self.render(request)",
"def _get(self, url, params=None):\n response = requests.get(url, params=params)\n logging_level = self._get_logging_level(response.status_code)\n self.logger.log(logging_level, '{status} - GET {url}'.format(status=response.status_code, url=response.url))\n return response",
"def http_get(self, **kwargs):\n return self.rabjcallable.get(**kwargs)",
"def GET(self):\n pass",
"def get(self, url, data, code=200):\n self.register(method=\"GET\", url=url, response=self.Response(data=data, code=code))",
"def simulate_get(app, path, **kwargs) -> _ResultBase:\n\n return simulate_request(app, 'GET', path, **kwargs)",
"def get(url, *args, **kwargs):\n return requests.get(localize_url(url), *args, **kwargs)",
"def _request_other(self, url, get=None):\n\t\tif get is None:\n\t\t\tget = {}\n\n\t\treturn self.urlopen(\"%s?%s\" % (url, urlencode(get)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper for requests POST method
|
def _post(self, *args, **kwargs):
return self._request('post', *args, **kwargs)
|
[
"def post_request(dummy_request):\n dummy_request.method = \"POST\"\n return dummy_request",
"def post_requests():",
"def http_method_post():\n return 'POST'",
"def http_post(self, **kwargs):\n return self.rabjcallable.post(**kwargs)",
"def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)",
"def post_params(self, request):\n return Response(request.data)",
"def simulate_post(app, path, **kwargs):\n return simulate_request(app, 'POST', path, **kwargs)",
"def post(self, *args, **kw):\n kw['method'] = 'POST'\n return self.open(*args, **kw)",
"def simulate_post(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'POST', path, **kwargs)",
"def post(self, request, *args, **kwargs):\n pass",
"def _createPostRequest(self, postBody: dict) -> object:\n request = HttpRequest()\n request.method = \"POST\"\n for name,value in postBody.items():\n request.POST[name]= value\n return request",
"def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)",
"def post_required(func):\n def post_wrapper(request,*args,**kwds):\n res = http.ResponseBuilder()\n if request.method != 'POST':\n return res.error(\"post is required\").build_json()\n return func(request,*args,**kwds)\n return post_wrapper",
"def make_post_request(url:str, post_params:dict, **kwargs):\n\n print(\"Making call to '{}'...\".format(url))\n resp = requests.post(url, data=post_params, **kwargs)\n print(\"Received response.\")\n\n if not resp.ok:\n return False, resp.status_code, json.loads(resp.content)\n\n return True, resp.status_code, json.loads(resp.content)",
"def post(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['POST'])], **kwargs)",
"def post(self, request, *args, **kwargs):\n return self.get(request, *args, **kwargs)",
"def do_POST(self):\r\n self.do_GET()",
"def _post(self, r, data):\n soup = BeautifulSoup(r.content, \"html.parser\")\n query = {}\n for field in soup.form.find_all(\"input\"):\n if field.get(\"type\") == \"hidden\":\n query[field.get(\"name\")] = field.get(\"value\")\n\n query.update(data)\n r = self.session.post(soup.form.get(\"action\"),\n headers = self._http_headers(r),\n data = query)\n return r",
"def post_req(self, endpoint : str, data : dict):\r\n return self.session.post('https://127.0.0.1:' + self.app_port + endpoint, data = data, verify = False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper for requests PUT method
|
def _put(self, *args, **kwargs):
return self._request('put', *args, **kwargs)
|
[
"def http_method_put():\n return 'PUT'",
"def do_PUT(self,):\n self.http_method = 'PUT'\n # Nothing to do for now.\n pass",
"def put(self, *args, **kw):\n kw['method'] = 'PUT'\n return self.open(*args, **kw)",
"def http_put(self, **kwargs):\n return self.rabjcallable.put(**kwargs)",
"def put(self, *args, **kwargs):\n return self.handle_put_request()",
"def simulate_put(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PUT', path, **kwargs)",
"def do_PUT(self):\n # pylint: disable=invalid-name\n self._respond()",
"def put(self, path, body=\"\", **kwargs):\n return self.request(path, \"PUT\", body, **kwargs)",
"async def _put_request(self, url, data=None):\n return await self._request('PUT', url, data=data)",
"def simulate_put(app, path, **kwargs):\n return simulate_request(app, 'PUT', path, **kwargs)",
"def test_put_method(self):\n self.getPage('/blah', method='PUT')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"blah\"}')",
"def add_put(self, *args, **kwargs):\n return self.add_route(hdrs.METH_PUT, *args, **kwargs)",
"def put(self, path, request):\n\n try:\n data = json_decode(request.body)\n self.interface_data.set(path, data)\n response = self.interface_data.get(path, False)\n status_code = 200\n except MetadataParameterError as e:\n response = {'error': str(e)}\n status_code = 400\n except (TypeError, ValueError) as e:\n response = {'error': 'Failed to decode PUT request body: {}'.format(str(e))}\n status_code = 400\n return ApiAdapterResponse(response, status_code=status_code)",
"def simulate_put(self, path='/', **kwargs):\n return self.simulate_request('PUT', path, **kwargs)",
"def proxy_http_put(\n self, path: str, port: Optional[int] = None, **kwargs\n ) -> Response:\n return self.proxy_http_request(\"PUT\", path, port, **kwargs)",
"def test_client_can_do_put_request(self):\n response = self.httpbin_4.test_requests_put_method()\n self.assertEqual(response.request.method, 'PUT')\n self.assertEqual(response.status_code, 200)",
"def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})",
"def put_requests():",
"def put(self, url, headers=None, data=None):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper for requests DELETE method
|
def _delete(self, *args, **kwargs):
return self._request('delete', *args, **kwargs)
|
[
"def http_delete(self, **kwargs):\n return self.rabjcallable.delete(**kwargs)",
"def httpDelete(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('DELETE', url, data, params, headers)",
"def perform_delete_request():\n url = 'https://httpbin.org/delete'\n pass",
"async def _delete_request(self, url, data=None):\n return await self._request('DELETE', url, data=data)",
"def do_DELETE(self,):\n self.http_method = 'DELETE'\n self.response()",
"def delete(self):\n \n return self.post(method='delete')",
"def delete(self, url, headers=None, data=None):\n pass",
"def send_http_delete_request(url, args = None):\n\tresponse = requests.delete(url = url, params = args)\n\treturn response.json()",
"def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)",
"def _delete(self, url, **kwargs):\n return self._http.delete(self.cluster + url, timeout=self.timeout, **kwargs)",
"def proxy_http_delete(\n self, path: str, port: Optional[int] = None, **kwargs\n ) -> Response:\n return self.proxy_http_request(\"DELETE\", path, port, **kwargs)",
"def simulate_delete(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'DELETE', path, **kwargs)",
"def test_client_can_do_delete_request(self):\n response = self.httpbin_4.test_requests_delete_method()\n self.assertEqual(response.request.method, 'DELETE')\n self.assertEqual(response.status_code, 200)",
"def json_delete(self, *args, **kwargs):\n return self._do_json_method(self.http_delete, *args, **kwargs)",
"def simulate_delete(self, path='/', **kwargs):\n return self.simulate_request('DELETE', path, **kwargs)",
"def _delete_request(self, client: Client, headers=None):\n if headers is None:\n headers = {}\n return client.delete('/api/books/1/', **headers)",
"def simulate_delete(app, path, **kwargs):\n return simulate_request(app, 'DELETE', path, **kwargs)",
"def delete(self, *arg, headers=None, **kwargs):\n if headers is None:\n headers = self._headers\n if self.base_url:\n return self._session.delete(self.base_url + arg[0], headers=headers, **kwargs)\n return self._session.delete(*arg, headers=headers, **kwargs)",
"def delete(self, url, headers=None):\r\n return self._request('DELETE', url, headers=headers)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load JSON as a protobuf (pb2) object. Any calls to load protobuf objects from JSON in this repository should go through this function. Returns `None` if the loading failed.
|
def open_pbobject(path, pb_class):
assert path.endswith(".json"), 'File extension for {} needs to be json.'.format(path)
if path.startswith('s3://'):
return open_remote_pb_object(path, pb_class)
assert os.path.exists(path), f'Path not found: {path}'
with open(path, 'r', encoding='UTF-8') as json_file:
pb_object = Parse(json_file.read(), pb_class())
return pb_object
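A hedged usage sketch; `ExampleMessage` stands in for any generated pb2 class and the path is illustrative:

from my_project.proto.example_pb2 import ExampleMessage  # hypothetical generated pb2 class

record = open_pbobject('/data/scenes/scene_000001.json', ExampleMessage)
if record is not None:
    print(record)  # protobuf message populated from the JSON file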
|
[
"def _parse_json(\n protocol_contents: str, filename: str = None) -> JsonProtocol:\n protocol_json = json.loads(protocol_contents)\n version, validated = validate_json(protocol_json)\n return JsonProtocol(\n text=protocol_contents, filename=filename, contents=validated,\n schema_version=version)",
"def _proto2object(\n proto: LoadObjectMessage_PB,\n ) -> \"LoadObjectMessage\":\n\n return LoadObjectMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )",
"def json_load(msg):\r\n return json.loads(msg)",
"def deserialize(cls, file: IO[str]) -> 'JobBoardModel':\n jobBoard: JobBoardModel = json.load(file, object_hook=cls.fromDict)\n return jobBoard",
"def read_hub_from_json(self):\n try:\n return ServiceWrapper.from_json_file(self.hub_path)\n except JSONDecodeError:\n # local JSON blob might be invalid\n # see e.g. https://github.com/storyscript/sls/issues/191\n return None\n except OSError:\n # reading local JSON blob might fail\n # see e.g. https://github.com/storyscript/sls/issues/195\n return None",
"def load_from_json(json_data):\n\t\treturn Contact.load_from_map(json.loads(json_data))",
"def de_json(cls: Type[TO], data: Optional[JSONDict], bot: 'Bot') -> Optional[TO]:\n data = cls._parse_data(data)\n\n if data is None:\n return None\n\n if cls == TelegramObject:\n return cls()\n return cls(bot=bot, **data) # type: ignore[call-arg]",
"def load_json(self):\n try:\n with open(self.json_file) as data_file:\n self.data = json.load(data_file)\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise",
"def jsonpickle_load(filename):\n f = open(filename)\n json_str = f.read()\n obj = jsonpickle.decode(json_str, keys=True) # Use_jsonpickle=True used to prevent jsonPickle from encoding dictkeys to strings.\n return obj",
"def load_json(json_string):\n return json.loads(json_string)",
"def load(path: str) -> \"DataDescriptor\":\n\n\t\twith open(path, \"r\") as f:\n\t\t\tinfo_dict = json.load(f)\n\n\t\treturn DataDescriptor(\n\t\t\tn_gram_size=int(info_dict[\"n_gram_size\"]),\n\t\t\tcaseless=bool(info_dict[\"caseless\"]),\n\t\t\tignore_punctuation=bool(info_dict[\"ignore_punctuation\"]),\n\t\t\tadd_pos_tags=bool(info_dict[\"add_pos_tags\"]),\n\t\t\tuses_lemma=bool(info_dict[\"uses_lemma\"]),\n\t\t\tuses_sentences=bool(info_dict[\"uses_sentences\"])\n\t\t)",
"def load(fp, object_hook=object_hook, **kwargs):\n return json.load(fp, object_hook=object_hook, **kwargs)",
"def from_json(cls, json_str: str) -> V1Hook:\n return cls.from_dict(json.loads(json_str))",
"def _proto2object(\n proto: SaveObjectMessage_PB,\n ) -> \"SaveObjectMessage\":\n\n return SaveObjectMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n )",
"def decodeJson(json_text: Union[str, bytes], obj: PybindBase):\n json_text = removeOpenConfigPrefix(json_text)\n pybindJSONDecoder.load_ietf_json(json.loads(json_text), None, None, obj)",
"def _load_pipeline_proto(filename):\n pipeline_proto = pipeline_pb2.Pipeline()\n with tf.gfile.GFile(filename, 'r') as fp:\n text_format.Merge(fp.read(), pipeline_proto)\n return pipeline_proto",
"def _load_message(self,\n message_pb: descriptor_pb2.DescriptorProto,\n address: metadata.Address,\n path: Tuple[int],\n resources: Mapping[str, wrappers.MessageType],\n ) -> wrappers.MessageType:\n address = address.child(message_pb.name, path)\n\n # Load all nested items.\n #\n # Note: This occurs before piecing together this message's fields\n # because if nested types are present, they are generally the\n # type of one of this message's fields, and they need to be in\n # the registry for the field's message or enum attributes to be\n # set correctly.\n nested_enums = self._load_children(\n message_pb.enum_type,\n address=address,\n loader=self._load_enum,\n path=path + (4,),\n resources=resources,\n )\n nested_messages = self._load_children(\n message_pb.nested_type,\n address=address,\n loader=self._load_message,\n path=path + (3,),\n resources=resources,\n )\n\n oneofs = self._get_oneofs(\n message_pb.oneof_decl,\n address=address,\n path=path + (7,),\n )\n\n # Create a dictionary of all the fields for this message.\n fields = self._get_fields(\n message_pb.field,\n address=address,\n path=path + (2,),\n oneofs=oneofs,\n )\n fields.update(self._get_fields(\n message_pb.extension,\n address=address,\n path=path + (6,),\n oneofs=oneofs,\n ))\n\n # Create a message correspoding to this descriptor.\n self.proto_messages[address.proto] = wrappers.MessageType(\n fields=fields,\n message_pb=message_pb,\n nested_enums=nested_enums,\n nested_messages=nested_messages,\n meta=metadata.Metadata(\n address=address,\n documentation=self.docs.get(path, self.EMPTY),\n ),\n oneofs=oneofs,\n )\n return self.proto_messages[address.proto]",
"def testLoadProtojsonWithValidJsonModule(self):\n sys.modules['json'] = ValidJsonModule\n\n # This will cause protojson to reload with the default json module\n # instead of simplejson.\n reload(protojson)\n self.assertEquals('json', protojson.json.name)",
"def deserialize(cls, json_str):\n return cls.deserialize_json(json.loads(json_str))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Like open_pbobject, but source can be a path or a bytestring.
|
def parse_pbobject(source, pb_class):
if isinstance(source, str):
return open_pbobject(source, pb_class)
elif isinstance(source, bytes):
pb_object = pb_class()
pb_object.ParseFromString(source)
return pb_object
else:
        logging.error(f'Cannot parse type {type(source)}; expected a str path or a bytes-serialized protobuf.')
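A hedged sketch showing both accepted source types; the pb2 class and path are illustrative:

from my_project.proto.example_pb2 import ExampleMessage  # hypothetical generated pb2 class

from_json = parse_pbobject('/data/scenes/scene_000001.json', ExampleMessage)
from_bytes = parse_pbobject(from_json.SerializeToString(), ExampleMessage)
assert from_json == from_bytes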
|
[
"def _open(self, source):\r\n if hasattr(source, 'read'):\r\n return source\r\n else:\r\n from io import StringIO\r\n return StringIO(source)",
"def load_object(source):\n print(\"Loading pickle object\")\n with open(source, 'rb') as s:\n return pickle.load(s)",
"def open_pds(source):\n\t# if isinstance(source, file):\n\t# \treturn source\n\tif hasattr(source, \"read\"):\n\t\t# sys.stderr.write(\"Identified a file-like object by read() method existence\\n\")\n\t\treturn source\n\n\ttry:\n\t\t# For universal newlines -- i.e. newlines are automatically converted to \"\\n\", use mode \"U\".\n\t\t# For preserved newlines -- e.g. \"\\r\", \"\\r\\n\", \"\\n\", use mode \"rb\".\n\t\t# PDS style newlines are \"\\r\\n\", however, http://pds.jpl.nasa.gov/documents/qs/sample_image.lbl uses \"\\n\".\n\t\t# Check if hasattr(open, 'newlines') to verify that universal newline support is enabeled.\n\t\tf = open(source, \"rb\")\n\t\treturn f\n\texcept (IOError, OSError):\n\t\t# sys.stderr.write(\"Could not open source\\n\")\n\t\traise\n\telse:\n\t\t# sys.stderr.write(\"Opened source\\n\")\n\t\t# Re-raise to catch something hairy.\n\t\traise\n\tfinally:\n\t\tpass\n\t\t# sys.stderr.write(\"Closing previously opened file\\n\")\n\t\t# f.close()\n\t\t\n\tif isinstance(source, str):\n\t\ttry:\n\t\t\timport cStringIO as StringIO\n\t\texcept ImportError:\n\t\t\timport StringIO\n\t\telse:\n\t\t\t# sys.stderr.write(\"Making a file-like object from string source\\n\")\n\t\t\treturn StringIO.StringIO(str(source))\n\t\t\t\n\t# try:\n\t# \timport urllib\n\t# \tf = urllib.urlopen(source)\n\t# \treturn f\n\t# except (IOError, OSError):\n\t# \tpass\n\t# else:\n\t# \t# Re-raise to catch something hairy.\n\t# \traise\n\t# finally:\n\t# \tpass",
"def load_stream(source):\n raise NotImplementedError(\"not implemented yet\")",
"def test_prepare_source(source):\n assert isinstance(PseudoPotentialData.prepare_source(source), io.BytesIO)\n\n if isinstance(source, io.BytesIO):\n # If we pass a bytestream, we should get the exact same back\n assert PseudoPotentialData.prepare_source(source) is source",
"def dumpIO_source(object, **kwds):\n from .source import importable, getname\n if PY3:\n from io import BytesIO as StringIO\n else:\n from StringIO import StringIO\n alias = kwds.pop('alias', '') #XXX: include an alias so a name is known\n name = str(alias) or getname(object)\n name = \"\\n#NAME: %s\\n\" % name\n #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH\n file = StringIO()\n file.write(b(''.join([importable(object, alias=alias),name])))\n file.flush()\n return file",
"def test_open_file_object(self):\n test_source = unittest.source\n if not test_source:\n raise unittest.SkipTest(\"missing source\")\n\n if not os.path.isfile(test_source):\n raise unittest.SkipTest(\"source not a regular file\")\n\n ${library_name_suffix}_${type_name} = ${python_module_name}.${type_name}()",
"def __init__(self, source=None):\n if source is not None:\n if isinstance(source, (str, unicode)):\n if os.path.isfile(source):\n self.from_file(source)\n elif hasattr(self, \"_FILE_NAME_\"):\n self.from_file(os.path.join(source, self._FILE_NAME_))\n else:\n raise IOError(\"Could not open result {}\".format(source))\n elif isinstance(source, dict):\n self.from_dict(source)\n else:\n self.from_file(source)",
"def make(self, source):\n if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)",
"def psource(self,obj,oname=''):\n try:\n src = inspect.getsource(obj) \n except:\n self.noinfo('source',oname)\n else:\n page(self.format(src))",
"def _source_path_reader(self, src, encoding=\"utf-8\"):\n if src is None:\n return src\n if isinstance(src, dict) and \"content\" in src:\n with tempfile.NamedTemporaryFile(mode=\"w\", encoding=encoding, delete=False) as fp:\n fp.write(src[\"content\"])\n return fp.name\n elif isinstance(src, dict) and \"file\" in src:\n if os.path.exists(src[\"file\"]) is False:\n raise FileNotFound(src)\n return src[\"file\"]\n else:\n raise InvalidParameter(\"The parameter is invalid.\")",
"def create_outside_firmware_source(parent_mo, source, **args):\n args = args['optional_args'] if 'optional_args' in args.keys() else args\n\n # Create mo\n protocol = get_value(args, 'protocol', DEFAULT_PROTOCOL)\n if protocol != 'scp':\n args['source_user'] = DEFAULT_NONE\n args['source_password'] = DEFAULT_NONE\n firmware_osource = OSource(parent_mo, source, proto=protocol,\n url=get_value(args, 'url', DEFAULT_NONE),\n user=get_value(args, 'source_user', DEFAULT_NONE),\n password=get_value(args, 'source_password', DEFAULT_NONE))\n return firmware_osource",
"def Sourceify(path):\n return path",
"def _load_source(self):\r\n pass",
"def openStream(self, source):\n # Already a file object\n if hasattr(source, 'read'):\n stream = source\n else:\n # Otherwise treat source as a string and convert to a file object\n if isinstance(source, unicode):\n source = source.encode('utf-8')\n self.charEncoding = (\"utf-8\", \"certain\")\n import cStringIO\n stream = cStringIO.StringIO(str(source))\n return stream",
"def get_source_object(self): # real signature unknown; restored from __doc__\n pass",
"def CSourceToInput(source: str) -> str:\n bytecode = CSourceToBytecode(source)\n return bytecode",
"def from_source(cls, *args, **kwargs):\n raise NotImplementedError",
"def __init__(self,source):\n if not isinstance(source, basestring):\n try: source=source.getvalue()\n except AttributeError: \n raise InvalidSource(source)\n if source.find('<XTSM')==-1:\n try: source=urllib.urlopen(source).read()\n except IOError: \n try: source=urllib.urlopen('file:'+source).read()\n except IOError: InvalidSource(source)\n self.XTSM = gnosis.xml.objectify.make_instance(source)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save a protobuf (pb2) object to a JSON file with our standard indent, key ordering, and other settings. Any calls to save protobuf objects to JSON in this repository should go through this function.
|
def save_pbobject_as_json(pb_object, save_path):
if os.path.isdir(save_path):
save_path = os.path.join(save_path, generate_uid_from_pbobject(pb_object) + ".json")
assert save_path.endswith(".json"), 'File extension for {} needs to be json.'.format(save_path)
with open(save_path, "w", encoding='UTF-8') as _f:
json.dump(
MessageToDict(pb_object, including_default_value_fields=True, preserving_proto_field_name=True),
_f,
indent=2,
sort_keys=True
)
return save_path
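A hedged round-trip sketch; the pb2 class and output directory are illustrative (when a directory is passed, a UID-based filename is generated inside it):

import os
from my_project.proto.example_pb2 import ExampleMessage  # hypothetical generated pb2 class

obj = ExampleMessage()
os.makedirs('/tmp/pb_json_out', exist_ok=True)
out_path = save_pbobject_as_json(obj, '/tmp/pb_json_out')  # writes <uid>.json inside the directory
assert open_pbobject(out_path, ExampleMessage) == obj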
|
[
"def _save(self):\n json_file = json.dumps(self.data, separators=(',', ':'))\n with open(self.path2save, \"w\") as outfile:\n outfile.write(json_file)\n outfile.close()",
"def save(self):\n tojson = {}\n for key in self.__objects:\n tojson[key] = self.__objects[key].to_dict()\n with open(self.__file_path, \"w\") as f:\n json.dump(tojson, f)",
"def _object2proto(self) -> SaveObjectMessage_PB:\n return SaveObjectMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n )",
"def save(self):\n d1 = {}\n with open(self.__file_path, mode=\"w\") as f:\n for k, v in self.__objects.items():\n d1[k] = v.to_dict()\n json.dump(d1, f)",
"def save(self):\n print(\"saving json file:\", self.json_path)\n with open(self.json_path, 'w') as outfile:\n outfile.write( json.dumps(self.json_data, indent=2) )",
"def save(self):\n with open(self.__file_path, \"w\") as f:\n f.write(json.dumps(self.__objects))",
"def save(self):\n data = json.dumps(self, default=lambda o: o.__dict__)\n with open(self.wallet_path, 'w') as f:\n f.write(data)",
"def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)",
"def jsonpickle_save(obj, filename):\n f = open(filename+\".json_pickle\", 'w')\n json_obj = jsonpickle.encode(obj, keys=True) # Use_jsonpickle=True used to prevent jsonPickle from encoding dictkeys to strings.\n f.write(json_obj)\n f.close()",
"def save_to_json(filename, struct):\n with open(filename, 'w') as outfile:\n json.dump(struct, outfile, sort_keys=True, indent=4)",
"def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)",
"def WriteProtoFile(self, printer):\n self.Validate()\n extended_descriptor.WriteMessagesFile(\n self.__file_descriptor, self.__package, self.__client_info.version,\n printer)",
"def save_to_json_file(my_obj, filename):\n import json\n with open(filename, 'w', encoding='utf-8') as f:\n obj = json.dumps(my_obj)\n f.write(obj)",
"def write_custom_json(filepath, obj):\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(obj, file_obj, cls=CustomEncoder, ensure_ascii=False, indent=2)",
"def save(obj, filename, format = \"JSON\"):\n if format == \"Python\":\n s = str(obj)\n else:\n s = json.dumps(obj, indent=2)\n open(filename,'w').write(s)\n return",
"def SaveToJSON(self):\n import json\n\n f = open(f\"/Cache/{self.symbol}.JSON\", \"w\")\n j = {\"name\": self.name, \"symbol\": self.symbol}\n\n f.write(\"{\\\"name\\\":\\\"\" + str(self.name) + \"\\\", \")\n f.write(json.dumps(j))\n f.close()\n\n print(\"Warning: SaveToJSON not fully implemented.\")",
"def write_json(path, filename, object):\n with open(path + '/' + filename + '.' + 'json', 'w') as data_file:\n data_file.write(json.dumps(object, sort_keys=False, indent=2))\n data_file.close()",
"def write(self):\r\n\r\n with open(self.filename + \".json\", mode='w') as json_file:\r\n json.dump(self.data, json_file, separators=(',', ':'))",
"def write_to_json(self):\r\n logging.info('Writing records to JSON')\r\n with open(self.backup, 'w') as fp:\r\n json.dump(self.record, fp)\r\n logging.info(\"Finished writing records to JSON\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open an ontology object, attempting the V2 spec first before falling back to V1.
|
def open_ontology_pbobject(ontology_file):
try:
ontology = parse_pbobject(ontology_file, OntologyV2Pb2)
if ontology is not None:
logging.info('Successfully loaded Ontology V2 spec.')
return ontology
except Exception:
logging.error('Failed to load ontology file with V2 spec, trying V1 spec.')
try:
ontology = parse_pbobject(ontology_file, OntologyV1Pb2)
if ontology is not None:
logging.info('Successfully loaded Ontology V1 spec.')
return ontology
except Exception:
if isinstance(ontology_file, str):
            logging.error('Failed to load ontology file ' + ontology_file + ' with V1 spec also, returning None.')
else:
logging.error('Failed to load ontology file with V1 spec also, returning None.')
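A hedged usage sketch; the ontology path is illustrative:

ontology = open_ontology_pbobject('/data/ontology/bounding_box_ontology.json')
if ontology is None:
    print('Ontology could not be parsed with either the V2 or the V1 spec.')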
|
[
"def test_read_owl2():\n t = Transformer()\n s = OwlSource(t)\n\n g = s.parse(\n os.path.join(RESOURCE_DIR, \"goslim_generic.owl\"),\n provided_by=\"GO slim generic\",\n knowledge_source=\"GO slim generic\",\n )\n nodes = {}\n edges = {}\n for rec in g:\n if rec:\n if len(rec) == 4:\n edges[(rec[0], rec[1])] = rec[3]\n else:\n nodes[rec[0]] = rec[1]\n\n n1 = nodes[\"GO:0008150\"]\n assert n1[\"name\"] == \"biological_process\"\n assert \"has_exact_synonym\" in n1\n assert \"description\" in n1\n assert \"comment\" in n1\n assert \"has_alternative_id\" in n1\n assert \"GO slim generic\" in n1[\"provided_by\"]\n\n n2 = nodes[\"GO:0003674\"]\n n2[\"name\"] = \"molecular_function\"\n assert \"has_exact_synonym\" in n2\n assert \"description\" in n2\n assert \"comment\" in n2\n assert \"has_alternative_id\" in n2\n assert \"GO slim generic\" in n2[\"provided_by\"]\n\n n3 = nodes[\"GO:0005575\"]\n n3[\"name\"] = \"cellular_component\"\n assert \"has_exact_synonym\" in n3\n assert \"description\" in n3\n assert \"comment\" in n3\n assert \"has_alternative_id\" in n3\n assert \"GO slim generic\" in n3[\"provided_by\"]\n\n e1 = edges[\"GO:0008289\", \"GO:0003674\"]\n assert e1[\"subject\"] == \"GO:0008289\"\n assert e1[\"predicate\"] == \"biolink:subclass_of\"\n assert e1[\"object\"] == \"GO:0003674\"\n assert e1[\"relation\"] == \"rdfs:subClassOf\"\n assert \"GO slim generic\" in e1[\"knowledge_source\"]",
"def load_owl(self):\n \n for owl_file in self.owl_files.split(\":\"):\n\n # parse the owl file\n g = rdflib.Graph()\n try:\n g.parse(owl_file, format='xml')\n except Exception as e:\n raise rdflib.OWLException(\"Parsing failed!\")\n \n # add data to the local storage\n self.local_storage += g",
"def test_read_owl3():\n node_property_predicates = {\"http://www.geneontology.org/formats/oboInOwl#inSubset\"}\n predicate_mappings = {\n \"http://www.geneontology.org/formats/oboInOwl#inSubset\": \"subsets\",\n \"http://www.geneontology.org/formats/oboInOwl#hasOBONamespace\": \"namespace\",\n \"http://www.geneontology.org/formats/oboInOwl#hasAlternativeId\": \"xref\",\n }\n\n t = Transformer()\n source = OwlSource(t)\n\n source.set_predicate_mapping(predicate_mappings)\n source.set_node_property_predicates(node_property_predicates)\n g = source.parse(filename=os.path.join(RESOURCE_DIR, \"goslim_generic.owl\"))\n nodes = {}\n edges = {}\n for rec in g:\n if rec:\n if len(rec) == 4:\n key = (rec[0], rec[1])\n if key in edges:\n edges[key].append(rec[-1])\n else:\n edges[key] = [rec[-1]]\n else:\n nodes[rec[0]] = rec[-1]\n\n n1 = nodes[\"GO:0008150\"]\n pprint(n1)\n assert n1[\"name\"] == \"biological_process\"\n assert \"subsets\" in n1 and \"GOP:goslim_generic\" in n1[\"subsets\"]\n assert \"has_exact_synonym\" in n1\n assert \"description\" in n1\n assert \"comment\" in n1\n assert \"xref\" in n1 and \"GO:0044699\" in n1[\"xref\"]\n\n n2 = nodes[\"GO:0003674\"]\n n2[\"name\"] = \"molecular_function\"\n assert \"subsets\" in n2 and \"GOP:goslim_generic\" in n2[\"subsets\"]\n assert \"has_exact_synonym\" in n2\n assert \"description\" in n2\n assert \"comment\" in n2\n assert \"xref\" in n2 and \"GO:0005554\" in n2[\"xref\"]\n\n n3 = nodes[\"GO:0005575\"]\n n3[\"name\"] = \"cellular_component\"\n assert \"subsets\" in n3 and \"GOP:goslim_generic\" in n3[\"subsets\"]\n assert \"has_exact_synonym\" in n3\n assert \"description\" in n3\n assert \"comment\" in n3\n assert \"xref\" in n3 and \"GO:0008372\" in n3[\"xref\"]\n\n e1 = edges[\"GO:0008289\", \"GO:0003674\"][0]\n assert e1[\"subject\"] == \"GO:0008289\"\n assert e1[\"predicate\"] == \"biolink:subclass_of\"\n assert e1[\"object\"] == \"GO:0003674\"\n assert e1[\"relation\"] == \"rdfs:subClassOf\"",
"def open_feature_ontology_pbobject(ontology_file):\n try:\n ontology = open_pbobject(ontology_file, FeatureOntologyPb2)\n if ontology is not None:\n logging.info('Successfully loaded FeatureOntology spec.')\n return ontology\n except Exception:\n logging.error('Failed to load ontology file' + ontology_file + '.')",
"def multi_open(name, base_url='http://library.metatab.org/', print_ref=False):\n from metapack.exc import MetatabFileNotFound\n from rowgenerators.exceptions import AppUrlError\n\n r = None\n\n refs = [\n name,\n 'index:' + name,\n 'index:' + remove_version(name),\n base_url + name + '.csv',\n base_url + remove_version(name) + '.csv',\n ]\n\n for ref in refs:\n\n try:\n r = open_package(ref)\n if print_ref:\n print(\"Opening: \", ref)\n return r\n except (MetatabFileNotFound, AppUrlError):\n pass\n\n return None",
"def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"",
"def __init__(self, ontology: str, *, ols_base: Optional[str] = None, auth: Optional[Tuple[str, str]] = None):\n self.ontology = ontology\n self.ols_client = OlsClient(ols_base=ols_base)\n\n if auth is not None:\n self.auth = auth\n elif 'ARTY_USERNAME' in os.environ and 'ARTY_PASSWORD' in os.environ:\n self.auth = (os.environ['ARTY_USERNAME'], os.environ['ARTY_PASSWORD'])\n else:\n self.auth = None\n\n self.metadata = self.ols_client.get_ontology(self.ontology)\n\n if self.metadata['status'] == 404:\n raise ValueError('Error from OLS:\\n{}'.format(json.dumps(self.metadata, indent=2)))",
"def ontologyFileWORepo(self, url: GitUrlParser, reasonerSelected: bool):\n returnObject = {}\n try:\n getOntologyResponse = requests.get(\"http://\" + url.file, timeout=settings.SINGLEONTOLOGYRETRIEVETIMOUT)\n ontology = getOntologyResponse.text\n returnObject[\"Size\"] =len(getOntologyResponse.content)\n except:\n returnObject[\"Size\"] = 0\n returnObject[\"ReadingError\"] = \"Ontology Source not Available\"\n opi = OpiHandler()\n \n \n returnObject[\"reasonerActive\"] = reasonerSelected\n if \"ontology\" in locals():\n try:\n opiMetrics = opi.opiOntologyRequest(\n ontologyString=ontology, classMetrics=False, ontologySize=returnObject[\"Size\"] , reasoner=reasonerSelected)\n if \"GeneralOntologyMetrics\" in opiMetrics:\n opiMetrics.update(opiMetrics.pop(\"GeneralOntologyMetrics\"))\n returnObject.update(opiMetrics)\n self.logger.debug(\"Ontology Analyzed Successfully\")\n except IOError:\n # A reading Error occurs, e.g., if an ontology does not conform to a definied ontology standard and cannot be parsed\n self.logger.warning(\n \"Ontology {0} not Readable \".format(url.file))\n \n returnObject[\"ReadingError\"] = \"Ontology not Readable\"\n \n ontologyFile = OntologyFile.objects.filter(fileName=url.file)\n if not ontologyFile.exists():\n ontologyFile = OntologyFile.objects.create(fileName=url.file)\n else:\n ontologyFile = ontologyFile[0]\n commit = Commit.objects.filter(metricSource=ontologyFile, reasonerActive = reasonerSelected)\n if not commit.exists():\n Commit.objects.create(metricSource = ontologyFile, **returnObject)\n else:\n commit = Commit(metricSource = ontologyFile, **returnObject, pk=commit[0].id)\n commit.save()",
"def __init__(self, short_term: str):\n if type(short_term) is not str:\n raise TypeError(\"The ontology object can only be initialzed with a string value\")\n self.short_term = short_term\n host = \"http://www.ebi.ac.uk/ols/api/terms?id=\" + short_term\n request = requests.get(host)\n\n response = request.json()\n num = response['page']['totalElements']\n\n if num:\n if num > 20:\n host = host + \"&size=\" + str(num)\n request = requests.get(host)\n response = request.json()\n terms = response['_embedded']['terms']\n for term in terms:\n if term['is_defining_ontology']:\n self.found = True\n self.detail = term\n else:\n logger.error(\"Could not find information for \" + short_term)",
"def load_ontology(self):\n with open(self.ontology_file_path) as ont_file:\n ontology = json.load(ont_file)\n return ontology",
"def svn_ra_open2(*args):\r\n return _ra.svn_ra_open2(*args)",
"def do_open(self, args):\n \n args = self.ParseArguments(args)\n \n if len(args) == 0:\n self.perror(\"No version specified.\")\n return\n if len(args) == 1:\n self.perror(\"No freecad specified.\")\n return\n\n conn = fiepipelib.assetdata.assetdatabasemanager.GetConnection(self.GetGitWorkingAsset())\n db = self.GetMultiManager()\n man = self.GetManager(db)\n db.AttachToConnection(conn)\n \n version = self.GetItemByName(args[0], man, conn)\n \n if version == None:\n self.perror(\"Version does not exist.\")\n return\n \n if not version.FileExists():\n self.perror(\"File does not exist.\")\n return\n \n fcman = fiepipefreecad.freecad.FreeCADLocalManager(self.GetAssetShell()._localUser)\n freecads = fcman.get_by_name(args[1])\n\n if len(freecads) == 0:\n self.perror(\"No such freecad.\")\n return\n \n freecad = freecads[0]\n assert isinstance(freecad, fiepipefreecad.freecad.FreeCAD)\n freecad.LaunchInteractive(filepaths=[version.GetAbsolutePath()])",
"def __init__(self, ontology):\n\n self.ontology = ontology",
"def get_ontology(base_iri='emmo-inferred.owl', verbose=False):\n if (not base_iri.endswith('/')) and (not base_iri.endswith('#')):\n base_iri = '%s#' % base_iri\n if base_iri in owlready2.default_world.ontologies:\n onto = owlready2.default_world.ontologies[base_iri]\n else:\n onto = Ontology(owlready2.default_world, base_iri)\n onto._verbose = verbose\n return onto",
"def ontology():\n return OKJSONResponse(ONTOLOGY)",
"def get_ontology(path=ONTOLOGY_PATH, recreate=False, verbose=True):\n tick = time.clock()\n ontology = OntologyFactory(path, recreate=recreate)\n if verbose:\n print(\"Ontology loading time: {0:.2f}s\".format(time.clock() - tick))\n\n return ontology",
"def import_from_file(self):\n\n onto_path.append(self.config.ontology_file)\n\n self.ontology = get_ontology(\"file://\" + self.config.ontology_file).load()\n\n with self.ontology:\n\n # Redefine relevant classes and relationships needed for retrieving the knowledge from the ontology\n class Feature(Thing):\n pass\n\n class RelevanceScore(Thing):\n pass\n\n class Component(Thing):\n pass\n\n # Retrieve features and store the names with the correct indices (matches self.dataset.feature_names_all,\n # which is not used in order to emphasize the ontology approach)\n features = []\n for feature in Feature.instances():\n f_name = feature.name\n f_index = feature.has_index_in_data[0]\n features.append((f_name, f_index))\n\n self.feature_names = np.empty(len(features), dtype=object)\n\n for f_name, f_index in features:\n self.feature_names[f_index] = f_name\n self.feature_name_to_index[f_name] = f_index\n\n # Retrieve component individuals and create a dictionary entry that stores the feature relevance vector for\n # this component. Note that the array is filled with the configured default\n # value of relevance when none is explicitly defined\n for component in Component.instances():\n relevance_vector = np.full(self.feature_names.shape[0],\n fill_value='e', dtype='str')\n self.relevance_knowledge_str[component.name] = relevance_vector\n\n # Fill previously created relevance vectors with the relevance scores in defined in the ontology\n for rs in RelevanceScore.instances():\n comp_name = rs.of_component[0].name\n symptom = rs.of_feature[0]\n symptom_index = symptom.has_index_in_data[0]\n relevance_score = rs.has_score[0]\n\n rel_vec_comp = self.relevance_knowledge_str.get(comp_name)\n rel_vec_comp[symptom_index] = relevance_score",
"def get_ontology(base_iri='emmo-inferred.owl', verbose=False, name=None):\n\n if (not base_iri.endswith('/')) and (not base_iri.endswith('#')):\n base_iri = '%s#'%base_iri\n if base_iri in default_world.ontologies:\n onto = default_world.ontologies[base_iri]\n else:\n onto = MyOntology(default_world, base_iri, name=name)\n onto._verbose = verbose\n return onto",
"def initOpen(self):\n\n\t\tfor view in util.allViews():\n\t\t\t# ignore unnamed files and files that don't exist in the file system\n\t\t\tif view.file_name() and os.path.exists( view.file_name() ): \n\t\t\t\tself.init_view(view)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open feature ontology objects.
|
def open_feature_ontology_pbobject(ontology_file):
try:
ontology = open_pbobject(ontology_file, FeatureOntologyPb2)
if ontology is not None:
logging.info('Successfully loaded FeatureOntology spec.')
return ontology
except Exception:
        logging.error('Failed to load ontology file ' + ontology_file + '.')
|
[
"def openTypeFeatures(self, features):\n #raise NotImplementedError",
"def import_from_file(self):\n\n onto_path.append(self.config.ontology_file)\n\n self.ontology = get_ontology(\"file://\" + self.config.ontology_file).load()\n\n with self.ontology:\n\n # Redefine relevant classes and relationships needed for retrieving the knowledge from the ontology\n class Feature(Thing):\n pass\n\n class RelevanceScore(Thing):\n pass\n\n class Component(Thing):\n pass\n\n # Retrieve features and store the names with the correct indices (matches self.dataset.feature_names_all,\n # which is not used in order to emphasize the ontology approach)\n features = []\n for feature in Feature.instances():\n f_name = feature.name\n f_index = feature.has_index_in_data[0]\n features.append((f_name, f_index))\n\n self.feature_names = np.empty(len(features), dtype=object)\n\n for f_name, f_index in features:\n self.feature_names[f_index] = f_name\n self.feature_name_to_index[f_name] = f_index\n\n # Retrieve component individuals and create a dictionary entry that stores the feature relevance vector for\n # this component. Note that the array is filled with the configured default\n # value of relevance when none is explicitly defined\n for component in Component.instances():\n relevance_vector = np.full(self.feature_names.shape[0],\n fill_value='e', dtype='str')\n self.relevance_knowledge_str[component.name] = relevance_vector\n\n # Fill previously created relevance vectors with the relevance scores in defined in the ontology\n for rs in RelevanceScore.instances():\n comp_name = rs.of_component[0].name\n symptom = rs.of_feature[0]\n symptom_index = symptom.has_index_in_data[0]\n relevance_score = rs.has_score[0]\n\n rel_vec_comp = self.relevance_knowledge_str.get(comp_name)\n rel_vec_comp[symptom_index] = relevance_score",
"def __init__(self, ontology):\n\n self.ontology = ontology",
"def _get_ontology_db_xrefs(self, feature):\n ontology = collections.defaultdict(dict) # type: dict\n db_xrefs = []\n # these are keys are formatted strangely and require special parsing\n for key in (\"go_process\", \"go_function\", \"go_component\"):\n ontology_event_index = self._create_ontology_event(\"GO\")\n for term in feature.get(key, []):\n sp = term.split(\" - \")\n ontology['GO'][sp[0]] = [ontology_event_index]\n self.ontologies_present['GO'][sp[0]] = self.ont_mappings['GO'].get(sp[0], '')\n\n # CATH terms are not distinct from EC numbers so myst be found by key\n for term in feature.get('cath_funfam', []) + feature.get('cath', []):\n for ref in term.split(','):\n ontology['CATH'][ref] = [self._create_ontology_event(\"CATH\")]\n self.ontologies_present['CATH'][ref] = self.ont_mappings['CATH'].get(ref, '')\n\n search_keys = ['ontology_term', 'db_xref', 'dbxref', 'product_source', 'tigrfam', 'pfam',\n 'cog', 'go', 'po', 'ko']\n ont_terms = [] # type: list\n # flatten out into list of values\n for key in search_keys:\n if key in feature:\n ont_terms += [x for y in feature[key] for x in y.split(',')]\n\n for ref in ont_terms:\n if ref.startswith('GO:'):\n ontology['GO'][ref] = [self._create_ontology_event(\"GO\")]\n self.ontologies_present['GO'][ref] = self.ont_mappings['GO'].get(ref, '')\n elif ref.startswith('PO:'):\n ontology['PO'][ref] = [self._create_ontology_event(\"PO\")]\n self.ontologies_present['PO'][ref] = self.ont_mappings['PO'].get(ref, '')\n elif ref.startswith('KO:'):\n ontology['KO'][ref] = [self._create_ontology_event(\"KO\")]\n self.ontologies_present['KO'][ref] = self.ont_mappings['KO'].get(ref, '')\n elif ref.startswith('COG'):\n ontology['COG'][ref] = [self._create_ontology_event(\"COG\")]\n self.ontologies_present['COG'][ref] = self.ont_mappings['COG'].get(ref, '')\n elif ref.startswith('PF'):\n ontology['PFAM'][ref] = [self._create_ontology_event(\"PFAM\")]\n self.ontologies_present['PFAM'][ref] = self.ont_mappings['PFAM'].get(ref, '')\n elif ref.startswith('TIGR'):\n ontology['TIGRFAM'][ref] = [self._create_ontology_event(\"TIGRFAM\")]\n self.ontologies_present['TIGRFAM'][ref] = self.ont_mappings['TIGRFAM'].get(ref, '')\n elif \":\" not in ref:\n db_xrefs.append(tuple([\"Unknown_Source\", ref]))\n else:\n db_xrefs.append(tuple(ref.split(\":\", 1)))\n return dict(ontology), db_xrefs",
"def open_shapefile(file_path):\n datasource = ogr.Open(file_path)\n layer = datasource.GetLayerByIndex(0)\n print(\"Opening {} with {} features\".format(file_path, layer.GetFeatureCount()))\n return datasource",
"def open_ontology_pbobject(ontology_file):\n try:\n ontology = parse_pbobject(ontology_file, OntologyV2Pb2)\n if ontology is not None:\n logging.info('Successfully loaded Ontology V2 spec.')\n return ontology\n except Exception:\n logging.error('Failed to load ontology file with V2 spec, trying V1 spec.')\n try:\n ontology = parse_pbobject(ontology_file, OntologyV1Pb2)\n if ontology is not None:\n logging.info('Successfully loaded Ontology V1 spec.')\n return ontology\n except Exception:\n if isinstance(ontology_file, str):\n logging.error('Failed to load ontology file' + ontology_file + 'with V1 spec also, returning None.')\n else:\n logging.error('Failed to load ontology file with V1 spec also, returning None.')",
"def open_shapefile(file_path):\n datasource = ogr.Open(file_path)\n layer = datasource.GetLayerByIndex(0)\n print(\"Opening {}\".format(file_path))\n print(\"Number of features: {}\".format(layer.GetFeatureCount()))\n return datasource",
"def load_owl(self):\n \n for owl_file in self.owl_files.split(\":\"):\n\n # parse the owl file\n g = rdflib.Graph()\n try:\n g.parse(owl_file, format='xml')\n except Exception as e:\n raise rdflib.OWLException(\"Parsing failed!\")\n \n # add data to the local storage\n self.local_storage += g",
"def __init__(self, short_term: str):\n if type(short_term) is not str:\n raise TypeError(\"The ontology object can only be initialzed with a string value\")\n self.short_term = short_term\n host = \"http://www.ebi.ac.uk/ols/api/terms?id=\" + short_term\n request = requests.get(host)\n\n response = request.json()\n num = response['page']['totalElements']\n\n if num:\n if num > 20:\n host = host + \"&size=\" + str(num)\n request = requests.get(host)\n response = request.json()\n terms = response['_embedded']['terms']\n for term in terms:\n if term['is_defining_ontology']:\n self.found = True\n self.detail = term\n else:\n logger.error(\"Could not find information for \" + short_term)",
"def ontology():\n return OKJSONResponse(ONTOLOGY)",
"def create(self, save_to_file: bool = False):\n assert dataset is not None, 'Dataset can not be None.'\n\n with self.ontology:\n\n # Define ontology classes and properties\n class Feature(Thing):\n pass\n\n class RelevanceScore(Thing):\n pass\n\n class Component(Thing):\n pass\n\n class has_index_in_data(DataProperty):\n domain = [Feature]\n range = [int]\n\n class has_score(DataProperty):\n domain = [RelevanceScore]\n range = [str]\n\n class of_component(ObjectProperty):\n domain = [RelevanceScore]\n range = [Component]\n\n class comp_has_rs(ObjectProperty):\n domain = [Component]\n range = [RelevanceScore]\n inverse_property = of_component\n\n class feature_has_rs(ObjectProperty):\n domain = [Feature]\n range = [RelevanceScore]\n\n class of_feature(ObjectProperty):\n domain = [RelevanceScore]\n range = [Feature]\n inverse_property = feature_has_rs\n\n class Sensor(Thing):\n pass\n\n class records(ObjectProperty):\n domain = [Sensor]\n range = [Feature]\n\n class recorded_by(ObjectProperty):\n domain = [Feature]\n range = [Sensor]\n\n # Component individuals\n txt15_i1 = Component(name='txt15_i1')\n txt15_i3 = Component(name='txt15_i3')\n txt15_conveyor = Component(name='txt15_conveyor')\n txt15_m1 = Component(name='txt15_m1')\n txt15_pl = Component(name='txt15_pl')\n txt16_i3 = Component(name='txt16_i3')\n txt16_conveyor = Component(name='txt16_conveyor')\n txt16_m3 = Component(name='txt16_m3')\n txt16_turntable = Component(name='txt16_turntable')\n txt17_i1 = Component(name='txt17_i1')\n txt17_pl = Component(name='txt17_pl')\n txt18_pl = Component(name='txt18_pl')\n txt19_i4 = Component(name='txt19_i4')\n\n # Create feature individuals and save location in dataset and the manually defined anomaly threshold\n for index_in_data, feature_name in enumerate(self.dataset.feature_names_all):\n feature = Feature(name=feature_name)\n feature.has_index_in_data = [index_in_data]\n\n # Link components and features to relevance scores\n for component in Component.instances():\n c_name = component.name\n\n high, medium, low, _ = self.config.component_symptom_selection.get(c_name)\n all_symptoms = high + medium + low\n\n # Create relevance score individuals for each defined dependency\n for i, symptom in enumerate(all_symptoms):\n r_score_name = c_name + '_' + symptom + '_score'\n r_score = RelevanceScore(name=r_score_name)\n\n symptom_individual = self.get_individual(Feature, symptom)\n\n # Important: Assigned scores must be 1 char strings and can not be 'e' (assigned to all others)\n if symptom in high:\n r_score.has_score = ['h']\n elif symptom in medium:\n r_score.has_score = ['m']\n elif symptom in low:\n r_score.has_score = ['l']\n\n # Link relevance score individual to corresponding feature and component\n # IMPORTANT: Don't assign inverse properties or this will break for whatever reason\n r_score.of_feature = [symptom_individual]\n r_score.of_component = [component]\n\n if save_to_file:\n self.ontology.save(file=self.config.ontology_file, format=\"rdfxml\")",
"def fOpenSpec(object):\r\n global spec \r\n spec = object\r\n states[0] = (spec.name + \", dim = \" + str(spec.d) +\r\n \", nr. of Wiener processes = \" + str(spec.m)+\".\")\r\n updateObjectStates()",
"def load_ontology(self):\n with open(self.ontology_file_path) as ont_file:\n ontology = json.load(ont_file)\n return ontology",
"def addOntologyToObject(self, obj):\n i = -1\n for item in obj.ontologyItems.items:\n i = i + 1\n ana = vsdModels.ObjectOntology(\n type=vsdModels.OntologyItem(**item).type,\n position=i,\n ontologyItem=vsdModels.APIBase(selfUrl=vsdModels.OntologyItem(**item).selfUrl),\n object=vsdModels.APIBase(selfUrl=obj.selfUrl)\n )\n print(ana.to_struct())\n self.postRequest(\n 'object-ontologies/{0}'.format(\n vsdModels.OntologyItem(**item).type\n ),\n data=ana.to_struct())",
"def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"",
"def test_read_owl3():\n node_property_predicates = {\"http://www.geneontology.org/formats/oboInOwl#inSubset\"}\n predicate_mappings = {\n \"http://www.geneontology.org/formats/oboInOwl#inSubset\": \"subsets\",\n \"http://www.geneontology.org/formats/oboInOwl#hasOBONamespace\": \"namespace\",\n \"http://www.geneontology.org/formats/oboInOwl#hasAlternativeId\": \"xref\",\n }\n\n t = Transformer()\n source = OwlSource(t)\n\n source.set_predicate_mapping(predicate_mappings)\n source.set_node_property_predicates(node_property_predicates)\n g = source.parse(filename=os.path.join(RESOURCE_DIR, \"goslim_generic.owl\"))\n nodes = {}\n edges = {}\n for rec in g:\n if rec:\n if len(rec) == 4:\n key = (rec[0], rec[1])\n if key in edges:\n edges[key].append(rec[-1])\n else:\n edges[key] = [rec[-1]]\n else:\n nodes[rec[0]] = rec[-1]\n\n n1 = nodes[\"GO:0008150\"]\n pprint(n1)\n assert n1[\"name\"] == \"biological_process\"\n assert \"subsets\" in n1 and \"GOP:goslim_generic\" in n1[\"subsets\"]\n assert \"has_exact_synonym\" in n1\n assert \"description\" in n1\n assert \"comment\" in n1\n assert \"xref\" in n1 and \"GO:0044699\" in n1[\"xref\"]\n\n n2 = nodes[\"GO:0003674\"]\n n2[\"name\"] = \"molecular_function\"\n assert \"subsets\" in n2 and \"GOP:goslim_generic\" in n2[\"subsets\"]\n assert \"has_exact_synonym\" in n2\n assert \"description\" in n2\n assert \"comment\" in n2\n assert \"xref\" in n2 and \"GO:0005554\" in n2[\"xref\"]\n\n n3 = nodes[\"GO:0005575\"]\n n3[\"name\"] = \"cellular_component\"\n assert \"subsets\" in n3 and \"GOP:goslim_generic\" in n3[\"subsets\"]\n assert \"has_exact_synonym\" in n3\n assert \"description\" in n3\n assert \"comment\" in n3\n assert \"xref\" in n3 and \"GO:0008372\" in n3[\"xref\"]\n\n e1 = edges[\"GO:0008289\", \"GO:0003674\"][0]\n assert e1[\"subject\"] == \"GO:0008289\"\n assert e1[\"predicate\"] == \"biolink:subclass_of\"\n assert e1[\"object\"] == \"GO:0003674\"\n assert e1[\"relation\"] == \"rdfs:subClassOf\"",
"def openTypeFeatures(self, *args, **features):\n if args and features:\n raise DrawBotError(\"Can't combine positional arguments and keyword arguments\")\n if args:\n if len(args) != 1:\n raise DrawBotError(\"There can only be one positional argument\")\n if args[0] is not None:\n raise DrawBotError(\"First positional argument can only be None\")\n warnings.warn(\"openTypeFeatures(None) is deprecated, use openTypeFeatures(resetFeatures=True) instead.\")\n self._openTypeFeatures.clear()\n else:\n if features.pop(\"resetFeatures\", False):\n self._openTypeFeatures.clear()\n self._openTypeFeatures.update(features)\n return dict(self._openTypeFeatures)",
"def __init__(self, features=None, **kwargs):\n super(FeatureIO, self).__init__(**kwargs)\n self.features = features",
"def add_features(self, obj, annotation):\n if annotation['problem']:\n obj.add(folia.Feature, subset='problem', cls=annotation['problem'])\n if annotation['pos']:\n obj.add(folia.Feature, subset='pos', cls=annotation['pos'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
From a list of 'scene.json' and/or 'scene_.json' paths in S3, return the Scene object with the latest creation timestamp, along with its path.
|
def get_latest_scene(s3_scene_jsons):
# Fetch all 'scene*.json' files and load Scenes
scenes = [open_remote_pb_object(scene_json, Scene) for scene_json in s3_scene_jsons]
# Find Scene with latest creation timestamp
creation_ts = [_s.creation_date.ToMicroseconds() for _s in scenes]
index = creation_ts.index(max(creation_ts))
return scenes[index], s3_scene_jsons[index]
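A hedged usage sketch for get_latest_scene. The bucket and key names are made up, and it assumes open_remote_pb_object and Scene are importable as in the function above.

# Illustrative S3 paths only.
s3_scene_jsons = [
    's3://example-bucket/dataset/scene_a/scene.json',
    's3://example-bucket/dataset/scene_b/scene.json',
]
latest_scene, latest_json = get_latest_scene(s3_scene_jsons)
print('Latest scene file:', latest_json)
print('Created at (microseconds):', latest_scene.creation_date.ToMicroseconds())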
|
[
"def get_latest_year_month_day_prefix(s3_path):\n latest = date.min\n keys = get_contents_of_directory(s3_path)\n\n for key in keys:\n search = re.search(r'.*year=(\\d{4}).*month=(\\d{2}).*day=(\\d{2})', key)\n if search:\n year, month, day = search.groups()\n bucket_date = date(int(year), int(month), int(day))\n if bucket_date > latest:\n latest = bucket_date\n\n if latest == date.min:\n return None\n return latest",
"def get_model(lst, datestamp):\n\n for item in lst:\n for k, v in item.items():\n model_params = k.split(\"_\")\n date_stamp = \"_\".join(model_params[0:2])\n if date_stamp == datestamp:\n return v[\"model-object\"]",
"def latest_archive_zip_revision(doi_id, s3_keys, journal, status):\n s3_key_name = None\n\n name_prefix_to_match = (journal + '-' + utils.pad_msid(doi_id)\n + '-' + status + '-v')\n\n highest = 0\n for key in s3_keys:\n if key[\"name\"].startswith(name_prefix_to_match):\n version_and_date = None\n try:\n parts = key[\"name\"].split(name_prefix_to_match)\n version = parts[1].split('-')[0]\n date_formatted = dateutil.parser.parse(key[\"last_modified\"])\n date_part = date_formatted.strftime(utils.S3_DATE_FORMAT)\n version_and_date = int(version + date_part)\n except:\n pass\n if version_and_date and version_and_date > highest:\n s3_key_name = key[\"name\"]\n highest = version_and_date\n\n return s3_key_name",
"def SELECT_LATEST_FILE_JSON(directory=LOCAL_DIRECTORY_OF_SENSOR_DATA):\n latest_time = None\n latest_path = None\n first_loop = True\n for file_name in os.listdir(directory):\n file_path_json = os.path.join(directory, file_name)\n if os.path.isfile(file_path_json):\n current_time = os.stat(file_path_json)\n if not first_loop and int(current_time.st_mtime) > int(latest_time.st_mtime) and \\\n file_path_json[-len('.json'):] == '.json':\n latest_time = os.stat(file_path_json)\n latest_path = file_path_json\n elif first_loop:\n latest_time = os.stat(file_path_json)\n latest_path = file_path_json\n first_loop = False\n return latest_path",
"def load_archives_from_s3(self):\n s3_bucket = S3Backend(self.conf).bucket\n try:\n k = Key(s3_bucket)\n k.key = self.backup_key\n\n return json.loads(k.get_contents_as_string())\n except S3ResponseError, exc:\n log.error(exc)\n return {}",
"def latest_snapshot(snapshots):\n # new list for sorting\n snaps = list()\n\n # update list with data and snapshotId\n for snapshot in snapshots:\n snaps.append({'date': snapshot['StartTime'], 'snap_id': snapshot['SnapshotId']})\n\n # sort new list by date\n sorted_snapshots = sorted(snaps, key=lambda k: k['date'], reverse=True)\n\n # get latest snapshot id\n latest_snap_id = sorted_snapshots[0]['snap_id']\n\n # get latest snapshot date\n latest_snap_date = sorted_snapshots[0]['date']\n\n return(latest_snap_date, latest_snap_id)",
"def get_latest_file_name(bucket_name,prefix):\n s3_client = boto3.client('s3')\n objs = s3_client.list_objects_v2(Bucket=bucket_name)['Contents']\n shortlisted_files = dict() \n for obj in objs:\n key = obj['Key']\n timestamp = obj['LastModified']\n # if key starts with folder name retrieve that key\n if key.startswith(prefix): \n # Adding a new key value pair\n shortlisted_files.update( {key : timestamp} ) \n latest_filename = max(shortlisted_files, key=shortlisted_files.get)\n print('Lastest File Name: ' + latest_filename)\n return latest_filename",
"def load_s3_njson(bucket, prefix, key_list, honorary_list):\n # Get list of files in bucket and with prefix:\n s3_file_list = list_s3_files(bucket, prefix)\n \n # Load data from all files:\n structured_data = []\n for s3_file in s3_file_list:\n structured_data = structured_data + s3_file_to_dict_list(bucket, s3_file, key_list, honorary_list)\n \n return structured_data",
"def get_most_recent_folder_from_s3_bucket():\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(BUCKET_NAME)\n result = bucket.meta.client.list_objects(Bucket=bucket.name, Delimiter='/')\n folders = []\n date_pattern = re.compile(r\"[0-9_]+\")\n for o in result.get('CommonPrefixes'):\n folder_name = o.get('Prefix')\n if re.match(date_pattern, folder_name):\n folders.append(folder_name)\n folders.sort(reverse=True)\n return folders[0]",
"def get_archive(katfilenames):\n\timport requests\n\n\tfile_refs = []\n\tfor filename in katfilenames:\n\t\tif filename.startswith('s3'):\n\t\t\tres = requests.post(S3_URL, headers=S3_HEAD, data='{\"s3_ref\":\"%s\",\"ref_key\":\"Nope\"}'%(filename,))\n\t\t\turl = res.json()['url']\n\t\t\tres1 = requests.get(url)\n\t\t\toutfile = filename.split('/')[-1]\n\t\t\topen(outfile, 'wb').write(res1.content)\n\t\t\tfile_refs.append(outfile)\n\t\telse:\n\t\t\tfile_refs.append(filename)\n\treturn file_refs",
"def get_version_data(version_file_path_s3):\n return s3_util.get_json_to_obj(version_file_path_s3)",
"def get_last_modified_from_first_matching_file(key_list, framework_slug, prefix):\n path_starts_with = '{}/{}'.format(framework_slug, prefix)\n return next((key for key in key_list if key.get('path').startswith(path_starts_with)), {}).get('last_modified')",
"def ingest_latests(last_timestamp, file_list):\n def _iterator(file_name):\n # Is a radar image file\n if re.match(r'cag01est2400\\d{4}-\\d{2}-\\d{2}_\\d{2}:\\d{2}:\\d{2}.png', file_name):\n file_timestamp = datetime.datetime.strptime(\n file_name, 'cag01est2400%Y-%m-%d_%H:%M:%S.png')\n if file_timestamp > last_timestamp:\n return True\n else:\n return False\n else:\n return False\n\n return list(filter(_iterator, file_list))",
"def _find_last_dates_by_prefix(bucket_name):\n # Pattern that matches a manifest key and captures the end date.\n manifest_pattern = re.compile(r'.*\\.(\\d{8})T\\d{6}Z\\.manifest.gpg')\n result = dict()\n for obj in boto3.resource('s3').Bucket(bucket_name).objects.all():\n prefix, basename = os.path.split(obj.key)\n match = manifest_pattern.fullmatch(basename)\n if match:\n # The object appears to be a Duplicity manifest.\n end_date = datetime.strptime(match.group(1), '%Y%m%d').date()\n if end_date > result.get(prefix, date(1970, 1, 1)):\n result[prefix] = end_date\n return result",
"def json_join(path,json_list):\n manifest = Dict({})\n for jj in json_list:\n json_path = osp.join(path,str(jj)+'.json')\n try:\n f = json.load(open(json_path))\n manifest.update({jj: f})\n except:\n logging.warning('no satellite data for source %s in manifest json file %s' % (jj,json_path))\n manifest.update({jj: {}})\n pass\n remove(json_path)\n return manifest",
"def get_gzipped_s3_objects_from_dict(session, event):\n return get_s3_objects_from_dict(\n session, event, default_unzip_s3_object_handler_function\n )",
"def download_json_metadata_from_s3(bucket_name, prefix=\"\", num_threads=20):\n\n # simple method for threads to pull from a queue and download JSON files\n def download_object(queue):\n while True:\n obj = queue.get()\n if obj is None:\n break\n obj.Object().download_file(obj.key.replace(prefix, ''))\n queue.task_done()\n\n # create a directory to store downloaded metadata\n cwd = Path.cwd()\n data_dir = cwd / 'data'\n json_dir = data_dir / 'json'\n # try:\n os.makedirs(json_dir, exist_ok=True)\n # except FileExistsError:\n # shutil.rmtree(json_dir)\n # os.makedirs(json_dir)\n os.chdir(json_dir)\n\n # create a queue for objects that need to be downloaded\n # and spawn threads to download them concurrently\n download_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=download_object, args=(download_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n # loop through the files in the bucket and filter for JSON metadata\n # files for only labeled images; add them to the queue\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=prefix):\n if obj.key.endswith(\"meta.json\"):\n download_queue.put(obj)\n\n # wait for the queue to be empty, then join all threads\n download_queue.join()\n for _ in range(num_threads):\n download_queue.put(None)\n for worker in workers:\n worker.join()\n\n os.chdir(cwd)",
"def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return self.s3_resource.Object(bucket_name, key)",
"def download_latest_tree(chain: str):\n if chain == \"eth\":\n key = \"badger-tree.json\"\n else:\n key = \"badger-tree-{}.json\".format(chain)\n\n target = {\n \"bucket\": get_bucket(env_config.test),\n \"key\": key,\n } # badger-api production\n\n console.print(\"Downloading latest rewards file from s3: \" + target[\"bucket\"])\n s3_clientobj = s3.get_object(Bucket=target[\"bucket\"], Key=target[\"key\"])\n s3_clientdata = s3_clientobj[\"Body\"].read().decode(\"utf-8\")\n return json.loads(s3_clientdata)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
It builds the configuration space with the needed hyperparameters. Different types of hyperparameters can easily be implemented: besides float hyperparameters on a log scale, it can also handle categorical input parameters.
|
def get_configspace():
cs = CS.ConfigurationSpace()
# Learning rate hyperparameter
lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)
# Stochastic gradient descent momentum as parameter.
sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)
cs.add_hyperparameters([lr, sgd_momentum])
# Optimizer hyperparameters.
#optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])
#cs.add_hyperparameters([optimizer])
# Only add the sgd_momentum hyperparameter if the optimizer is stochastic gradient descent. Otherwise, it doesn't make sense.
#cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')
#cs.add_condition(cond)
''' The below is commented out because we're not fiddling with architecture in this optimization.'''
#num_new_fc_layers = CSH.UniformIntegerHyperparameter('num_new_fc_layers', lower=0, upper=3, default_value=0, log=False)
#num_els_new_1 = CSH.UniformIntegerHyperparameter('num_els_new_1', lower=128, upper=4096, default_value = 1000, log=True)
#num_els_new_2 = CSH.UniformIntegerHyperparameter('num_els_new_2', lower=128, upper=4096, default_value = 1000, log=True)
#num_els_new_3 = CSH.UniformIntegerHyperparameter('num_els_new_3', lower=128, upper=4096, default_value = 1000, log=True)
#freeze0_old = CSH.UniformIntegerHyperparameter('freeze0_cat', lower = 0, upper = 1, default_value = 1, log=False)
#freeze1_old = CSH.UniformIntegerHyperparameter('freeze1_cat', lower=0, upper=1, default_value=1, log=False)
#cs.add_hyperparameters([num_new_fc_layers, num_els_new_1, num_els_new_2, num_els_new_3, freeze0_old, freeze1_old, batchsize])
dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)
cs.add_hyperparameters([dropout_rate])
return cs
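A short sketch of how the space returned above might be exercised on its own, using the standard ConfigSpace sampling API; the BOHB worker wiring this function is presumably written for is omitted.

# Sample one random configuration from the space and read the tuned values.
cs = get_configspace()
config = cs.sample_configuration()
print('lr:', config['lr'])
print('sgd_momentum:', config['sgd_momentum'])
print('dropout_rate:', config['dropout_rate'])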
|
[
"def get_configspace():\r\n cs = CS.ConfigurationSpace()\r\n\r\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\r\n\r\n # For demonstration purposes, we add different optimizers as categorical hyperparameters.\r\n # To show how to use conditional hyperparameters with ConfigSpace, we'll add the optimizers 'Adam' and 'SGD'.\r\n # SGD has a different parameter 'momentum'.\r\n optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\r\n\r\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\r\n\r\n cs.add_hyperparameters([lr, optimizer, sgd_momentum])\r\n\r\n\r\n\r\n num_conv_layers = CSH.UniformIntegerHyperparameter('num_conv_layers', lower=1, upper=3, default_value=2)\r\n\r\n num_filters_1 = CSH.UniformIntegerHyperparameter('num_filters_1', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_2 = CSH.UniformIntegerHyperparameter('num_filters_2', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_3 = CSH.UniformIntegerHyperparameter('num_filters_3', lower=4, upper=64, default_value=16, log=True)\r\n\r\n cs.add_hyperparameters([num_conv_layers, num_filters_1, num_filters_2, num_filters_3])\r\n\r\n\r\n dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\r\n num_fc_units = CSH.UniformIntegerHyperparameter('num_fc_units', lower=8, upper=256, default_value=32, log=True)\r\n\r\n cs.add_hyperparameters([dropout_rate, num_fc_units])\r\n\r\n\r\n # The hyperparameter sgd_momentum will be used,if the configuration\r\n # contains 'SGD' as optimizer.\r\n cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\r\n cs.add_condition(cond)\r\n\r\n # You can also use inequality conditions:\r\n cond = CS.GreaterThanCondition(num_filters_2, num_conv_layers, 1)\r\n cs.add_condition(cond)\r\n\r\n cond = CS.GreaterThanCondition(num_filters_3, num_conv_layers, 2)\r\n cs.add_condition(cond)\r\n\r\n return cs",
"def generate_hyperparameters(self):\n params = Namespace(**self.settings)\n hyperparameters = generate_base_hyperparameter_set(params.low_lr,\n params.high_lr,\n params.low_reg,\n params.high_reg)\n number_of_layers = np.random.randint(params.cnn_min_layers,\n params.cnn_max_layers + 1)\n hyperparameters['filters'] = np.random.randint(params.cnn_min_filters,\n params.cnn_max_filters + 1,\n number_of_layers)\n hyperparameters['fc_hidden_nodes'] = np.random.randint(params.cnn_min_fc_nodes,\n params.cnn_max_fc_nodes + 1)\n return hyperparameters",
"def set_default_hyperparameters(self):\n self.hyperparameter_space = {\n 'scale_X': hp.choice('scale_X', ['std', 'mm01', 'mm11', None]),\n 'scale_y': hp.choice('scale_y', ['std', 'mm01', 'mm11', None]),\n }\n\n if self.input_obj.keywords['pes_format'] == 'interatomics':\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': True,'morse_alpha': hp.quniform('morse_alpha', 1, 2, 0.1)},{'morse': False}]))\n else:\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': False}]))\n if self.pip:\n val = hp.choice('pip',[{'pip': True,'degree_reduction': hp.choice('degree_reduction', [True,False])}])\n self.set_hyperparameter('pip', val)\n else:\n self.set_hyperparameter('pip', hp.choice('pip', [{'pip': False}]))\n\n if self.input_obj.keywords['gp_ard'] == 'opt': # auto relevancy determination (independant length scales for each feature)\n self.set_hyperparameter('ARD', hp.choice('ARD', [True,False]))\n #TODO add optional space inclusions, something like: if option: self.hyperparameter_space['newoption'] = hp.choice(..)",
"def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj",
"def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)",
"def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }",
"def get_param_grid():\n layer_width = [32, 64, 128, 256, 512]\n layers = [2, 3, 4, 5, 6]\n epochs = [10, 25, 50, 75, 100]\n batch_size = [32, 64, 96, 128, 160, 192, 224, 256]\n activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']\n init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal',\n 'he_uniform']\n dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n optimizer = ['adam', 'sgd', 'adadelta', 'adagrad', 'adamax', 'ftrl', 'nadam', 'rmsprop']\n\n grid = {'layer_width': layer_width,\n 'layers': layers,\n 'epochs': epochs,\n 'batch_size': batch_size,\n 'activation': activation,\n 'init_mode': init_mode,\n 'dropout_rate': dropout_rate,\n 'optimizer': optimizer}\n\n return grid",
"def build_param_grid(self, C_list:list=[0.1, 1, 10, 100], gamma_list:list=[1, 0.1, 0.01, 0.001], kernel_list:list=['rbf']):\n ans = {}\n ans['C'] = C_list\n ans['gamma'] = gamma_list\n ans['kernel'] = kernel_list\n self.param_grid = ans\n return ans",
"def get_hyperparameter_configuration(cat_hparam, num_hparam, layers_hparam, combinations, n, random_state=420):\n np.random.seed(seed=random_state)\n configuration = dict.fromkeys(range(n))\n for ind in range(n):\n configuration[ind] = {'hparams': None}\n configuration[ind]['hparams'] = dict.fromkeys(\n [*cat_hparam.keys(), *num_hparam.keys(), 'list_hidden_layer']\n )\n if len(layers_hparam['num_hidden_layer']) == 3:\n try:\n distribution = eval(\n layers_hparam['num_hidden_layer'][2].replace(\"-\", \"\"))\n num_hidden_layer = int(distribution.rvs(\n layers_hparam['num_hidden_layer'][0], layers_hparam['num_hidden_layer'][1]-layers_hparam['num_hidden_layer'][0]))\n except NameError:\n logging.warning(\n f'WARNING: Distribution {layers_hparam[\"num_hidden_layer\"][2]} not found, generating random number uniformly.')\n num_hidden_layer = randint.rvs(\n layers_hparam['num_hidden_layer'][0], layers_hparam['num_hidden_layer'][1]+1)\n else:\n num_hidden_layer = randint.rvs(\n layers_hparam['num_hidden_layer'][0], layers_hparam['num_hidden_layer'][1]+1)\n\n if len(layers_hparam['num_neuron']) == 3:\n try:\n distribution = eval(\n layers_hparam['num_neuron'][2].replace(\"-\", \"\"))\n configuration[ind]['hparams']['list_hidden_layer'] = distribution.rvs(\n layers_hparam['num_neuron'][0], layers_hparam['num_neuron'][1]-layers_hparam['num_neuron'][0], size=num_hidden_layer).astype(int).tolist()\n except NameError:\n logging.warning(\n f'WARNING: Distribution {layers_hparam[\"num_neuron\"][2]} not found, generating random number uniformly.')\n configuration[ind]['hparams']['list_hidden_layer'] = randint.rvs(\n layers_hparam['num_neuron'][0], layers_hparam['num_neuron'][1]+1, size=num_hidden_layer).tolist()\n else:\n configuration[ind]['hparams']['list_hidden_layer'] = randint.rvs(\n layers_hparam['num_neuron'][0], layers_hparam['num_neuron'][1]+1, size=num_hidden_layer).tolist()\n\n if len(cat_hparam):\n cat_combination_num = random.randint(\n 0, len(combinations)-1)\n for hparam in cat_hparam.keys():\n configuration[ind]['hparams'][hparam] = combinations.loc[cat_combination_num, hparam]\n\n if len(num_hparam):\n for hparam in num_hparam.keys():\n if len(num_hparam[hparam]) == 3:\n try:\n distribution = eval(\n num_hparam[hparam][2].replace(\"-\", \"\"))\n if (type(num_hparam[hparam][0]) == int) and (type(num_hparam[hparam][1]) == int):\n configuration[ind]['hparams'][hparam] = int(distribution.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0]))\n else:\n configuration[ind]['hparams'][hparam] = distribution.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0])\n except NameError:\n logging.warning(\n f'WARNING: Distribution {num_hparam[hparam][2]} not found, generating random number uniformly.')\n if (type(num_hparam[hparam][0]) == int) and (type(num_hparam[hparam][1]) == int):\n configuration[ind]['hparams'][hparam] = randint.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]+1)\n else:\n configuration[ind]['hparams'][hparam] = uniform.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0])\n else:\n if (type(num_hparam[hparam][0]) == int) and (type(num_hparam[hparam][1]) == int):\n configuration[ind]['hparams'][hparam] = randint.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]+1)\n else:\n configuration[ind]['hparams'][hparam] = uniform.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0])\n\n return configuration",
"def make_network_config(params,single_trial=None,custom=False):\n network_config = {}\n network_config['in_features'] = params['nk']\n network_config['Ncells'] = params['Ncells']\n network_config['initW'] = params['initW']\n network_config['optimizer'] = params['optimizer']\n network_config['activation_type'] ='ReLU' # Default is ReLU, choose ReLu, SoftPlus, or None\n if custom == False:\n network_config['shift_in'] = params['shift_in']\n network_config['shift_hidden'] = params['shift_hidden']\n network_config['shift_out'] = params['shift_out']\n network_config['LinMix'] = params['LinMix']\n network_config['pos_features'] = params['pos_features']\n network_config['lr_shift'] = 1e-2\n network_config['lr_w'] = 1e-3\n network_config['lr_b'] = 1e-3\n network_config['lr_m'] = 1e-3\n network_config['single_trial'] = single_trial\n if params['NoL1']:\n network_config['L1_alpha'] = None\n network_config['L1_alpha_m'] = None\n else:\n network_config['L1_alpha'] = .0001\n network_config['L1_alpha_m'] = None\n\n if params['NoL2']:\n network_config['L2_lambda'] = 0\n network_config['L2_lambda_m'] = 0\n initial_params={}\n else:\n if single_trial is not None:\n network_config['L2_lambda'] = 13 #np.logspace(-2, 3, 20)[-5]\n network_config['L2_lambda_m'] = 0 #np.logspace(-2, 3, 20)[-5]\n initial_params={}\n else:\n network_config['L2_lambda'] = tune.grid_search(list(np.logspace(-2, 3,num=20)))\n # network_config['L2_lambda_m'] = tune.loguniform(1e-2, 1e3)\n network_config['L2_lambda_m'] = 0 #tune.loguniform(1e-2, 1e3)\n initial_params = [{'L2_lambda':.01},]\n return network_config, initial_params",
"def build(dynamic_hyperparams_config, is_training):\n if not isinstance(dynamic_hyperparams_config,\n hyperparams_pb2.Hyperparams):\n raise ValueError('dynamic_hyperparams_config not of type '\n 'hyperparams_pb.Hyperparams.')\n\n batch_norm = None\n batch_norm_params = None\n if dynamic_hyperparams_config.HasField('batch_norm'):\n batch_norm = slim.batch_norm\n batch_norm_params = _build_batch_norm_params(\n dynamic_hyperparams_config.batch_norm, is_training)\n\n affected_ops = [dynamic_conv2d]\n with slim.arg_scope(\n affected_ops,\n activation_fn=_build_activation_fn(dynamic_hyperparams_config.activation),\n normalizer_fn=batch_norm,\n normalizer_params=batch_norm_params) as sc:\n return sc",
"def get_hyperparameter_search_space(seed) -> ConfigSpaceWrapper:\n cs = ConfigSpace.ConfigurationSpace('sklearn.ensemble.GradientBoostingClassifier', seed)\n\n # fixed to deviance, as exponential requires two classes\n loss = ConfigSpace.hyperparameters.Constant(name='gradientboostingclassifier__loss', value='deviance')\n # JvR: changed after conversation with AM on 2019-01-17\n learning_rate = ConfigSpace.hyperparameters.UniformFloatHyperparameter(\n name='learning_rate', lower=0.00001, upper=0.1, default_value=0.0001, log=True)\n n_estimators = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(\n name='n_estimators', lower=64, upper=2048, default_value=100, log=True)\n subsample = ConfigSpace.UniformFloatHyperparameter(\n name='subsample', lower=0.0, upper=1.0, default_value=1.0)\n criterion = ConfigSpace.hyperparameters.CategoricalHyperparameter(\n name='criterion', choices=['friedman_mse', 'mse', 'mae'])\n min_samples_split = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(\n name='min_samples_split', lower=2, upper=20, default_value=2)\n min_samples_leaf = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(\n name='min_samples_leaf', lower=1, upper=20, default_value=1)\n # TODO: upper bound?\n min_weight_fraction_leaf = ConfigSpace.hyperparameters.UniformFloatHyperparameter(\n name='min_weight_fraction_leaf', lower=0.0, upper=0.5, default_value=0.0)\n # JvR: changed after conversation with AM on 2019-01-17\n max_depth = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(\n name='max_depth', lower=1, upper=32, default_value=3)\n # TODO: upper bound?\n min_impurity_decrease = ConfigSpace.hyperparameters.UniformFloatHyperparameter(\n name='min_impurity_decrease', lower=0.0, upper=1.0, default_value=0.0)\n max_features = ConfigSpace.hyperparameters.UniformFloatHyperparameter(\n name='max_features', lower=0.0, upper=1.0, default_value=0.0)\n validation_fraction = ConfigSpace.UniformFloatHyperparameter(\n name='validation_fraction', lower=0, upper=1, default_value=0.1)\n n_iter_no_change = ConfigSpace.UniformIntegerHyperparameter(\n name='n_iter_no_change', lower=1, upper=2048, default_value=200)\n tol = ConfigSpace.UniformFloatHyperparameter(\n name='tol', lower=1e-5, upper=1e-1, default_value=1e-4, log=True)\n\n hyperparameters = [\n loss,\n learning_rate,\n n_estimators,\n subsample,\n criterion,\n min_samples_split,\n min_samples_leaf,\n min_weight_fraction_leaf,\n max_depth,\n min_impurity_decrease,\n max_features,\n validation_fraction,\n n_iter_no_change,\n tol,\n ]\n\n return ConfigSpaceWrapper(cs, hyperparameters, None)",
"def _fill_config_hyperparam(self, config):\n if self._searcher_name != \"random\":\n return\n\n if isinstance(self.param_distributions, list):\n return\n\n samples = 1\n all_lists = True\n for key, distribution in self.param_distributions.items():\n if isinstance(distribution, Domain):\n config[key] = distribution\n all_lists = False\n elif isinstance(distribution, list):\n config[key] = tune.choice(distribution)\n samples *= len(distribution)\n else:\n all_lists = False\n\n def get_sample(dist):\n return lambda spec: dist.rvs(1)[0]\n\n config[key] = tune.sample_from(get_sample(distribution))\n if all_lists:\n self.n_trials = min(self.n_trials, samples)",
"def set_hyperparams(self, params):",
"def build_eval_dict(cfg_dict,lens_params,baobab_config=True):\n\n\tif baobab_config:\n\t\t# In this case we only need to build the evaluation for each\n\t\t# lens parameter and the hyperparameter values.\n\t\teval_dict = dict(hyp_len=0, hyp_values=[], hyp_names=[])\n\telse:\n\t\t# Initialize eval_dict with empty lists for hyperparameters and\n\t\t# hyperpriors.\n\t\teval_dict = dict(hyp_len=0, hyp_init=[], hyp_sigma=[], hyp_prior=[],\n\t\t\thyp_names=[])\n\t# For each lens parameter add the required hyperparameters and evaluation\n\t# function.\n\tfor lens_param in lens_params:\n\t\t# Skip lens parameters that are in the covariance matrix\n\t\tif ('cov_info' in cfg_dict.bnn_omega and\n\t\t\tlens_param in cfg_dict.bnn_omega['cov_info']['cov_params_list']):\n\t\t\teval_dict[lens_param] = None\n\t\t\tcontinue\n\t\t# Make a new entry for the parameter\n\t\teval_dict[lens_param] = dict()\n\n\t\t# Load the config entry associated to this parameter\n\t\tlens_split = lens_param.split('_')\n\t\t# This is a bit ugly, but it's the quickest way to map between the\n\t\t# parameter name and the location in the config file.\n\t\tdist = cfg_dict.bnn_omega['_'.join(lens_split[:2])][\n\t\t\t'_'.join(lens_split[2:])]\n\n\t\t# Get the function in question from baobab.\n\t\teval_fn = getattr(distributions,'eval_{}_logpdf_approx'.format(\n\t\t\tdist['dist']))\n\t\teval_sig = signature(eval_fn)\n\t\tfn_name = dist['dist']\n\n\t\t# Hyperparameters is number of parameters-1 because the first parameter\n\t\t# is where to evaluate.\n\t\tn_hyps = len(eval_sig.parameters)-1\n\n\t\t# Initialize the dict of lens_param evaluation function kwargs\n\t\teval_dict[lens_param]['eval_fn_kwargs'] = {}\n\n\t\t# Add the value of the hyperparameters in the config file. Again skip\n\t\t# the first one.\n\t\tfor hyperparam_name in list(eval_sig.parameters.keys())[1:]:\n\t\t\t# Sometimes hyperparameters are not specified\n\t\t\tif hyperparam_name not in dist or dist[hyperparam_name] == {}:\n\t\t\t\tn_hyps -= 1\n\t\t\t# For the baobab config we just populate the hyp values and names\n\t\t\telif baobab_config:\n\t\t\t\teval_dict['hyp_values'].extend([dist[hyperparam_name]])\n\t\t\t\teval_dict['hyp_names'].extend([lens_param+':'+hyperparam_name])\n\t\t\t# For the ovejero distribution config, we want to deal with fixed\n\t\t\t# hyperparameters. We will do so by passing them in as kwargs to\n\t\t\t# our evaluation function. 
Hyperparameters are fixed if they\n\t\t\t# have a sigma of 0.\n\t\t\telif dist[hyperparam_name]['sigma'] == 0:\n\t\t\t\tn_hyps -= 1\n\t\t\t\teval_dict[lens_param]['eval_fn_kwargs'][hyperparam_name] = (\n\t\t\t\t\tdist[hyperparam_name]['init'])\n\t\t\t# For non fixed hyperparameters we need to get their initial value\n\t\t\t# the spread (sigma) we want to use, and the functional form of the\n\t\t\t# prior.\n\t\t\telse:\n\t\t\t\teval_dict['hyp_init'].extend([dist[hyperparam_name]['init']])\n\t\t\t\teval_dict['hyp_sigma'].extend([dist[hyperparam_name]['sigma']])\n\t\t\t\teval_dict['hyp_names'].extend([lens_param+':'+hyperparam_name])\n\t\t\t\teval_dict['hyp_prior'].extend([dist[hyperparam_name]['prior']])\n\n\t\t# Record the indices we saved the hyperparameter values for this\n\t\t# lens parameter to.\n\t\teval_dict[lens_param]['hyp_ind'] = np.arange(eval_dict['hyp_len'],\n\t\t\teval_dict['hyp_len']+n_hyps)\n\t\teval_dict['hyp_len'] += n_hyps\n\n\t\t# Finally, actually include the evaluation function.\n\t\teval_dict[lens_param]['eval_fn'] = eval_fn\n\t\teval_dict[lens_param]['fn_name'] = fn_name\n\n\t# Add covariance matrix information if that's relevant\n\tif 'cov_info' in cfg_dict.bnn_omega:\n\t\tcov_dict = cfg_dict.bnn_omega['cov_info']\n\t\tif baobab_config:\n\t\t\tcov_dim = len(cov_dict['cov_omega']['mu'])\n\t\telse:\n\t\t\tcov_dim = len(cov_dict['cov_omega']['mu']['init'])\n\t\t# Add the indices for the mu and tril parameters seperately\n\t\teval_dict['cov_params_list'] = cov_dict['cov_params_list']\n\t\teval_dict['cov_params_is_log'] = cov_dict['cov_omega']['is_log']\n\t\tn_hyps = cov_dim\n\t\teval_dict['cov_mu_hyp_ind'] = np.arange(eval_dict['hyp_len'],\n\t\t\teval_dict['hyp_len']+n_hyps)\n\t\teval_dict['hyp_len'] += n_hyps\n\t\tn_hyps = int(cov_dim*(cov_dim+1)/2)\n\t\teval_dict['cov_tril_hyp_ind'] = np.arange(eval_dict['hyp_len'],\n\t\t\teval_dict['hyp_len']+n_hyps)\n\t\teval_dict['hyp_len'] += n_hyps\n\t\t# Now extract the desired dictionary values depending on whether or not\n\t\t# we have passed in a baobab_config.\n\t\tif baobab_config:\n\t\t\teval_dict['hyp_values'].extend(cov_dict['cov_omega']['mu'])\n\t\t\t# The baobab config specifies the covariance matrix, but our\n\t\t\t# hyperparameters are the lower triangular matrix. 
Here we\n\t\t\t# correct for that.\n\t\t\tcov = np.array(cov_dict['cov_omega']['cov_mat'])\n\t\t\ttril = np.linalg.cholesky(cov).astype(np.float)\n\t\t\ttril_mask = np.tri(cov_dim,dtype=bool, k=0)\n\t\t\teval_dict['hyp_values'].extend(tril[tril_mask])\n\t\t\tfor i in range(cov_dim):\n\t\t\t\teval_dict['hyp_names'].extend(['cov_mu_%d'%(i)])\n\t\t\tfor i in range(int(cov_dim*(cov_dim+1)/2)):\n\t\t\t\teval_dict['hyp_names'].extend(['cov_tril_%d'%(i)])\n\t\telse:\n\t\t\teval_dict['hyp_init'].extend(cov_dict['cov_omega']['mu']['init'])\n\t\t\teval_dict['hyp_init'].extend(cov_dict['cov_omega']['tril']['init'])\n\t\t\teval_dict['hyp_sigma'].extend(cov_dict['cov_omega']['mu']['sigma'])\n\t\t\teval_dict['hyp_sigma'].extend(cov_dict['cov_omega']['tril']['sigma'])\n\t\t\tfor i in range(cov_dim):\n\t\t\t\teval_dict['hyp_names'].extend(['cov_mu_%d'%(i)])\n\t\t\tfor i in range(int(cov_dim*(cov_dim+1)/2)):\n\t\t\t\teval_dict['hyp_names'].extend(['cov_tril_%d'%(i)])\n\t\t\teval_dict['hyp_prior'].extend(cov_dict['cov_omega']['mu']['prior'])\n\t\t\teval_dict['hyp_prior'].extend(cov_dict['cov_omega']['tril']['prior'])\n\n\t# Transform list of values into np array\n\tif baobab_config:\n\t\teval_dict['hyp_values'] = np.array(eval_dict['hyp_values'])\n\telse:\n\t\teval_dict['hyp_init'] = np.array(eval_dict['hyp_init'])\n\t\teval_dict['hyp_sigma'] = np.array(eval_dict['hyp_sigma'])\n\n\treturn eval_dict",
"def make_params(config):\n params = copy.deepcopy(config.view.params)\n params.t2bins = np.arange(0, params.t2bin_max + 1e-4, params.t2bin_stepsize)\n params.out = make_Bunch(\"State and output of detection processing\") # outputs are not parameters, maybe separate \n return params",
"def set_hyperparams(self):\n # Instantiate hyper parameters for MSN algorithm\n self.alpha = self.hyper_params[\"alpha\"]\n self.beta = self.hyper_params[\"beta\"]\n self.def_integrity = self.hyper_params[\"default integrity\"]\n self.initial_integrity = self.hyper_params[\"initial integrity\"]\n self.min_integrity = self.hyper_params[\"minimum integrity\"]\n self.max_integrity = self.hyper_params[\"maximum integrity\"]\n self.minimizing = self.hyper_params[\"minimization mode\"]\n self.target = self.hyper_params[\"target\"]\n self.tolerance = self.hyper_params[\"tolerance\"]\n self.min_entropy = self.hyper_params[\"minimum entropy\"]\n self.max_steps = self.hyper_params[\"max steps\"]\n self.mem_size = self.hyper_params[\"memory size\"]\n self.set_initial_score()",
"def compute_configuration_space(self):\n dimension_name = []\n dimension = []\n if not self.ignore_preprocessing:\n dimension_name.append('DataPreprocessing')\n preprocess = self.primitives.hierarchies[Category.PREPROCESSING].get_primitives_as_list()\n dimension.append(preprocess)\n\n dimension_name.append('FeatureExtraction')\n feature = self.primitives.hierarchies[Category.FEATURE].get_primitives_as_list()\n dimension.append(feature)\n\n learner = None\n if self.task_type == TaskType.CLASSIFICATION:\n learner = self.primitives.hierarchies[Category.CLASSIFICATION].get_primitives_as_list()\n elif self.task_type == TaskType.REGRESSION:\n learner = self.primitives.hierarchies[Category.REGRESSION].get_primitives_as_list()\n elif self.task_type == TaskType.GRAPH_MATCHING:\n learner = self.primitives.hierarchies[Category.GRAPH].get_primitives_as_list()\n elif self.task_type == TaskType.TIME_SERIES_FORECASTING:\n # FIXME: assume time series forecasting is regression\n learner = self.primitives.hierarchies[Category.REGRESSION].get_primitives_as_list()\n # FIXME: Change task_type to regression\n self.task_type = TaskType.REGRESSION\n else:\n print('L1 Planner: task type \"{}\" not implemented'.format(self.task_type))\n\n if learner is not None:\n dimension_name.append(self.task_type.value)\n dimension.append(learner)\n\n dimension_name.append('Metrics')\n evaluator = self.primitives.hierarchies[Category.METRICS].get_primitives_as_list()\n dimension.append(evaluator)\n\n return ConfigurationSpace(dimension_name, dimension)",
"def construct_hyper_grids(X, n_grid=20):\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method computes lookup tables of the cumulative ``galprop`` PDF defined by ``input_galaxy_table``.
|
def build_one_point_lookup_table(self, **kwargs):
galaxy_table = kwargs['input_galaxy_table']
prim_galprop_bins = kwargs['prim_galprop_bins']
self.one_point_lookup_table = np.zeros(
len(prim_galprop_bins)+1, dtype=object)
binned_prim_galprop = np.digitize(
galaxy_table[self.prim_galprop_key],
self.prim_galprop_bins)
for i in range(len(self.one_point_lookup_table)):
idx_bini = np.where(binned_prim_galprop == i)[0]
if model_helpers.custom_len(idx_bini) > self.minimum_sampling:
gals_bini = galaxy_table[idx_bini]
abcissa = np.arange(len(gals_bini))/float(len(gals_bini)-1)
ordinates = np.sort(gals_bini[self.galprop_key])
self.one_point_lookup_table[i] = (
model_helpers.custom_spline(abcissa, ordinates, k=2)
)
# For all empty lookup tables, fill them with the nearest lookup table
unfilled_lookup_table_idx = np.where(
self.one_point_lookup_table == 0)[0]
filled_lookup_table_idx = np.where(
self.one_point_lookup_table != 0)[0]
if len(unfilled_lookup_table_idx) > 0:
msg = ("When building the one-point lookup table from input_galaxy_table, " +
"there were some bins of prim_galprop_bins that contained fewer than " +
str(self.minimum_sampling)+ " galaxies. In such cases, the lookup table " +
"of the nearest sufficiently populated bin will be chosen.")
warn(msg)
for idx in unfilled_lookup_table_idx:
closest_filled_idx_idx = array_utils.find_idx_nearest_val(
filled_lookup_table_idx, idx)
closest_filled_idx = filled_lookup_table_idx[closest_filled_idx_idx]
self.one_point_lookup_table[idx] = (
self.one_point_lookup_table[closest_filled_idx])
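A rough sketch of how such a lookup table could be evaluated afterwards: each stored spline maps a normalized rank in [0, 1] to a sorted galprop value, so evaluating it at uniform random numbers draws from the cumulative galprop PDF of that prim_galprop bin. The instance name, bin index, and number of draws below are hypothetical.

import numpy as np

# 'model' stands for an object on which build_one_point_lookup_table has already been called.
bin_index = 3                                  # assumed prim_galprop bin
uniform_ranks = np.random.uniform(0, 1, 1000)  # assumed number of draws
spline = model.one_point_lookup_table[bin_index]
galprop_draws = spline(uniform_ranks)          # Monte Carlo draws of the galprop in this bin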
|
[
"def _process_distribute_lookuptable(self, param_grads):\n from paddle.distributed.distribute_lookup_table import (\n find_distributed_lookup_table,\n )\n\n program = framework.default_main_program()\n global_block = framework.default_main_program().global_block()\n table_name = find_distributed_lookup_table(program)\n table_param = None\n table_grad = None\n new_param_grads = []\n for p, g in param_grads:\n if p.name == table_name:\n if table_param is not None:\n raise RuntimeError(\n \"multi dist table var found, only support one now!\"\n )\n table_param = p\n table_grad = g\n else:\n new_param_grads.append((p, g))\n sgd_op = None\n if table_param is not None:\n param_and_grad = [table_param, table_grad]\n with table_param.block.program._optimized_guard(\n param_and_grad\n ), framework.name_scope(\"optimizer\"):\n self._create_global_learning_rate()\n # create the optimize op\n sgd_op = global_block.append_op(\n type='sgd',\n inputs={\n \"Param\": table_param,\n \"Grad\": table_grad,\n \"LearningRate\": self._create_param_lr(param_and_grad),\n },\n outputs={\"ParamOut\": param_and_grad[0]},\n )\n return new_param_grads, (table_param, table_grad), sgd_op",
"def _build_alias_table(self):\n candidates_list = self._get_candidates_list()\n self.prob = dict(Counter(candidates_list))\n self.alias = self.prob.copy()\n large_q = []\n small_q = []\n for i in self.prob:\n self.alias[i] = -1\n self.prob[i] = self.prob[i] / len(candidates_list)\n self.prob[i] = pow(self.prob[i], self.alpha)\n normalize_count = sum(self.prob.values())\n for i in self.prob:\n self.prob[i] = self.prob[i] / normalize_count * len(self.prob)\n if self.prob[i] > 1:\n large_q.append(i)\n elif self.prob[i] < 1:\n small_q.append(i)\n while len(large_q) != 0 and len(small_q) != 0:\n l = large_q.pop(0)\n s = small_q.pop(0)\n self.alias[s] = l\n self.prob[l] = self.prob[l] - (1 - self.prob[s])\n if self.prob[l] < 1:\n small_q.append(l)\n elif self.prob[l] > 1:\n large_q.append(l)",
"def training_galaxy_props(psf,\n in_dir = '.',\n in_filename = 'real_galaxy_catalog_23.5_fits.fits',\n out_dir = '.',\n out_filename = 'real_galaxy_catalog_23.5_props.fits',\n pix_scale = 0.03,\n size_factor = 0.6,\n ps_size = 48,\n n_use = None):\n\n # Define the effective PSF including the pixel convolution. Draw it into an image.\n pix = galsim.Pixel(pix_scale)\n epsf = galsim.Convolve(psf, pix)\n im_epsf = epsf.draw(dx=pix_scale)\n\n # Read in galaxy catalog.\n infile = os.path.join(in_dir, in_filename)\n dat = pyfits.getdata(infile)\n n = len(dat)\n print \"Read in \",n,\" from \",infile\n\n # Select the requested subsample of galaxies.\n if n_use is not None:\n dat = dat[0:n_use]\n print \"Using \",n_use\n n = n_use\n\n # Create output arrays for the following quantities: bulge-to-total flux ratio B/T (bt),\n # flux_frac (fraction of the flux included in a GREAT3 postage stamp), resolution factor based\n # on adaptive moments, noise_var_snr_20 (noise variance to make the object have a S/N of 20) and\n # use_bulgefit (should we use 2-component fit, or 1-component Sersic fit?).\n bt = np.zeros(n)\n do_meas = np.zeros(n)\n flux_frac = np.zeros(n)\n resolution = np.zeros(n)\n noise_var_snr_20 = np.zeros(n)\n use_bulgefit = np.ones(n)\n # Loop over objects.\n for i in range(n):\n if i % 1000 == 0:\n print \"...\",i\n params = dat[i].field('bulgefit')\n sparams = dat[i].field('sersicfit')\n bstat = dat[i].field('fit_status')[0]\n sstat = dat[i].field('fit_status')[4]\n dvc_btt = dat[i].field('fit_dvc_btt')\n bmad = dat[i].field('fit_mad_b')\n smad = dat[i].field('fit_mad_s')\n\n # Select which galaxies require use of a single-Sersic fit. See `shape_2comp.py` for more\n # details of how this selection is done (the same code appears there).\n if bstat<1 or bstat>4 or dvc_btt<0.1 or dvc_btt>0.9 or np.isnan(dvc_btt) or params[9]<=0 or params[1]<=0 or params[11]<0.051 or params[3]<0.051 or smad<bmad:\n use_bulgefit[i] = 0\n # Then check if sersicfit is viable; if not, this object is a total failure.\n if sstat<1 or sstat>4 or sparams[1]<=0 or sparams[0]<=0:\n use_bulgefit[i] = -1\n do_meas[i] = -1\n resolution[i] = -10.\n noise_var_snr_20[i] = -10.\n flux_frac[i] = -10.\n bt[i] = -10.\n continue\n\n # If we're using 2-component fits, we have to reconstruct that galaxy model.\n if use_bulgefit[i]:\n # Extract and calculate the necessary parameters.\n bulge_q = params[11]\n bulge_beta = params[15]*galsim.radians\n bulge_hlr = 0.03*size_factor*np.sqrt(bulge_q)*params[9]\n bulge_flux = 2.0*np.pi*3.607*(bulge_hlr**2)*params[8]\n\n disk_q = params[3]\n disk_beta = params[7]*galsim.radians\n disk_hlr = 0.03*size_factor*np.sqrt(disk_q)*params[1]\n disk_flux = 2.0*np.pi*1.901*(disk_hlr**2)*params[0]\n # Note: the flux calculations here and below in the Sersic flux calculation are wrong:\n # due to confusion over surface brightness vs. flux, they are too low by pixel\n # area=0.03^2. Also, the size rescaling factor that is included in hlr and therefore\n # flux, 0.6^2, should not be included in the flux calculation - i.e., we aren't\n # preserving surface brightness, we're just making them smaller. The relevance of these\n # errors is that the noise variance we calculate for SNR=20 is too low by (0.03^2 *\n # 0.6^2)^2=1.05e-7. 
We correct for this error in some later scripts.\n \n bfrac = bulge_flux/(bulge_flux+disk_flux)\n\n # Exclude ones for which the bulge flux fraction is nonsense.\n if bfrac < 0 or bfrac > 1 or np.isnan(bfrac):\n use_bulgefit[i] = -1\n do_meas[i] = -1\n resolution[i] = -10.\n noise_var_snr_20[i] = -10.\n flux_frac[i] = -10.\n bt[i] = -10.\n continue\n\n bt[i] = bfrac\n # Make the GalSim objects.\n bulge = galsim.Sersic(4.0, half_light_radius = bulge_hlr, flux = bulge_flux)\n disk = galsim.Exponential(half_light_radius = disk_hlr, flux = disk_flux)\n if bulge_q > 0.:\n bulge.applyShear(q = bulge_q, beta = bulge_beta)\n if disk_q > 0.:\n disk.applyShear(q = disk_q, beta = disk_beta)\n gal = bulge+disk\n gal_flux = bulge_flux + disk_flux\n\n else:\n # For a single Sersic fit, extract the necessary parameters and make the galaxy object.\n gal_n = sparams[2]\n if gal_n < 0.3: gal_n = 0.3\n tmp_ser = galsim.Sersic(gal_n, half_light_radius=1.)\n gal_bn = (1./tmp_ser.getScaleRadius())**(1./gal_n)\n prefactor = gal_n * math.gamma(2.*gal_n) * math.exp(gal_bn) / (gal_bn**(2.*gal_n))\n gal_q = sparams[3]\n gal_beta = sparams[7]*galsim.radians\n gal_hlr = 0.03*size_factor*np.sqrt(gal_q)*sparams[1]\n gal_flux = 2.*np.pi*prefactor*(gal_hlr**2)*params[0]\n\n gal = galsim.Sersic(gal_n, half_light_radius = gal_hlr, flux = gal_flux)\n if gal_q > 0.:\n gal.applyShear(q = gal_q, beta = gal_beta)\n\n # Carry out the convolution with the ePSF (which already includes the pixel response).\n obj = galsim.Convolve(gal, epsf, gsparams = galsim.GSParams(maximum_fft_size=15000))\n im = galsim.ImageF(ps_size, ps_size)\n # Try drawing it; store failure values if an exception is thrown by GalSim when drawing.\n try:\n im = obj.draw(dx = pix_scale)\n except:\n do_meas[i] = -0.5 # fit parameters make object impossible to draw\n resolution[i] = -10.\n noise_var_snr_20[i] = -10.\n flux_frac[i] = -10.\n # Store two of the quantities that we want: the fraction of the galaxy flux contained in the\n # postage stamp, and the noise variance to add in order to get SNR=20 according to an\n # optimal flux-weighted estimator.\n flux_frac[i] = im.array.sum()/gal_flux\n noise_var_snr_20[i] = np.sum(im.array**2) / 20.**2\n\n # Try getting a resolution factor for this object. Store failure values if we cannot do it.\n try:\n result = galsim.hsm.EstimateShear(im, im_epsf, guess_sig_gal=20)\n resolution[i] = result.resolution_factor\n do_meas[i] = 1. # made model and was able to measure shape\n except RuntimeError:\n resolution[i] = -10.\n do_meas[i] = 0. # made model and did other calculations, but could not measure shape\n\n # Save results to file\n tbhdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='IDENT',\n format='J',\n array=dat.field('IDENT')),\n pyfits.Column(name='bulge_tot',\n format='D',\n array=bt),\n pyfits.Column(name='flux_frac',\n format='D',\n array=flux_frac),\n pyfits.Column(name='resolution',\n format='D',\n array=resolution),\n pyfits.Column(name='noise_var_snr_20',\n format='D',\n array=noise_var_snr_20),\n pyfits.Column(name='do_meas',\n format='D',\n array=do_meas),\n pyfits.Column(name='use_bulgefit',\n format='D',\n array=use_bulgefit)]\n ))\n \n # Note some statistics of the sample.\n fail_ind = np.where(do_meas < -0.5)[0]\n print len(fail_ind),' failures'\n bulgefit_ind = np.where(use_bulgefit == 1)[0]\n print len(bulgefit_ind),' use 2-component fits'\n\n # Write outputs.\n outfile = os.path.join(out_dir, out_filename)\n print \"Writing to file \",outfile\n tbhdu.writeto(outfile, clobber=True)",
"def compute_lookuptable(self):\n\n if self.uselookuptable:\n # Evaluation lookup tables \n self.action_isok = np.zeros( ( self.nodes_n , self.actions_n ) , dtype = bool )\n self.x_next = np.zeros( ( self.nodes_n , self.actions_n , self.DS.n ) , dtype = float ) # lookup table for dynamic\n \n # For all state nodes \n for node in range( self.nodes_n ): \n \n x = self.nodes_state[ node , : ]\n \n # For all control actions\n for action in range( self.actions_n ):\n \n u = self.actions_input[ action , : ]\n \n # Compute next state for all inputs\n x_next = self.DS.fc( x , u ) * self.dt + x\n \n # validity of the options\n x_ok = self.DS.isavalidstate(x_next)\n u_ok = self.DS.isavalidinput(x,u)\n \n self.x_next[ node, action, : ] = x_next\n self.action_isok[ node, action] = ( u_ok & x_ok )",
"def tabulate_pdf(self):\n\n from mitsuba.core import Float, Vector2f, ScalarVector2f\n\n extents = self.bounds.extents()\n endpoint = self.bounds.max - extents / ScalarVector2f(self.res)\n\n # Compute a set of nodes where the PDF should be evaluated\n x, y = ek.meshgrid(\n ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),\n ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y)\n )\n\n endpoint = extents / ScalarVector2f(self.res)\n eps = 1e-4\n nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)\n ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)\n wx = [1 / (self.ires - 1)] * self.ires\n wy = [1 / (self.ires - 1)] * self.ires\n wx[0] = wx[-1] = wx[0] * .5\n wy[0] = wy[-1] = wy[0] * .5\n\n integral = 0\n\n self.histogram_start = time.time()\n for yi, dy in enumerate(ny):\n for xi, dx in enumerate(nx):\n xy = self.domain.map_forward(Vector2f(x + dx, y + dy))\n pdf = self.pdf_func(xy)\n integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)\n self.histogram_end = time.time()\n\n self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res))\n * self.sample_count)\n\n # A few sanity checks\n pdf_min = ek.hmin(self.pdf) / self.sample_count\n if not pdf_min >= 0:\n self._log('Failure: Encountered a cell with a '\n 'negative PDF value: %f' % pdf_min)\n self.fail = True\n\n self.pdf_sum = ek.hsum(self.pdf) / self.sample_count\n if self.pdf_sum > 1.1:\n self._log('Failure: PDF integrates to a value greater '\n 'than 1.0: %f' % self.pdf_sum)\n self.fail = True",
"def create_exgauss_lookup_table(self):\n return self.exgauss_cdf_nparray(range(self.xmin,self.xmax, self.dx)).tolist(), range(self.xmin,self.xmax, self.dx)",
"def pc_project(\n mt: hl.MatrixTable,\n loadings_ht: hl.Table,\n loading_location: str = \"loadings\",\n af_location: str = \"pca_af\",\n) -> hl.Table:\n mt = pc_hwe_gt(mt, loadings_ht, loading_location, af_location)\n mt = mt.annotate_cols(scores=hl.agg.array_sum(mt.pca_loadings * mt.GTN))\n return mt.cols().select(\"scores\")",
"def create_smarter_lookup_table(self, y=0.95):\n # First determine an approximate starting point for the lookup taqble by halving the max value till the point \n # where the cdf value is less than the cdf value we are looking for\n xold = self.xmax\n xnew = self.xmax\n y_calc = self.exgauss_cdf(xnew)\n while y_calc > y:\n xold = xnew\n xnew = xnew/2.\n y_calc = self.exgauss_cdf(xnew)\n \n # Make sure the interval over which this is being constructed is okay\n npts = 10. # Number of data pts in case the interval xold-xnew is smaller than self.dx\n if xold-xnew < self.dx:\n dx = int((xold-xnew)/npts)\n else: \n dx = self.dx\n # Now start building the lookup table from the value of x\n return self.exgauss_cdf_nparray(range(int(xnew),int(xold), dx)).tolist(), range(int(xnew),int(xold), dx)",
"def gini_coefficient(tables, guests):\n length = len(tables)\n shared_table = dict()\n for table in tables:\n shared_table[table] = tables[table] / guests\n denom = 0\n for table in shared_table:\n denom += shared_table[table]\n denom = denom / length\n denom = 2 * denom\n temp = 0\n for i in shared_table:\n for j in shared_table:\n temp += abs(shared_table[i] - shared_table[j])\n temp = temp / math.pow(length, 2)\n gini_coe = temp / denom\n return gini_coe",
"def populate_amplicon_dgs(amp, dg_calc):\n for cs in amp.cached_sequences:\n cs.folding_dg = dg_calc.delta_g(cs.positive_amplicon)",
"def get_protein_degeneracy_table(self):\n\n # for each df, create new dfs for each row of next df,\n # then concat, repeat\n dfs = [pd.DataFrame({'Degeneracy': list(_.keys()),\n 'Proteins': list(map(decimal.Decimal,\n _.values()))},\n dtype='object')\n for _ in self.amino_acid_degeneracy_profile]\n\n if not dfs:\n self.protein_degeneracy_table = pd.DataFrame(\n columns=['Degeneracy', 'Proteins'])\n return self.protein_degeneracy_table\n\n df = dfs[0]\n for next_df in dfs[1:]:\n df = pd.concat([df * next_df.iloc[i] for i in range(len(next_df))])\n df = df.groupby(['Degeneracy'], sort=False).sum().reset_index()\n\n self.protein_degeneracy_table = df.sort_values(by=['Degeneracy']) \\\n .reset_index(drop=True)\n return self.protein_degeneracy_table",
"def update_PDG_table(input_file, pdg_table, mass_spectrum=1):\n # Check that we had the right output file type\n # Get the masses that we need\n masses = get_gluino_Rhadron_masses(input_file,mass_spectrum)\n # Get the output file ready\n # Open for appending (assume that's what was done if given a file handle)\n if isinstance(pdg_table, str):\n out_file = open(pdg_table,'a')\n else:\n out_file = pdg_table\n # Add all our R-hadrons to the table!\n for pid in masses:\n # For the PDG table, we only write positive-signed PDG ID particles\n if pid<0: continue\n # Note that we follow the Pythia6 convention of *including* fundamental SUSY particles\n # The format is VERY specific; needs mass and width (we always set the width to 0)\n # Mass is in MeV here, rather than GeV as in the dictionary\n out_file.write('\\nM %i %11.7E +0.0E+00 -0.0E+00 %s %s'%(pid,masses[pid]*1000.,offset_options[pid][2],charge(offset_options[pid][3])))\n out_file.write('\\nW %i %11.7E +0.0E+00 -0.0E+00 %s %s'%(pid,0.E+00,offset_options[pid][2],charge(offset_options[pid][3])))\n\n # Done writing all the lines! Clean up if necessary\n if isinstance(pdg_table, str):\n out_file.close()\n\n # Nothing to return",
"def createPalmerInterpolationFunctions(impure=True):\n #Construct linear interpolation functions for Table from Palmer (1991)\n T_arr = np.array([5.,15.,25.])\n logPCO2_arr = np.log10(np.array([1.0,0.3,0.03,0.003]))\n T_grid, CO2_grid = np.meshgrid(T_arr,logPCO2_arr)\n if impure:\n k1_grid = np.array([[0.07,0.09,0.12],\n [0.03,0.035,0.04],\n [0.009,0.015,0.02],\n [0.006,0.01,0.015]])\n else:\n k1_grid = np.array([[0.11,0.14,0.18],\n [0.044,0.055,0.065],\n [0.014,0.018,0.028],\n [0.01,0.015,0.02]])\n C_Cs_T_grid = np.array([[0.8,0.85,0.9],\n [0.65,0.7,0.8],\n [0.6,0.7,0.8],\n [0.6,0.7,0.8]])\n n_arr = np.array([1.5,1.6,1.7,2.2])\n\n palmer_k1 = LinearNDInterpolator((T_grid.ravel(), CO2_grid.ravel()), k1_grid.ravel())\n palmer_C_Cs_T = LinearNDInterpolator((T_grid.ravel(), CO2_grid.ravel()), C_Cs_T_grid.ravel())\n palmer_n = interp1d(logPCO2_arr, n_arr)\n return (palmer_k1, palmer_C_Cs_T, palmer_n)",
"def pair_hg(gene_map, in_cls_count, pop_count, in_cls_product, total_product,\n upper_tri_indices):\n\n # maps the scipy hypergeom test over a numpy array\n #vhg = np.vectorize(ss.hypergeom.sf, excluded=[1,3,4], otypes=[np.float])\n\n #This gives us the matrix with one subtracted everywhere(zero\n #lowest since cant have neg counts). With SF, this should give\n #us prob that we get X cells or more\n #altered_in_cls_product = np.copy(in_cls_product)\n #for value in np.nditer(altered_in_cls_product,op_flags=['readwrite']):\n # if value == 0:\n # pass\n # else:\n # value[...] = value - 1\n #print('here')\n #print(in_cls_product)\n #print(altered_in_cls_product)\n #print(upper_tri_indices)\n #altered_in_cls_product = altered_in_cls_product.transpose()\n\n # Only apply to upper triangular\n #hg_result = vhg(\n # in_cls_product[upper_tri_indices] ,\n # pop_count,\n #in_cls_count,\n # total_product[upper_tri_indices],\n # in_cls_count,\n # loc=1\n #)\n #output = pd.DataFrame({\n # 'gene_1': gene_map[upper_tri_indices[0]],\n # 'gene_2': gene_map[upper_tri_indices[1]],\n # 'HG_stat': hg_result\n #}, columns=['gene_1', 'gene_2', 'HG_stat'])\n\n #print('here')\n #print(gene_map)\n #return output\n\n\n #OLD CODE\n vhg = np.vectorize(ss.hypergeom.sf, excluded=[1, 2, 4], otypes=[np.float])\n\n # Only apply to upper triangular\n hg_result = vhg(\n in_cls_product[upper_tri_indices],\n pop_count,\n in_cls_count,\n total_product[upper_tri_indices],\n loc=1\n )\n #print(in_cls_product)\n #print(in_cls_product[upper_tri_indices])\n #print(total_product)\n #print(total_product[upper_tri_indices])\n #print(hg_result)\n #print(gene_map)\n output = pd.DataFrame({\n 'gene_1': gene_map[upper_tri_indices[0]],\n 'gene_2': gene_map[upper_tri_indices[1]],\n 'HG_stat': hg_result\n }, columns=['gene_1', 'gene_2', 'HG_stat'])\n return output",
"def _engprop(l): # {{{1\n print(\" \\\\begin{tabular}[t]{rcrrl}\")\n print(\" \\\\multicolumn{4}{c}{\\\\small\"\n \"\\\\textbf{Laminate stacking}}\\\\\\\\[0.1em]\")\n print(\" \\\\toprule %% \\\\usepackage{booktabs}\")\n print(\" Layer & Weight & Angle & vf & Fiber type\\\\\\\\\")\n print(\" & [g/m$^2$] & [$\\\\circ$] & [\\\\%]\\\\\\\\\")\n print(\" \\\\midrule\")\n for ln, la in enumerate(l.layers, start=1):\n s = \" {} & {:4.0f} & {:5.0f} & {:.3g} & {}\\\\\\\\\"\n texfname = la.fiber.name.replace('_', '\\_')\n print(s.format(ln, la.fiber_weight, la.angle, la.vf*100, texfname))\n print(\" \\\\bottomrule\")\n print(\" \\\\end{tabular}\\\\hspace{0.02\\\\textwidth}\")\n print(\" \\\\begin{tabular}[t]{rrl}\")\n print(\" \\\\multicolumn{3}{c}{\\\\small\\\\textbf{Engineering\"\n \" properties}}\\\\\\\\[0.1em]\")\n print(\" \\\\toprule\")\n print(\" Property & Value & Dimension\\\\\\\\\")\n print(\" \\\\midrule\")\n print(\" $\\\\mathrm{{v_f}}$ & {:.3g} &\\\\%\\\\\\\\\".format(l.vf*100))\n print(\" $\\\\mathrm{{w_f}}$ & {:.3g} &\\\\%\\\\\\\\\".format(l.wf*100))\n print(\" thickness & {:.3g} & mm\\\\\\\\\".format(l.thickness))\n print(\" density & {:.3g} & g/cm$^3$\\\\\\\\\".format(l.ρ))\n s = \" weight & {:.0f} & g/m$^2$\\\\\\\\\"\n print(s.format(l.fiber_weight+l.resin_weight))\n print(\" resin & {:.0f} & g/m$^2$\\\\\\\\\".format(l.resin_weight))\n print(\" \\\\midrule\")\n print(\" $\\\\mathrm{{E_x}}$ & {:8.0f} & MPa\\\\\\\\\".format(l.Ex))\n print(\" $\\\\mathrm{{E_y}}$ & {:8.0f} & MPa\\\\\\\\\".format(l.Ey))\n print(\" $\\\\mathrm{{G_{{xy}}}}$ & {:8.0f} & MPa\\\\\\\\\".format(l.Gxy))\n print(\" $\\\\mathrm{{\\\\nu_{{xy}}}}$ & {:g} &-\\\\\\\\\".format(l.νxy))\n print(\" $\\\\mathrm{{\\\\nu_{{yx}}}}$ & {:g} &-\\\\\\\\\".format(l.νyx))\n s = \" $\\\\mathrm{{\\\\alpha_x}}$ & {:g} & K$^{{-1}}$\\\\\\\\\"\n print(s.format(l.αx))\n s = \" $\\\\mathrm{{\\\\alpha_y}}$ & {:g} & K$^{{-1}}$\\\\\\\\\"\n print(s.format(l.αy))\n print(\" \\\\bottomrule\")\n print(\" \\\\end{tabular}\")",
"def _build_lookup_tables(cls):\n # TODO: Make this faster by using JIT-compiled ufuncs and vector arithmetic, when possible\n primitive_element = int(cls._primitive_element)\n add = cls._add.python_calculate\n multiply = cls._multiply.python_calculate\n\n cls._EXP = np.zeros(2 * cls.order, dtype=np.int64)\n cls._LOG = np.zeros(cls.order, dtype=np.int64)\n cls._ZECH_LOG = np.zeros(cls.order, dtype=np.int64)\n if cls.characteristic == 2:\n cls._ZECH_E = 0\n else:\n cls._ZECH_E = (cls.order - 1) // 2\n\n element = 1\n cls._EXP[0] = element\n cls._LOG[0] = 0 # Technically -Inf\n for i in range(1, cls.order):\n # Increment by multiplying by the primitive element, which is a multiplicative generator of the field\n element = multiply(element, primitive_element)\n cls._EXP[i] = element\n\n # Assign to the log lookup table but skip indices greater than or equal to `order - 1`\n # because `EXP[0] == EXP[order - 1]`\n if i < cls.order - 1:\n cls._LOG[cls._EXP[i]] = i\n\n # Compute Zech log lookup table\n for i in range(0, cls.order):\n one_plus_element = add(1, cls._EXP[i])\n cls._ZECH_LOG[i] = cls._LOG[one_plus_element]\n\n if not cls._EXP[cls.order - 1] == 1:\n raise RuntimeError(\n f\"The anti-log lookup table for {cls.name} is not cyclic with size {cls.order - 1}, which means \"\n f\"the primitive element {cls._primitive_element} does not have multiplicative order {cls.order - 1} \"\n f\"and therefore isn't a multiplicative generator for {cls.name}.\"\n )\n if not len(set(cls._EXP[0 : cls.order - 1])) == cls.order - 1:\n raise RuntimeError(\n f\"The anti-log lookup table for {cls.name} is not unique, \"\n f\"which means the primitive element {cls._primitive_element} has order less than {cls.order - 1} \"\n f\"and is not a multiplicative generator of {cls.name}.\"\n )\n if not len(set(cls._LOG[1 : cls.order])) == cls.order - 1:\n raise RuntimeError(f\"The log lookup table for {cls.name} is not unique.\")\n\n # Double the EXP table to prevent computing a `% (order - 1)` on every multiplication lookup\n cls._EXP[cls.order : 2 * cls.order] = cls._EXP[1 : 1 + cls.order]",
"def map_cell_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n for gal_index in range(GR.N_gal):\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n print('Now mapping %s' % gal_ob.name)\n isrf_ob = gal.isrf(gal_index)\n\n # Load SKIRT output\n wavelengths,bin_width = aux.read_probe_wavelengths(isrf_ob.name)\n N_start,N_stop = aux.FUV_index(wavelengths)\n image_data,units = isrf_ob._get_cut_probe(orientation=p.orientation)\n\n # Plot\n ax1 = axs[9 - counter]\n if p.prop == 'FUV':\n # FUV_xy_image = np.array([np.trapz(image_data[N_start:N_stop,:,:],x=wavelengths[N_start:N_stop]) \\\n # for i in range(len(df))])\n FUV_xy_image = image_data[N_start:N_stop,:,:].sum(axis=0) * 4 * np.pi\n FUV_xy_image = ndimage.rotate(FUV_xy_image, 0, reshape=True)\n # FUV_xy_image = np.fliplr(FUV_xy_image)\n FUV_xy_image[FUV_xy_image <= 0] = np.min(FUV_xy_image[FUV_xy_image > 0])\n im = ax1.imshow(np.log10(FUV_xy_image),\\\n extent=[-isrf_ob.radius,isrf_ob.radius,-isrf_ob.radius,isrf_ob.radius],\\\n vmin=p.vmin,\\\n cmap='twilight')\n lab = 'FUV flux [W/m$^2$/micron]'\n\n # pdb.set_trace()\n\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.8*gal_ob.radius,0.8*gal_ob.radius])\n ax1.set_ylim([-0.8*gal_ob.radius,0.8*gal_ob.radius])\n if p.prop == 'm':\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n\n if counter == 0:\n cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n figname = p.d_plot + 'cell_data/map_%s_%s_gals_%s_%i.png' % (p.prop,p.z1,p.orientation,fignum)\n print('Saving in ' + figname)\n # plt.tight_layout()\n plt.savefig(figname, format='png', dpi=250, facecolor='w')\n fignum += 1\n pdb.set_trace()\n else:\n fig, ax1 = plt.subplots(figsize=(10,10))\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='cell_data')\n print(simgas.keys())\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n\n # Plot\n Rmax = max_scale/2\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D = np.log10(map2D)\n if not p.log: map2D[map2D < p.vmin] = p.vmin/2 #np.min(map2D[map2D > 0])\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index))\n if not os.path.isdir(p.d_plot + 'cell_data/'): os.mkdir(p.d_plot + 'cell_data/')\n plt.savefig(p.d_plot + 'cell_data/map_%s_G%i.png' % (p.prop,p.gal_index), format='png', dpi=250, facecolor='w')",
"def dp_cal_and_pro_only(foods, cal_goal, pro_goal):\n macros = init_two_d_array((cal_goal, pro_goal), 999999999)\n foods_used = init_two_d_array((cal_goal, pro_goal), {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i and int(food['protein']) > j):\n continue\n if (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]).copy()\n new_cal = calories(foods, prev_foods_used) + food['calories']\n new_pro = protein(foods, prev_foods_used) + food['protein']\n if (macros[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 50 and new_cal < i + 10\n and new_pro > j - 5 and new_pro < j + 5):\n macros[i][j] = prev_cost + food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1]",
"def get_degeneracy_table(self):\n df = self.protein_degeneracy_table.copy()\n df['Oligonucleotides'] = df.Degeneracy * df.Proteins\n\n df['DNA_Quantile'] = \\\n df.Oligonucleotides.cumsum() / sum(df.Oligonucleotides)\n df['Protein_Quantile'] = \\\n df.Proteins.cumsum() / sum(df.Proteins)\n self.degeneracy_table = df\n\n # defining additional aliases\n self.protein_quantiles = df['Protein_Quantile'].tolist()\n self.dna_quantiles = df['DNA_Quantile'].tolist()\n self.degeneracies = df['Degeneracy'].tolist()\n\n self.df = df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method creates ``self.param_dict`` regulating the strength of the correlation between sec_haloprop and galprop at each value of prim_galprop.
|
def _build_param_dict(self, **kwargs):
    if 'correlation_strength' in kwargs.keys():
        correlation_strength = kwargs['correlation_strength']
        if custom_len(correlation_strength) > 1:
            try:
                self.correlation_strength_abcissa = kwargs['correlation_strength_abcissa']
            except KeyError:
                msg = ("If correlation_strength keyword is passed to the constructor, \n" +
                    "you must also pass a correlation_strength_abcissa keyword argument " +
                    "storing an array of the same length as correlation_strength.")
                raise KeyError(msg)
        else:
            # A scalar correlation strength is treated as a single point at a dummy abscissa
            self.correlation_strength_abcissa = [0]
            correlation_strength = [correlation_strength]
        self._param_dict_keys = ['correlation_param' + str(i+1) for i in range(len(correlation_strength))]
        self.param_dict = {key: value for key, value in zip(self._param_dict_keys, correlation_strength)}
    else:
        self.param_dict = {'correlation_param1': 1.0}
    self._set_correlation_strength()
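
A minimal, self-contained sketch, assuming hypothetical values for correlation_strength and correlation_strength_abcissa, of how the keyword arguments above map onto param_dict keys:

# Illustrative only; the values below are assumptions, not part of the original code.
correlation_strength = [0.25, 0.75]          # hypothetical strengths at each abscissa point
correlation_strength_abcissa = [12.0, 15.0]  # hypothetical abscissa (e.g. log10 of prim_galprop)
param_dict_keys = ['correlation_param' + str(i + 1) for i in range(len(correlation_strength))]
param_dict = dict(zip(param_dict_keys, correlation_strength))
print(param_dict)  # {'correlation_param1': 0.25, 'correlation_param2': 0.75}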
|
[
"def _initialize_param_dict(self):\n self.param_dict={}\n for ipar, val in enumerate(self.ordinates):\n key = self._get_param_key(ipar)\n self.param_dict[key] = val",
"def _sample_params(self, trial: Trial) -> dict:\n # pseudocode\n # . hyparams = dict loop self.param_space trial.method(args)\n hyparams = {}\n for param in self.param_space:\n print(param.func)\n hyparams[param.name] = param.func(trial, param.name, *param.args)\n print(hyparams)\n return hyparams",
"def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()",
"def _build_param_dict(self, **kwargs):\n\n if hasattr(self, 'retrieve_default_param_dict'):\n self.param_dict = self.retrieve_default_param_dict()\n else:\n self.param_dict = {}\n\n scatter_param_dict = self.scatter_model.param_dict\n\n for key, value in scatter_param_dict.iteritems():\n self.param_dict[key] = value",
"def _draw_param_dicts(self, param_dicts):\n self.params = dict()\n\n if not self._preprocessing:\n # No preprocessing\n # the expected param_dicts key is 'est_name'\n for est_name, _ in self._estimators:\n self._set_params(param_dicts, est_name)\n else:\n # Preprocessing\n # Iterate over cases, expected param_dicts key is\n # 'case_name__est_name'\n if isinstance(self._preprocessing, dict):\n for case in self._preprocessing:\n for est_name, _ in self._estimators[case]:\n self._set_params(\n param_dicts, '%s.%s' % (case, est_name))\n else:\n for est_name, _ in self._estimators:\n self._set_params(param_dicts, est_name)",
"def _PhenomPCalculateModelParameters(self, p):\n\n\n logger.info(\"p['m1'] = {0}\".format(p['m1']))\n logger.info(\"p['m2'] = {0}\".format(p['m2']))\n if p['m1'] < p['m2']:\n raise ValueError('m1 = {0}, m2 = {1}. Convention error, this function needs m1 > m2'.format(p['m1'], p['m2']))\n\n #check that the spin magnitude is <=1\n if norm([p['chi1x'], p['chi1y'], p['chi1z']]) > 1.:\n raise ValueError('chi1 has a magnitude > 1')\n if norm([p['chi2x'], p['chi2y'], p['chi2z']]) > 1.:\n raise ValueError('chi2 has a magnitude > 1')\n\n m1_2 = p['m1']**2.\n m2_2 = p['m2']**2.\n\n #we start out in the Lhat = zhat frame\n #and define the spin w.r.t this frame.\n #Then, we incline the orbital frame w.r.t to the z-axis\n #by the angle inc.\n #This is done by a rotation about the y-axis, so the y-components do not change\n #in LAL this step is done in XLALSimInspiralInitialConditionsPrecessingApproxs in LALSimInspiralSpinTaylor.c\n #But it's simple so I just do it in this function.\n\n logger.info(\"spins before rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n p['chi1x'], p['chi1z'] = self.ROTATEY(p['inclination'], p['chi1x'], p['chi1z'])\n p['chi2x'], p['chi2z'] = self.ROTATEY(p['inclination'], p['chi2x'], p['chi2z'])\n\n logger.info(\"spins after rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n\n #from this we construct the orbital angular momentum\n #Again, this is a rotation about the y-axis.\n lnhatx = sin(p['inclination'])\n lnhaty = 0.\n lnhatz = cos(p['inclination'])\n\n chip, chi1_l, chi2_l = chip_fun(p['m1'], p['m2'], p['chi1x'], p['chi1y'], p['chi1z'], p['chi2x'], p['chi2y'], p['chi2z'], lnhatx, lnhaty, lnhatz)\n\n #compute L, J0 and orientation angles\n piM = Constants.LAL_PI * p['M_sec']\n v_ref = (piM * p['fRef'])**(1./3.)\n\n #Use 2PN approximation for initial L\n #magnitude of L\n L0 = p['Mtot']**2. * PhenomPL2PN(v_ref, p['eta'])\n\n #compute initial J\n #NOTE: we the spins need to be dimensionfull\n Jx0 = L0 * lnhatx + p['chi1x']*m1_2 + p['chi2x']*m2_2\n Jy0 = L0 * lnhaty + p['chi1y']*m1_2 + p['chi2y']*m2_2\n Jz0 = L0 * lnhatz + p['chi1z']*m1_2 + p['chi2z']*m2_2\n J0 = norm( [ Jx0, Jy0, Jz0 ] )\n\n #Compute thetaJ, the angle between J0 and line of sight (z-direction)\n if (J0 < 1e-10):\n logger.warning(\"Warning: |J0| < 1e-10. Setting thetaJ = 0.\\n\")\n thetaJ = 0.\n else:\n thetaJ = arccos(Jz0 / J0)\n\n #phiJ, We only use this angle internally since it is degenerate with alpha0.\n #NOTE:\n #in C code\n #if (Jx0 < DBL_MIN && Jy0 < DBL_MIN)\n #I think the replacement is the same\n if (Jx0 <= 0. 
and Jy0 <= 0.):\n phiJ = 0.\n else:\n phiJ = arctan2(Jy0, Jx0) #Angle of J0 in the plane of the sky\n #NOTE: Compared to the similar code in SpinTaylorF2 we have defined phiJ as the angle between the positive\n #(rather than the negative) x-axis and the projection of J0, since this is a more natural definition of the angle.\n #We have also renamed the angle from psiJ to phiJ.\n\n #Rotate Lnhat back to frame where J is along z and the line of\n #sight in the Oxz plane with >0 projection in x, to figure out initial alpha\n #The rotation matrix is\n #{\n #{-cos(thetaJ)*cos(phiJ), -cos(thetaJ)*sin(phiJ), sin(thetaJ)},\n #{sin(phiJ), -cos(phiJ), 0},\n #{cos(phiJ)*sin(thetaJ), sin(thetaJ)*sin(phiJ),cos(thetaJ)}\n #}\n\n rotLx = -lnhatx*cos(thetaJ)*cos(phiJ) - lnhaty*cos(thetaJ)*sin(phiJ) + lnhatz*sin(thetaJ)\n rotLy = lnhatx*sin(phiJ) - lnhaty*cos(phiJ)\n if (rotLx == 0.0 and rotLy == 0.0):\n alpha0 = 0.0\n else:\n alpha0 = arctan2(rotLy, rotLx)\n\n logger.info(\"chi1_l = {0}, chi2_l = {1}, chip = {2}, thetaJ = {3}, alpha0 = {4},\".format(chi1_l, chi2_l, chip, thetaJ, alpha0))\n\n return {\"chi1_l\" : chi1_l, \"chi2_l\" : chi2_l, \"chip\": chip, \"thetaJ\" : thetaJ, \"alpha0\" : alpha0}",
"def _set_correlation_strength(self):\n\n if hasattr(self, 'correlation_strength_abcissa'):\n abcissa = self.correlation_strength_abcissa\n ordinates = [self.param_dict['correlation_param'+str(i+1)] for i in range(len(abcissa))]\n correlation_strength_spline = model_helpers.custom_spline(abcissa, ordinates, k=custom_len(abcissa)-1)\n self.correlation_strength = correlation_strength_spline(self.prim_galprop_bins)\n else:\n self.correlation_strength = np.repeat(self.param_dict['correlation_param1'], len(self.prim_galprop_bins))\n\n self.correlation_strength[self.correlation_strength > 1] = 1\n self.correlation_strength[self.correlation_strength <- 1] = -1\n\n self.correlation_strength = np.append(\n self.correlation_strength, self.correlation_strength[-1])",
"def getGPEParams(self):\n outKeysScaleDouble = ['R', 'gamma_C', 'gamma_R', 'g_C', 'g_R', 'k',\n 'Pth']\n outKeysScaleSingle = outKeysScaleDouble + ['gamma_nl']\n outKeysScale = outKeysScaleSingle if self.singleComp else\\\n outKeysScaleDouble\n outKeys = ['charL', 'charT']\n out = {key: self.__dict__[key + '_scaled'] for key in outKeysScale}\n for key in outKeys:\n out[key] = self.__dict__[key]\n return out",
"def _set_init_param_dict(self):\n\n self.param_dict = {}\n\n try:\n suppress_warning = self._suppress_repeated_param_warning\n except AttributeError:\n suppress_warning = False\n msg = (\"\\n\\nThe param_dict key %s appears in more than one component model.\\n\"\n \"This is permissible, but if you are seeing this message you should be sure you \"\n \"understand it.\\nIn particular, double-check that this parameter does not have \"\n \"conflicting meanings across components.\\n\"\n \"\\nIf you do not wish to see this message every time you instantiate, \\n\"\n \"simply attach a _suppress_repeated_param_warning attribute \\n\"\n \"to any of your component models and set this variable to ``True``.\\n\")\n\n for component_model in self.model_dictionary.values():\n\n if not hasattr(component_model, 'param_dict'):\n component_model.param_dict = {}\n intersection = set(self.param_dict) & set(component_model.param_dict)\n if intersection != set():\n for key in intersection:\n if suppress_warning is False:\n warn(msg % key)\n\n for key, value in component_model.param_dict.iteritems():\n self.param_dict[key] = value\n\n self._init_param_dict = copy(self.param_dict)",
"def init_params(self):\n self.params = {p.title: p for p in\n chain(self.session.shared_params.values(), self._get_addl_params())}",
"def evaluate_reco_param(self):\n evals = self.input_binning['true_energy'].weighted_centers.magnitude\n n_e = len(self.input_binning['true_energy'].weighted_centers.magnitude)\n n_cz = len(self.input_binning['true_coszen'].weighted_centers.magnitude)\n eval_dict = deepcopy(self.param_dict)\n for flavintgroup, dim_dict in eval_dict.items():\n for dim, dist_list in dim_dict.items():\n for dist_prop_dict in dist_list:\n for dist_prop in dist_prop_dict.keys():\n if dist_prop == 'dist':\n continue\n if callable(dist_prop_dict[dist_prop]):\n func = dist_prop_dict[dist_prop]\n vals = func(evals)\n dist_prop_dict[dist_prop] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n elif isinstance(dist_prop_dict[dist_prop], dict):\n assert dist_prop == 'kwargs'\n for kwarg in dist_prop_dict['kwargs'].keys():\n func = dist_prop_dict['kwargs'][kwarg]\n vals = func(evals)\n dist_prop_dict['kwargs'][kwarg] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n # Now check for consistency, to not have to loop over all dict\n # entries again at a later point in time\n self.check_reco_dist_consistency(dist_list)\n return eval_dict",
"def _build_param_dict(self):\n # Add parameter handlers to parameter dict. \n self._param_dict.add(SBE37Parameter.OUTPUTSAL,\n r'(do not )?output salinity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.OUTPUTSV,\n r'(do not )?output sound velocity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.NAVG,\n r'number of samples to average = (\\d+)',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.SAMPLENUM,\n r'samplenumber = (\\d+), free = \\d+',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.INTERVAL,\n r'sample interval = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.STORETIME,\n r'(do not )?store time with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.TXREALTIME,\n r'(do not )?transmit real-time data',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCMODE,\n r'serial sync mode (enabled|disabled)',\n lambda match : False if (match.group(1)=='disabled') else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCWAIT,\n r'wait time after serial sync sampling = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.TCALDATE,\n r'temperature: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.TA0,\n r' +TA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA1,\n r' +TA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA2,\n r' +TA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA3,\n r' +TA3 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CCALDATE,\n r'conductivity: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.CG,\n r' +G = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CH,\n r' +H = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CI,\n r' +I = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CJ,\n r' +J = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.WBOTC,\n r' +WBOTC = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CTCOR,\n r' +CTCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n 
self._float_to_string)\n self._param_dict.add(SBE37Parameter.CPCOR,\n r' +CPCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PCALDATE,\n r'pressure .+ ((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.PA0,\n r' +PA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA1,\n r' +PA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA2,\n r' +PA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA0,\n r' +PTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA1,\n r' +PTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA2,\n r' +PTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB0,\n r' +PTCSB0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB1,\n r' +PTCSB1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB2,\n r' +PTCSB2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.POFFSET,\n r' +POFFSET = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RCALDATE,\n r'rtc: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.RTCA0,\n r' +RTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA1,\n r' +RTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA2,\n r' +RTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)",
"def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj",
"def update_params(self, param_diff):\n # Update the predicted parameters\n self.pred_params += param_diff\n\n # Use the SMPL model to render these parameters to point clouds\n pred_pcs = np.zeros((self.BATCH_SIZE, 6890, 3))\n for i, params in enumerate(self.pred_params):\n pred_pcs[i] = self.smpl.set_params(beta=params[72:82], pose=params[0:72].reshape((24,3)), trans=params[82:85])\n\n self.pred_pcs = pred_pcs\n\n return self.pred_params, self.pred_pcs",
"def set_rand_params(self) -> Dict:\n new_params: Dict = self.gen_params()\n self.set_params(new_params)\n return new_params",
"def set_hyperparams_dict(self, hyper_params):\n self.hyper_params = {\n \"alpha\":0.05,\n \"beta\": 0.29,\n \"default integrity\": 0.6,\n \"initial integrity\": 0.6,\n \"minimum integrity\": 0.01,\n \"maximum integrity\": 0.99,\n \"minimization mode\": True,\n \"target\": 0,\n \"minimum entropy\": -0.01,\n \"tolerance\": 0,\n \"max steps\": 50,\n \"memory size\": 50\n }\n # Update dictionary if appropriate\n if isinstance(hyper_params, dict):\n self.hyper_params.update(hyper_params)",
"def _update_pars(self, p):\n self._prevalence = p\n self._beta_p = self.rng.normal()\n\n self.cl.prevalence = p\n self.cl._beta_p = self._beta_p",
"def histalp_scaling_params():\n # start logger with OGGM settings\n cfg.set_logging_config()\n\n # get HISTALP RGI IDs\n rgi_ids = pd.read_csv('/home/users/moberrauch/data/histalp_rgi_ids.csv',\n index_col=0)['RGIId'].values\n compute_scaling_params(rgi_ids, path=True)",
"def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.PTYPE,\n r\"<Sensor id = 'Main Pressure'>.*?<type>(.*?)</type>.*?</Sensor>\",\n self._pressure_sensor_to_int,\n str,\n type=ParameterDictType.INT,\n display_name=\"Pressure Sensor Type\",\n range={'Strain Gauge': 1, 'Quartz with Temp Comp': 3},\n startup_param=True,\n direct_access=True,\n default_value=1,\n description=\"Sensor type: (1:strain gauge | 3:quartz with temp comp)\",\n visibility=ParameterDictVisibility.IMMUTABLE,\n regex_flags=re.DOTALL)\n self._param_dict.add(Parameter.ECHO,\n r'<EchoCharacters>(.*)</EchoCharacters>',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Echo Characters\",\n range={'True': True, 'False': False},\n description=\"Enable characters to be echoed as typed (true | false)\",\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OUTPUT_EXEC_TAG,\n r'<OutputExecutedTag>(.*)</OutputExecutedTag>',\n lambda match: True,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Output Execute Tag\",\n range={'True': True, 'False': False},\n description=\"Enable display of XML executing and executed tags (true | false)\",\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_MODE,\n r'<AutoRun>(.*)</AutoRun>',\n self._pump_mode_to_int,\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Mode\",\n range={'No Pump':0, 'Run Pump for 0.5 sec': 1, 'Run Pump during Sample': 2},\n description=\"Mode: (0:no pump | 1:run pump for 0.5 sec | 2:run pump during sample)\",\n startup_param=True,\n direct_access=True,\n default_value=2)\n self._param_dict.add(Parameter.SBE50,\n r'SBE50>(.*)</SBE50',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"SBE50 Attached\",\n range={'True': True, 'False': False},\n description=\"Enabled SBE50: (true | false)\",\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.DELAY_BEFORE_SAMPLE,\n r'DelayBeforeSampling>(.*?)</DelayBeforeSampling',\n lambda match: float(match.group(1)),\n self._float_to_string,\n type=ParameterDictType.FLOAT,\n display_name=\"Delay Before Sample\",\n range=(0, 600),\n description=\" Time to wait after switching on external voltages and RS-232 sensors \"\n \"before sampling: (0-600).\",\n startup_param=True,\n direct_access=True,\n default_value=0.0,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.DELAY_AFTER_SAMPLE,\n r'DelayAfterSample>(.*?)</DelayBeforeSampling',\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Delay After Sample\",\n description=\"Time to wait after sampling is completed, before turning off power \"\n \"to external voltages and RS-232 sensors.\",\n #range not specified in IOS\n startup_param=True,\n direct_access=True,\n default_value=0.0,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.SYNCMODE,\n r'SyncMode>(dis|en)abled</SyncMode',\n lambda match: True if match.group(1) == 'en' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n 
display_name=\"Enable Serial Sync\",\n range={'True': True, 'False': False},\n description=\"Enable serial line sync mode: (true | false)\",\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.NCYCLES,\n r'NCycles>(.*?)</NCycles',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Ncycles\",\n #range not specified in the IOS\n description=\"Number of measurements to take and average every SampleInterval seconds \"\n \"(must be and even number).\",\n startup_param=True,\n direct_access=False,\n default_value=4)\n self._param_dict.add(Parameter.INTERVAL,\n r'SampleInterval>(.*?)</SampleInterval',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Sample Interval\",\n range=(10, 14400),\n description=\"Interval between samples: (10 - 14,400).\",\n startup_param=True,\n direct_access=False,\n units=Units.SECOND,\n default_value=10)\n self._param_dict.add(Parameter.BIOWIPER,\n r'Biowiper>(.*?)</Biowiper',\n lambda match: False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Biowiper\",\n range={'True': True, 'False': False},\n description=\"Enable ECO-FL fluorometer with Bio-Wiper: (true | false)\",\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.TXREALTIME,\n r'TxRealTime>(yes|no)</TxRealTime',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Transmit Real-Time\",\n range={'True': True, 'False': False},\n description=\"Enable real-time data output: (true | false)\",\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method uses the current values in the param_dict to update the strength of the correlation between sec_haloprop and galprop at each value of prim_galprop.
|
def _set_correlation_strength(self):
    if hasattr(self, 'correlation_strength_abcissa'):
        abcissa = self.correlation_strength_abcissa
        ordinates = [self.param_dict['correlation_param'+str(i+1)] for i in range(len(abcissa))]
        correlation_strength_spline = model_helpers.custom_spline(abcissa, ordinates, k=custom_len(abcissa)-1)
        self.correlation_strength = correlation_strength_spline(self.prim_galprop_bins)
    else:
        self.correlation_strength = np.repeat(self.param_dict['correlation_param1'], len(self.prim_galprop_bins))
    # Correlation coefficients must lie in the closed interval [-1, 1]
    self.correlation_strength[self.correlation_strength > 1] = 1
    self.correlation_strength[self.correlation_strength < -1] = -1
    # Repeat the final value so the last prim_galprop bin is also covered
    self.correlation_strength = np.append(
        self.correlation_strength, self.correlation_strength[-1])
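
A minimal sketch of the clip-and-pad behaviour above, with numpy.interp standing in for model_helpers.custom_spline; the abscissa, parameter values and bins are assumed for the example:

import numpy as np

correlation_strength_abcissa = [12.0, 15.0]     # hypothetical
correlation_params = [0.4, 1.3]                 # hypothetical param_dict values
prim_galprop_bins = np.linspace(11.5, 15.5, 5)  # hypothetical bins

strength = np.interp(prim_galprop_bins, correlation_strength_abcissa, correlation_params)
strength = np.clip(strength, -1, 1)             # same effect as the two boolean-mask assignments
strength = np.append(strength, strength[-1])    # pad so the final bin is covered
print(strength)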
|
[
"def _update_pars(self, p):\n self._prevalence = p\n self._beta_p = self.rng.normal()\n\n self.cl.prevalence = p\n self.cl._beta_p = self._beta_p",
"def parameters_update(self, learning_rate):\n for i in range(1, self.L):\n #print('dW' + str(i))\n #print(self.grads['dW' + str(i)])\n #print(learning_rate * self.grads['dW' + str(i)])\n self.params['W' + str(i)] = self.params['W' + str(i)] - learning_rate * self.grads['dW' + str(i)]\n self.params['b' + str(i)] = self.params['b' + str(i)] - learning_rate * self.grads['db' + str(i)]",
"def update_params(self, param_diff):\n # Update the predicted parameters\n self.pred_params += param_diff\n\n # Use the SMPL model to render these parameters to point clouds\n pred_pcs = np.zeros((self.BATCH_SIZE, 6890, 3))\n for i, params in enumerate(self.pred_params):\n pred_pcs[i] = self.smpl.set_params(beta=params[72:82], pose=params[0:72].reshape((24,3)), trans=params[82:85])\n\n self.pred_pcs = pred_pcs\n\n return self.pred_params, self.pred_pcs",
"def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()",
"def _PhenomPCalculateModelParameters(self, p):\n\n\n logger.info(\"p['m1'] = {0}\".format(p['m1']))\n logger.info(\"p['m2'] = {0}\".format(p['m2']))\n if p['m1'] < p['m2']:\n raise ValueError('m1 = {0}, m2 = {1}. Convention error, this function needs m1 > m2'.format(p['m1'], p['m2']))\n\n #check that the spin magnitude is <=1\n if norm([p['chi1x'], p['chi1y'], p['chi1z']]) > 1.:\n raise ValueError('chi1 has a magnitude > 1')\n if norm([p['chi2x'], p['chi2y'], p['chi2z']]) > 1.:\n raise ValueError('chi2 has a magnitude > 1')\n\n m1_2 = p['m1']**2.\n m2_2 = p['m2']**2.\n\n #we start out in the Lhat = zhat frame\n #and define the spin w.r.t this frame.\n #Then, we incline the orbital frame w.r.t to the z-axis\n #by the angle inc.\n #This is done by a rotation about the y-axis, so the y-components do not change\n #in LAL this step is done in XLALSimInspiralInitialConditionsPrecessingApproxs in LALSimInspiralSpinTaylor.c\n #But it's simple so I just do it in this function.\n\n logger.info(\"spins before rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n p['chi1x'], p['chi1z'] = self.ROTATEY(p['inclination'], p['chi1x'], p['chi1z'])\n p['chi2x'], p['chi2z'] = self.ROTATEY(p['inclination'], p['chi2x'], p['chi2z'])\n\n logger.info(\"spins after rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n\n #from this we construct the orbital angular momentum\n #Again, this is a rotation about the y-axis.\n lnhatx = sin(p['inclination'])\n lnhaty = 0.\n lnhatz = cos(p['inclination'])\n\n chip, chi1_l, chi2_l = chip_fun(p['m1'], p['m2'], p['chi1x'], p['chi1y'], p['chi1z'], p['chi2x'], p['chi2y'], p['chi2z'], lnhatx, lnhaty, lnhatz)\n\n #compute L, J0 and orientation angles\n piM = Constants.LAL_PI * p['M_sec']\n v_ref = (piM * p['fRef'])**(1./3.)\n\n #Use 2PN approximation for initial L\n #magnitude of L\n L0 = p['Mtot']**2. * PhenomPL2PN(v_ref, p['eta'])\n\n #compute initial J\n #NOTE: we the spins need to be dimensionfull\n Jx0 = L0 * lnhatx + p['chi1x']*m1_2 + p['chi2x']*m2_2\n Jy0 = L0 * lnhaty + p['chi1y']*m1_2 + p['chi2y']*m2_2\n Jz0 = L0 * lnhatz + p['chi1z']*m1_2 + p['chi2z']*m2_2\n J0 = norm( [ Jx0, Jy0, Jz0 ] )\n\n #Compute thetaJ, the angle between J0 and line of sight (z-direction)\n if (J0 < 1e-10):\n logger.warning(\"Warning: |J0| < 1e-10. Setting thetaJ = 0.\\n\")\n thetaJ = 0.\n else:\n thetaJ = arccos(Jz0 / J0)\n\n #phiJ, We only use this angle internally since it is degenerate with alpha0.\n #NOTE:\n #in C code\n #if (Jx0 < DBL_MIN && Jy0 < DBL_MIN)\n #I think the replacement is the same\n if (Jx0 <= 0. 
and Jy0 <= 0.):\n phiJ = 0.\n else:\n phiJ = arctan2(Jy0, Jx0) #Angle of J0 in the plane of the sky\n #NOTE: Compared to the similar code in SpinTaylorF2 we have defined phiJ as the angle between the positive\n #(rather than the negative) x-axis and the projection of J0, since this is a more natural definition of the angle.\n #We have also renamed the angle from psiJ to phiJ.\n\n #Rotate Lnhat back to frame where J is along z and the line of\n #sight in the Oxz plane with >0 projection in x, to figure out initial alpha\n #The rotation matrix is\n #{\n #{-cos(thetaJ)*cos(phiJ), -cos(thetaJ)*sin(phiJ), sin(thetaJ)},\n #{sin(phiJ), -cos(phiJ), 0},\n #{cos(phiJ)*sin(thetaJ), sin(thetaJ)*sin(phiJ),cos(thetaJ)}\n #}\n\n rotLx = -lnhatx*cos(thetaJ)*cos(phiJ) - lnhaty*cos(thetaJ)*sin(phiJ) + lnhatz*sin(thetaJ)\n rotLy = lnhatx*sin(phiJ) - lnhaty*cos(phiJ)\n if (rotLx == 0.0 and rotLy == 0.0):\n alpha0 = 0.0\n else:\n alpha0 = arctan2(rotLy, rotLx)\n\n logger.info(\"chi1_l = {0}, chi2_l = {1}, chip = {2}, thetaJ = {3}, alpha0 = {4},\".format(chi1_l, chi2_l, chip, thetaJ, alpha0))\n\n return {\"chi1_l\" : chi1_l, \"chi2_l\" : chi2_l, \"chip\": chip, \"thetaJ\" : thetaJ, \"alpha0\" : alpha0}",
"def updateRNGParam(self, dictParam):\n for key in dictParam:\n if key == 'tolerance':\n self.RNGtolerance = dictParam['tolerance']\n elif key == 'initialGridDisc':\n self.RNGInitDisc = dictParam['initialGridDisc']\n self._distribution.updateRNGparameter(self.RNGtolerance,self.RNGInitDisc)",
"def _build_param_dict(self, **kwargs):\n \n if 'correlation_strength' in kwargs.keys():\n\n correlation_strength = kwargs['correlation_strength']\n if custom_len(correlation_strength) > 1:\n try:\n self.correlation_strength_abcissa = kwargs['correlation_strength_abcissa']\n except KeyError:\n msg = (\"If correlation_strength keyword is passed to the constructor, \\n\" + \n \"you must also pass a correlation_strength_abcissa keyword argument \" + \n \"storing an array of the same length as correlation_strength.\")\n raise(msg)\n else:\n self.correlation_strength_abcissa = [0]\n correlation_strength = [correlation_strength]\n\n self._param_dict_keys = ['correlation_param' + str(i+1) for i in range(len(correlation_strength))]\n self.param_dict = {key:value for key, value in zip(self._param_dict_keys, correlation_strength)}\n else:\n self.param_dict = {'correlation_param1': 1.0}\n self._set_correlation_strength()",
"def evaluate_reco_param(self):\n evals = self.input_binning['true_energy'].weighted_centers.magnitude\n n_e = len(self.input_binning['true_energy'].weighted_centers.magnitude)\n n_cz = len(self.input_binning['true_coszen'].weighted_centers.magnitude)\n eval_dict = deepcopy(self.param_dict)\n for flavintgroup, dim_dict in eval_dict.items():\n for dim, dist_list in dim_dict.items():\n for dist_prop_dict in dist_list:\n for dist_prop in dist_prop_dict.keys():\n if dist_prop == 'dist':\n continue\n if callable(dist_prop_dict[dist_prop]):\n func = dist_prop_dict[dist_prop]\n vals = func(evals)\n dist_prop_dict[dist_prop] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n elif isinstance(dist_prop_dict[dist_prop], dict):\n assert dist_prop == 'kwargs'\n for kwarg in dist_prop_dict['kwargs'].keys():\n func = dist_prop_dict['kwargs'][kwarg]\n vals = func(evals)\n dist_prop_dict['kwargs'][kwarg] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n # Now check for consistency, to not have to loop over all dict\n # entries again at a later point in time\n self.check_reco_dist_consistency(dist_list)\n return eval_dict",
"def glcmProps(P, prop='contrast'):\n\n (num_level, num_level2, num_dist, num_angle) = P.shape\n assert num_level == num_level2\n assert num_dist > 0\n assert num_angle > 0\n\n # create weights for specified property\n I, J = np.ogrid[0:num_level, 0:num_level]\n if prop == 'contrast':\n weights = (I - J) ** 2\n elif prop in ['ASM', 'energy', 'correlation']:\n pass\n elif prop == 'mean':\n weights, _ = np.mgrid[0:num_level, 0:num_level]\n elif prop == 'dissimilarity':\n weights = np.abs(I - J)\n elif prop == 'homogeneity':\n weights = 1. / (1. + (I - J) ** 2)\n else:\n raise ValueError('%s is an invalid property' % (prop))\n\n # compute property for each GLCM\n if prop == 'energy':\n asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]\n results = np.sqrt(asm)\n elif prop == 'ASM':\n results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]\n elif prop == 'correlation':\n results = np.zeros((num_dist, num_angle), dtype=np.float64)\n I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))\n J = np.array(range(num_level)).reshape((1, num_level, 1, 1))\n diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]\n diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]\n\n std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),\n axes=(0, 1))[0, 0])\n std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),\n axes=(0, 1))[0, 0])\n cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),\n axes=(0, 1))[0, 0]\n\n # handle the special case of standard deviations near zero\n mask_0 = std_i < 1e-15\n mask_0[std_j < 1e-15] = True\n results[mask_0] = 1\n\n # handle the standard case\n mask_1 = mask_0 == False\n results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])\n elif prop in ['contrast', 'dissimilarity', 'homogeneity', 'mean']:\n weights = weights.reshape((num_level, num_level, 1, 1))\n results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]\n\n return results",
"def update_parameters(parameters, grads, learning_rate = 1.2):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n ### END CODE HERE ###\n \n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads['dW1']\n db1 = grads['db1']\n dW2 = grads['dW2']\n db2 = grads['db2']\n ## END CODE HERE ###\n \n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - learning_rate * dW1\n b1 = b1 - learning_rate * db1\n W2 = W2 - learning_rate * dW2\n b2 = b2 - learning_rate * db2\n ### END CODE HERE ###\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters",
"def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)",
"def _updateParameters(self):\n pass #future tool\n #update A <-- shouldn't be needed since A is an early-life stabilizing parameter\n #update alpha <-- if we're moving basically in the same direction, don't damp!\n #update a <-- increase or decrease step size based on gradient information\n # determine the minimum desirable step size at early calcualtion (1/10 of average\n #update c <-- estimate stochasticity of the response; if low, \"c\" tends to 0\n #update gamma <-- distance between samples to determine gradient. Scales with step size?",
"def UpdateParameters(self, param):\n\n for i, attribute in enumerate(self._fit_key.keys()):\n if attribute in param.keys():\n # Set attribute according to if it is a range or not\n if ';' in self._fit_key[attribute]:\n varmin = float(min(self._fit_key[attribute].split(';')))\n varmax = float(max(self._fit_key[attribute].split(';')))\n var = ROOT.RooRealVar(\n attribute,\n attribute,\n varmin,\n varmax)\n param[attribute] = var\n else:\n param[attribute] = float(self._fit_key[attribute])\n\n info(\n 'Change default value of {} (= {}) for signal PDF'\n .format(attribute, self._fit_key[attribute]))",
"def update(self, newparams):\n for k, v in list(newparams.items()):\n if k in self.basis_params:\n # Make sure parameter is in dict, and check if it changed\n if k not in self.params:\n self.basis_dirty = True\n self.params[k] = v\n if np.any(v != self.params.get(k)):\n self.basis_dirty = True\n else:\n try:\n # here the sps.params.dirtiness should increase to 2 if\n # there was a change\n self.ssp.params[k] = v[0]\n except KeyError:\n pass\n # now update params\n self.params[k] = np.copy(np.atleast_1d(v))\n # if we changed only csp_params but are relying on COMPSP, make\n # sure we remake the basis\n if self.safe and (self.ssp.params.dirtiness == 1):\n self.basis_dirty = True\n # if we changed only csp_params propagate them through but don't\n # force basis remake (unless basis_dirty)\n if self.ssp.params.dirtiness == 1:\n self.ssp._update_params()\n\n if self.basis_dirty | (self.ssp.params.dirtiness == 2):\n self.build_basis()",
"def update_params (self):\n all_good = [\" \",\"-\"*60]\n #----------------------------------------------------#\n # check to make sure self._wl, self._data, self._inv_var are still ndarrays\n def check_arr (arr,all_good,which):\n if type(arr) != np.ndarray: \n try: arr = np.asarray(arr)\n except: all_good.append(\"ERROR: could not convert\"+which+\" array to an ndarray\")\n \n if arr.ndim != 3: all_good.append(\"ERROR: \"+which+\" array is not of dimension 3, instead:\"+str(arr.ndim))\n return arr,all_good\n \n self._wl, all_good = check_arr(self._wl,all_good,'wavelength')\n self._data, all_good = check_arr(self._data,all_good,'data')\n self._inv_var, all_good = check_arr(self._inv_var,all_good,'inverse variance')\n \n # update:\n self.shape = self._data.shape\n\n #----------------------------------------------------#\n # check the shape\n if self.shape != self._wl.shape: all_good.append('ERROR: obj._wl does not match shape:'+str(self.shape))\n if self.shape != self._inv_var.shape: all_good.append('ERROR: obj._inv_var does not match shape:'+str(self.shape))\n\n #----------------------------------------------------#\n if type(self._private_info).__name__ != 'dict': all_good.append('ERROR: obj._private_info not of type dict\\n>>perhaps you should run obj._reset_notes_and_info()')\n if type(self._notes).__name__ != 'dict': all_good.append('ERROR: obj._private_info not of type dict\\n>>perhaps you should run obj._reset_notes_and_info()')\n\n #----------------------------------------------------#\n # check self._bands and self._orders\n self.edit.sort_orders()\n\n #----------------------------------------------------#\n self._check_band_num()\n \n #----------------------------------------------------#\n if len(all_good) > 2: \n print \"\\n\".join(all_good)\n raise AttributeError(\"------ SEE ABOVE ERRORS ------------------\")",
"def update_penalty(self, **params):\n # TODO: document\n # this will only update any non non-convex penalty parameters that were changed\n self.penalty_config_.set_params(**params)\n sp_penalty = get_lla_subproblem_penalty(self.penalty_config_)\n self.solver_.update_penalty(**sp_penalty.get_params(deep=True))",
"def update_recognize_params(self, inps, trgs, preds):\r\n\r\n #self.delta_weight_v_to_h = np.matmul(inps.T,(trgs-preds))\r\n #self.delta_bias_h = np.sum(trgs-preds,axis=0)\r\n \r\n #weight_v_h += (1/inps.shape[0])*self.delta_weight_v_to_h\r\n #bias_h = (1/inps.shape[0])*self.delta_bias_h\r\n\r\n #self.weight_v_to_h = weight_v_h\r\n #self.bias_h = bias_h\r\n \r\n \r\n support = np.tensordot(inps, trgs-preds, axes=((0), (0)))\r\n self.weight_v_to_h += (self.learning_rate*support)\r\n \r\n self.bias_h += self.learning_rate*np.sum(trgs - preds, axis=0)\r\n \r\n \r\n\r\n return",
"def update_arm_parameters(self, arm_intuition, arm_selection, success):\n if success:\n self.alpha_params[arm_intuition, arm_selection] += 1\n else:\n self.beta_params[arm_intuition, arm_selection] += 1",
"def histalp_scaling_params():\n # start logger with OGGM settings\n cfg.set_logging_config()\n\n # get HISTALP RGI IDs\n rgi_ids = pd.read_csv('/home/users/moberrauch/data/histalp_rgi_ids.csv',\n index_col=0)['RGIId'].values\n compute_scaling_params(rgi_ids, path=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method uses the functions stored in ``new_haloprop_func_dict`` to create new halo properties as columns of the mock catalog, if applicable.
|
def add_new_haloprops(self, galaxy_table):
if hasattr(self, 'new_haloprop_func_dict'):
d = self.new_haloprop_func_dict
        for key, func in d.items():
if key not in galaxy_table.keys():
galaxy_table[key] = func(galaxy_table=galaxy_table)
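A minimal sketch of the kind of ``new_haloprop_func_dict`` the method above expects; the ``host_logmass`` key, the ``compute_host_logmass`` helper, and the component-model class are illustrative assumptions, not part of the original snippet.

import numpy as np

def compute_host_logmass(galaxy_table=None, **kwargs):
    # Each mapped function receives the galaxy_table keyword and returns one value per row
    return np.log10(galaxy_table['halo_mvir'])

class ExampleComponentModel(object):
    def __init__(self):
        # add_new_haloprops only acts when this attribute is present
        self.new_haloprop_func_dict = {'host_logmass': compute_host_logmass}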
|
[
"def test_make_hmp(self):\n table_factory = DataTableFactory(PACKET_DIR)\n table_factory.hmp()",
"def _parse_constructor_kwargs(self, **kwargs):\n\n try:\n halo_id = np.array(kwargs['halo_id'])\n assert type(halo_id) is np.ndarray\n Nhalos = custom_len(halo_id)\n except KeyError:\n msg = (\"\\nThe UserSuppliedHaloCatalog requires a ``halo_id`` keyword argument.\")\n raise HalotoolsError(msg)\n\n halo_table_dict = (\n {key: np.array(kwargs[key]) for key in kwargs\n if ((type(kwargs[key]) is np.ndarray) | (type(kwargs[key]) is Column)) and\n (custom_len(kwargs[key]) == Nhalos) and (key[:5] == 'halo_')})\n self._test_halo_table_dict(halo_table_dict)\n\n metadata_dict = (\n {key: kwargs[key] for key in kwargs\n if (key not in halo_table_dict) and (key != 'ptcl_table')}\n )\n\n return halo_table_dict, metadata_dict",
"def test_tinker13_populate():\n model = PrebuiltHodModelFactory(\n \"tinker13\",\n quiescent_fraction_abscissa=[1e12, 1e15],\n quiescent_fraction_ordinates=[0.5, 0.5],\n )\n fake_sim = FakeSim(num_halos_per_massbin=500)\n model.populate_mock(fake_sim, seed=43)\n\n mask = model.mock.galaxy_table[\"halo_mvir\"] > 1e12\n mask *= model.mock.galaxy_table[\"halo_mvir\"] < 1e15\n mask *= model.mock.galaxy_table[\"gal_type\"] == \"centrals\"\n cens = model.mock.galaxy_table[mask]\n mc_quiescent_fraction = np.mean(cens[\"central_sfr_designation\"] == \"quiescent\")\n assert np.allclose(mc_quiescent_fraction, 0.5, rtol=0.1)",
"def _add_hybrid_cols(self):\n for new_col_name, method in HYBRID_METHODS.items():\n out = method(self)\n if out is not None:\n try:\n self._hybrid_meta[new_col_name] = out\n except ValueError as e:\n msg = (\"Unable to add {!r} column to hybrid meta. The \"\n \"following exception was raised when adding \"\n \"the data output by '{}': {!r}.\")\n w = msg.format(new_col_name, method.__name__, e)\n logger.warning(w)\n warn(w, OutputWarning)",
"def _populate(self):\n skip = (\n dir(type(\"dummy\", (object,), {}))\n + [\n \"_is_rhino\",\n \"dataframe\",\n \"_populate\",\n \"config\",\n \"recipes\",\n \"_drop_features\",\n \"current_recipe\",\n ]\n + RECIPES\n + COMPONENTS\n )\n\n attrs = (\n item\n for item in dir(RhinoPhysics)\n if (item not in skip)\n and ((\"__\" not in item))\n and (not item.startswith(\"_\"))\n # and (not np.isnan(getattr(self, item)))\n # and (not getattr(self, item) is None)\n and (\n (not item.startswith(\"a_\"))\n if \"axial\" not in self.components_to_process\n else True\n )\n and (\n (not item.startswith(\"t_\"))\n if \"tangential\" not in self.components_to_process\n else True\n )\n and (\n (not item.startswith(\"r_\"))\n if \"radial\" not in self.components_to_process\n else True\n )\n )\n\n for col in attrs:\n logger.debug(\"Adding {} to dataframe.\".format(col))\n try:\n self.dataframe[col] = getattr(self, col)\n except Exception as e:\n logger.debug(\"Failed to add {} to dataframe, ERROR: {}\".format(col, e))\n\n self._is_populated = True",
"def test__init__with_metadata(self, gaussian_copula_mock):\n # Setup\n metadata = MagicMock(spec_set=Table)\n\n # Run\n TabularPreset(name='FAST_ML', metadata=metadata)\n\n # Assert\n gaussian_copula_mock.assert_called_once_with(\n table_metadata=metadata.to_dict(),\n constraints=None,\n categorical_transformer='categorical_fuzzy',\n default_distribution='gaussian',\n rounding=None,\n )",
"def _generate_hcs_meta(self):\n self.hcs_meta = {'plate': self.plate_meta}\n\n well_metas = []\n for well in self.wells:\n meta = self.store[well].attrs.get('well')\n well_metas.append(meta)\n\n self.hcs_meta['well'] = well_metas",
"def set_halo(self, halo_dict):\n self.halo.set_halo(halo_dict)",
"def addfunctions2new(abunch, key):\n snames = [\n \"BuildingSurface:Detailed\",\n \"Wall:Detailed\",\n \"RoofCeiling:Detailed\",\n \"Floor:Detailed\",\n \"FenestrationSurface:Detailed\",\n \"Shading:Site:Detailed\",\n \"Shading:Building:Detailed\",\n \"Shading:Zone:Detailed\",\n ]\n snames = [sname.upper() for sname in snames]\n if key in snames:\n func_dict = {\n \"area\": fh.area,\n \"height\": fh.height, # not working correctly\n \"width\": fh.width, # not working correctly\n \"azimuth\": fh.azimuth,\n \"tilt\": fh.tilt,\n \"coords\": fh.getcoords, # needed for debugging\n }\n try:\n abunch.__functions.update(func_dict)\n except KeyError as e:\n abunch.__functions = func_dict\n return abunch",
"def mock_Ih_table():\n Ih_table = Mock()\n Ih_table.variances = flex.double([1.0, 1.0, 2.0, 2.0])\n Ih_table.intensities = flex.double([1.0, 1.0, 2.0, 4.5])\n Ih_table.inverse_scale_factors = flex.double(4, 1.0)\n Ih_table.Ih_values = flex.double(4, 1.5)\n Ih_table.size = 4\n return Ih_table",
"def test_hbamanager_list_full_properties(\n self, full_properties_kwargs, prop_names):\n\n # Add two faked HBAs\n faked_hba1 = self.add_hba1()\n faked_hba2 = self.add_hba2()\n\n exp_faked_hbas = [faked_hba1, faked_hba2]\n hba_mgr = self.partition.hbas\n\n # Execute the code to be tested\n hbas = hba_mgr.list(**full_properties_kwargs)\n\n assert_resources(hbas, exp_faked_hbas, prop_names)",
"def make(self, halo_spots):\n calls = {}\n generated = OrderedDict()\n for hs in halo_spots:\n # 1) Callables/Calls for send/recv\n begin_exchange = []\n for f, v in hs.fmapper.items():\n # Sanity check\n assert f.is_Function\n assert f.grid is not None\n\n # Note: to construct the halo exchange Callables, use the generic `df`,\n # instead of `f`, so that we don't need to regenerate code for Functions\n # that are symbolically identical to `f` except for the name\n df = f.__class__.__base__(name='a', grid=f.grid, shape=f.shape_global,\n dimensions=f.dimensions)\n # `gather`, `scatter`, `sendrecv` and `haloupdate` are generic by\n # construction -- they only need to be generated once for each unique\n # pair (`ndim`, `halos`)\n if (f.ndim, v) not in generated:\n key = len(generated)\n haloupdate = self._make_haloupdate(df, v.loc_indices, v.halos, key)\n sendrecv = self._make_sendrecv(df, v.loc_indices)\n gather = self._make_copy(df, v.loc_indices)\n scatter = self._make_copy(df, v.loc_indices, swap=True)\n # Arrange the newly constructed Callables in a suitable data\n # structure to capture the call tree. This may be useful to\n # the HaloExchangeBuilder user\n haloupdate = EFuncNode(haloupdate)\n sendrecv = EFuncNode(sendrecv, haloupdate)\n gather = EFuncNode(gather, sendrecv)\n scatter = EFuncNode(scatter, sendrecv)\n\n generated[(f.ndim, v)] = haloupdate\n\n # `haloupdate` Call construction\n comm = f.grid.distributor._obj_comm\n nb = f.grid.distributor._obj_neighborhood\n loc_indices = list(v.loc_indices.values())\n args = [f, comm, nb] + loc_indices\n begin_exchange.append(Call(generated[(f.ndim, v)].name, args))\n\n # 2) Callables/Calls for wait (no-op in case of synchronous halo exchange)\n wait_exchange = []\n for f, v in hs.fmapper.items():\n # TODO\n pass\n\n # 3) Callables/Calls for remainder computation (no-op in case of\n # synchronous halo exchange)\n remainder = []\n\n calls[hs] = List(body=begin_exchange + [hs.body] + wait_exchange + remainder)\n\n return flatten(generated.values()), calls",
"def init_properties(data_frame):\n # Validate input parameters\n\n # # The input object should be of type pandas DataFrame\n validate_object_type(data_frame, pd.DataFrame)\n\n # Get the catalog instance\n catalog = Catalog.Instance()\n\n # Initialize the property in the catalog.\n # Relay the return value from the underlying catalog object's function.\n # The return value is typically True if the initialization was successful\n return catalog.init_properties(data_frame)",
"def test_clf_support():\n dummy_model_dict = {'centrals_occupation': DummyCLF('centrals', -19, 1., 'halo_mvir')}\n model = HodModelFactory(**dummy_model_dict)\n\n # Ensure that the additional galaxy property defined by the component model\n # makes it into the _galprop_dtypes_to_allocate of the composite model\n assert 'luminosity' in model._galprop_dtypes_to_allocate.names\n\n # Ensure that we can populate a mock\n # and that the luminosity assignment occurs as expected\n halocat = FakeSim()\n model.populate_mock(halocat)\n assert np.allclose(model.mock.galaxy_table['luminosity'],\n np.linspace(0, 1, len(model.mock.galaxy_table)))",
"def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):\n for key, value in harmonized_metadata.items():\n setattr(sample, key, value)",
"def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2",
"def test_create_tbl_dict(self):\n self.table.name = 'myTable'\n self.table.cols = {'col1': 'INTEGER', 'col2': 'TEXT'}\n with self.assertRaises(AssertionError):\n self.table.get_create_statement()",
"def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None",
"def test_single_property_column_factory_call(mocked_column_factory: mock.MagicMock):\n model_schema = {\n \"x-tablename\": \"table 1\",\n \"type\": \"object\",\n \"properties\": {\"id\": {\"type\": \"integer\"}},\n }\n model_name = \"SingleProperty\"\n schemas = {model_name: model_schema}\n artifacts = schemas_artifacts.get_from_schemas(\n schemas=schemas, stay_within_model=True\n )\n model_factory.model_factory(\n name=model_name,\n get_base=_mock_get_base,\n schemas=copy.deepcopy(schemas),\n artifacts=artifacts,\n )\n\n mocked_column_factory.assert_called_once_with(\n artifacts=artifacts[model_name].properties[0][1]\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Currently not implemented. The first print shows the date the video file was last modified; the second print shows the date the video file was created, i.e. the time when it was written to the folder.
|
def creation_date_video(path_to_file):
print("Last modified: %s" % time.ctime(os.path.getmtime(path_to_file)))
print("Created: %s" % time.ctime(os.path.getctime(path_to_file)))
# return os.path.getctime(path_to_file)
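If the commented-out return were restored, a small variant along these lines could hand back a usable value instead of printing; returning a ``datetime`` object is an assumption added here, not part of the original snippet.

import os
import datetime

def creation_timestamp_video(path_to_file):
    # Note: getctime is creation time on Windows, but metadata-change time on most Unix systems
    return datetime.datetime.fromtimestamp(os.path.getctime(path_to_file))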
|
[
"def add_timestamps(dir_video):\n print(\"Adding creation dates to file names\")\n os.chdir(dir_video)\n # get only top level dir info\n dir_data_video_files = next(os.walk(dir_video))\n list_video_files = dir_data_video_files[2] # get file list\n for f_name in list_video_files:\n if GOPRO_PATTERN.search(f_name):\n f_time = time.strftime(r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(f_name)))\n os.rename(f_name, f\"{f_time}_{f_name}\")",
"def test_extract_date_video2(datafiles, tmpdir):\n for file in datafiles.listdir():\n new_file = File(str(file), os.path.join(str(tmpdir), 'Videos'))\n assert new_file.filename_with_path.endswith('IMG_0036.MOV')\n assert new_file.copy_successful == False\n assert new_file.file_type == 'Video'\n assert new_file.date_created == '2019-01-08'",
"def test_extract_date_video(datafiles, tmpdir):\n for file in datafiles.listdir():\n new_file = File(str(file), os.path.join(str(tmpdir), 'Videos'))\n assert new_file.filename_with_path.endswith('2019-01-05_13-25-15_IMG_0036.MOV')\n assert new_file.copy_successful == False\n assert new_file.file_type == 'Video'\n assert new_file.date_created == '2019-01-05'",
"def media_file_info(self):\n\n if self.observationId and self.playerType == VLC:\n\n media = self.mediaplayer.get_media()\n\n logging.info(\"State: {}\".format(self.mediaplayer.get_state()))\n logging.info(\"Media (get_mrl): {}\".format(bytes_to_str(media.get_mrl())))\n logging.info(\"media.get_meta(0): {}\".format(media.get_meta(0)))\n logging.info(\n \"Track: {}/{}\".format(self.mediaplayer.video_get_track(), self.mediaplayer.video_get_track_count()))\n logging.info(\"number of media in media list: {}\".format(self.media_list.count()))\n logging.info(\"get time: {} duration: {}\".format(self.mediaplayer.get_time(), media.get_duration()))\n logging.info(\"Position: {} %\".format(self.mediaplayer.get_position()))\n logging.info(\"FPS: {}\".format(self.mediaplayer.get_fps()))\n logging.info(\"Rate: {}\".format(self.mediaplayer.get_rate()))\n logging.info(\"Video size: {}\".format(self.mediaplayer.video_get_size(0)))\n logging.info(\"Scale: {}\".format(self.mediaplayer.video_get_scale()))\n logging.info(\"Aspect ratio: {}\".format(self.mediaplayer.video_get_aspect_ratio()))\n logging.info(\"is seekable? {0}\".format(self.mediaplayer.is_seekable()))\n logging.info(\"has_vout? {0}\".format(self.mediaplayer.has_vout()))\n\n vlc_output = (\"State: {}<br>\"\n \"Media Resource Location: {}<br>\"\n \"File name: {}<br>\"\n \"Track: {}/{}<br>\"\n \"Number of media in media list: {}<br>\"\n \"get time: {}<br>\"\n \"duration: {}<br>\"\n \"Position: {} %<br>\"\n \"FPS: {}<br>\"\n \"Rate: {}<br>\"\n \"Video size: {}<br>\"\n \"Scale: {}<br>\"\n \"Aspect ratio: {}<br>\"\n \"is seekable? {}<br>\"\n \"has_vout? {}<br>\").format(self.mediaplayer.get_state(),\n bytes_to_str(media.get_mrl()),\n media.get_meta(0),\n self.mediaplayer.video_get_track(),\n self.mediaplayer.video_get_track_count(),\n self.media_list.count(),\n self.mediaplayer.get_time(),\n self.convertTime(media.get_duration() / 1000),\n self.mediaplayer.get_position(),\n self.mediaplayer.get_fps(),\n self.mediaplayer.get_rate(),\n self.mediaplayer.video_get_size(0),\n self.mediaplayer.video_get_scale(),\n self.mediaplayer.video_get_aspect_ratio(),\n \"Yes\" if self.mediaplayer.is_seekable() else \"No\",\n \"Yes\" if self.mediaplayer.has_vout() else \"No\"\n )\n\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n\n self.results.ptText.appendHtml(\"<b>VLC analysis</b><hr>\" + vlc_output)\n\n # FFmpeg analysis\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n for nplayer in self.pj[OBSERVATIONS][self.observationId][FILE]:\n for filePath in self.pj[OBSERVATIONS][self.observationId][FILE][nplayer]:\n media_full_path = project_functions.media_full_path(filePath, self.projectFileName)\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, media_full_path)\n\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, media_full_path)\n nframes = r[\"frames_number\"]\n\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=media_full_path,\n error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(media_full_path, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"],\n r[\"has_video\"], r[\"has_audio\"]))\n\n self.results.ptText.appendHtml(\"Total duration: {} (hh:mm:ss.sss)\".\n 
format(self.convertTime(sum(self.duration) / 1000)))\n\n self.results.show()\n\n else:\n\n fn = QFileDialog(self).getOpenFileName(self, \"Select a media file\", \"\", \"Media files (*)\")\n filePath = fn[0] if type(fn) is tuple else fn\n\n if filePath:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, filePath)\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, filePath)\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=filePath, error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(filePath, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"], r[\"has_video\"],\n r[\"has_audio\"]))\n\n self.results.show()",
"def getModifiedTime(self):\n return os.stat(\"%s\" % self.file)[8]",
"def date_modified(self):\n return self.phasset.modificationDate()",
"def get_creation_time(ts):\n path_to_embed_file = os.path.join(DATA_DIR, STUDY, \"experiment_files\", \"experiment_\"+ ts, \"triplet_training_validation_embeddings.h5\")\n\n if os.path.exists(path_to_embed_file):\n stat = os.stat(path_to_embed_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime\n else:\n print (\"here, path is: \", path_to_embed_file)\n return None",
"def daily_update_video():\n PATHS = BeWatch.db.get_paths()\n # find video files\n mp4_files = glob.glob(os.path.join(PATHS['video_dir'], '*.mp4'))\n mkv_files = glob.glob(os.path.join(PATHS['video_dir'], '*.mkv'))\n video_files = mp4_files + mkv_files\n \n # Load existing video file dataframe and use as a cache\n # This way we don't have to reprocess videos we already know about\n vdf = BeWatch.db.get_video_df() \n \n # Parse into df\n video_files_df = BeWatch.db.parse_video_filenames(\n video_files, verbose=True,\n cached_video_files_df=vdf)\n\n # store copy for error check (to ensure that localeifying and\n # writing to disk didn't corrupt anything)\n video_files_df_local = video_files_df.copy()\n\n # locale-ify\n video_files_df['filename'] = video_files_df['filename'].str.replace(\n PATHS['video_dir'], '$video_dir$')\n \n # Save\n filename = os.path.join(PATHS['database_root'], 'video.csv')\n video_files_df.to_csv(filename, index=False) \n \n # Test the reading/writing is working\n # Although if it failed, it's too late\n vdf = BeWatch.db.get_video_df()\n if not (video_files_df_local == vdf).all().all():\n raise ValueError(\"read/write error in video database\")",
"def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()",
"def _modification_lapse(self, filename):\n ct1 = os.path.getmtime(filename)\n ct2 = os.path.getctime(filename)\n\n # it can differ from windows to UN*X\n ct = max(ct1, ct2)\n\n now = time.mktime(time.gmtime())\n\n return now - ct + self._fs_time_skew",
"def get_upload_date(self, video_ID):\n self.cur.execute(\"SELECT upload_date FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]",
"def watch_movie():\r\n if os.path.isfile('files/final_movie.mp4'): # if the file exists\r\n with open('files/final_movie.mp4', 'rb') as f:\r\n video_data = f.read()\r\n st.video(video_data)\r\n else: # if the file doesn't exist, let the user know\r\n st.header(\"You haven't created a movie yet!\")",
"def modification_date(filepath):\n if not filepath:\n return ''\n t = os.path.getmtime(filepath)\n return datetime.datetime.utcfromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')",
"def timestamp():\n print(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\") + \" \" + __file__)",
"def timestamp():\n print(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\") + \" \" +\n __file__)",
"def test_new_video_file_nominal():\n new_file = File('/Users/me/Pictures/iPhone8/img003.MOV', '/Users/me/Pictures/Pictures')\n assert new_file.filename_with_path == '/Users/me/Pictures/iPhone8/img003.MOV'\n assert new_file.base_destination_directory == '/Users/me/Pictures/Pictures/'\n assert new_file.copy_successful == False\n assert new_file.file_type == 'Video'\n assert new_file.date_created == ''\n assert new_file.destination_directory == '/Users/me/Pictures/Pictures/Date_Unknown'",
"def last_videos_recorded(self) -> list:\n return sorted(glob.glob(VIDEOS_DIR), key=os.path.getmtime)[-20:]",
"def GetModTime(self):\n return self.file.ModTime",
"def writeMetadata(path,filename,filetype,ObjectList,VideoRecorder = None):\n\tprint('writing metadata, for saving to {}'.format(path+filename+'.pickle'))\n\tnow = datetime.datetime.now() # current date and time\n\tmetadata = OrderedDict()\n\tmetadata['Path'] = path\n\tmetadata['Filename'] = filename\n\tmetadata['Format'] = filetype\n\tmetadata['datetime'] = now\n\tv = cv2.VideoCapture(path+filename+filetype)\n\tmetadata['Frames'] = v.get(cv2.CAP_PROP_FRAME_COUNT)\n\n\tif VideoRecorder is not None:\n\t\tfps = VideoRecorder.FPStracker.fps() # if you have a more accurate measure\n\telse:\n\t\ttry:\n\t\t\tfps = loadData(path,filename)[0]['FPS']\n\t\texcept:\n\t\t\tfps = None\n\t\tif fps is not None:\n\t\t\tpass\n\t\telse:\n\t\t\tfps = v.get(cv2.CAP_PROP_FPS) # trusting camera FPS\n\tmetadata['FPS'] = fps\n\tmetadata['Length'] = metadata['Frames']/metadata['FPS']\n\tmetadata['Resolution'] = [v.get(3),v.get(4)]\n\tv.release()\n\t# Save the object description (not the x,y,theta data: no processing yet)\n\t# and tracker coordinates for every object\n\tmetadata['Num Objects'] = len(ObjectList)\n\tfor i,object in enumerate(ObjectList):\n\t\tkey = \"object{}\".format(i)\n\t\tt1 = object.Tracker1\n\t\tt2 = object.Tracker2\n\t\tcoord1 = [t1.x,t1.y,t1.w,t1.h,t1.ang]\n\t\tcoord2 = [t2.x,t2.y,t2.w,t2.h,t2.ang]\n\t\tmetadata[key+'_ID'] = object.ID\n\t\tmetadata[key+'_profile'] = object.IDprofile\n\t\tmetadata[key+'_Tracker1_Coords'] = coord1\n\t\tmetadata[key+'_Tracker1_BGR_range'] = t1.bgrRange\n\t\tmetadata[key+'_Tracker2_Coords'] = coord2\n\t\tmetadata[key+'_Tracker2_BGR_range'] = t2.bgrRange\n\treturn metadata"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Actions for Diffrn objects.
|
def action_diffrn(obj: Diffrn, thread: QtCore.QThread):
w_actions = []
f_setup = obj.is_attribute("setup")
f_diffrn_radiation = obj.is_attribute("diffrn_radiation")
f_diffrn_orient_matrix = obj.is_attribute("diffrn_orient_matrix")
f_diffrn_refln = obj.is_attribute("diffrn_refln")
f_phase = obj.is_attribute("phase")
if not(f_setup & f_diffrn_radiation & f_diffrn_orient_matrix &
f_diffrn_refln & f_phase):
if not(f_setup):
qtb_1 = QtWidgets.QToolButton()
qtb_1.setText("Add setup")
qtb_1.clicked.connect(lambda: add_items(obj, [Setup()], thread))
w_actions.append(qtb_1)
if not(f_diffrn_radiation):
qtb_1 = QtWidgets.QToolButton()
qtb_1.setText("Add diffrn_radiation")
qtb_1.clicked.connect(lambda: add_items(
obj, [DiffrnRadiation()], thread))
w_actions.append(qtb_1)
if not(f_diffrn_orient_matrix):
qtb_1 = QtWidgets.QToolButton()
qtb_1.setText("Add diffrn_orient_matrix")
qtb_1.clicked.connect(lambda: add_items(obj, [DiffrnOrientMatrix(
ub_11=1., ub_12=0., ub_13=0., ub_21=0., ub_22=1., ub_23=0.,
ub_31=0., ub_32=0., ub_33=1.,)], thread))
w_actions.append(qtb_1)
if not(f_diffrn_refln):
qtb_1 = QtWidgets.QToolButton()
qtb_1.setText("Add diffrn_refln")
qtb_1.clicked.connect(lambda: add_items(
obj, [DiffrnReflnL()], thread))
w_actions.append(qtb_1)
if not(f_phase):
qtb_1 = QtWidgets.QToolButton()
qtb_1.setText("Add phase")
qtb_1.clicked.connect(lambda: add_items(obj, [
Phase(label="phase")], thread))
w_actions.append(qtb_1)
if f_diffrn_refln:
diffrn_refln = obj.diffrn_refln
w_actions.extend(action_diffrn_refln_l(diffrn_refln, thread))
if f_diffrn_orient_matrix:
diffrn_orient_matrix = obj.diffrn_orient_matrix
w_actions.extend(action_diffrn_orient_matrix(
diffrn_orient_matrix, thread))
return w_actions
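A hedged sketch of how the tool buttons returned by ``action_diffrn`` might be mounted on a panel; the ``build_diffrn_panel`` helper and the PyQt5-style import are assumptions for illustration, while ``action_diffrn`` itself is the function defined above.

from PyQt5 import QtWidgets

def build_diffrn_panel(obj, thread):
    # Stack the "Add ..." buttons (and any nested refln/orient-matrix actions) vertically
    panel = QtWidgets.QWidget()
    layout = QtWidgets.QVBoxLayout(panel)
    for widget in action_diffrn(obj, thread):
        layout.addWidget(widget)
    return panel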
|
[
"def diff(self, **kargs):\n refs, count, objs = self.collect() ## refs contains the list of ALL objects\n \n ## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)\n delRefs = {}\n for i in list(self.startRefs.keys()):\n if i not in refs:\n delRefs[i] = self.startRefs[i]\n del self.startRefs[i]\n self.forgetRef(delRefs[i])\n for i in list(self.newRefs.keys()):\n if i not in refs:\n delRefs[i] = self.newRefs[i]\n del self.newRefs[i]\n self.forgetRef(delRefs[i])\n #print \"deleted:\", len(delRefs)\n \n ## Which refs have appeared since call to start() or diff()\n persistentRefs = {} ## created since start(), but before last diff()\n createRefs = {} ## created since last diff()\n for o in refs:\n if o not in self.startRefs: \n if o not in self.newRefs: \n createRefs[o] = refs[o] ## object has been created since last diff()\n else:\n persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)\n #print \"new:\", len(newRefs)\n \n ## self.newRefs holds the entire set of objects created since start()\n for r in self.newRefs:\n self.forgetRef(self.newRefs[r])\n self.newRefs.clear()\n self.newRefs.update(persistentRefs)\n self.newRefs.update(createRefs)\n for r in self.newRefs:\n self.rememberRef(self.newRefs[r])\n #print \"created:\", len(createRefs)\n \n ## self.persistentRefs holds all objects considered persistent.\n self.persistentRefs.clear()\n self.persistentRefs.update(persistentRefs)\n \n \n print(\"----------- Count changes since start: ----------\")\n c1 = count.copy()\n for k in self.startCount:\n c1[k] = c1.get(k, 0) - self.startCount[k]\n typs = list(c1.keys())\n typs.sort(key=lambda a: c1[a])\n for t in typs:\n if c1[t] == 0:\n continue\n num = \"%d\" % c1[t]\n print(\" \" + num + \" \"*(10-len(num)) + str(t))\n \n print(\"----------- %d Deleted since last diff: ------------\" % len(delRefs))\n self.report(delRefs, objs, **kargs)\n print(\"----------- %d Created since last diff: ------------\" % len(createRefs))\n self.report(createRefs, objs, **kargs)\n print(\"----------- %d Created since start (persistent): ------------\" % len(persistentRefs))\n self.report(persistentRefs, objs, **kargs)",
"def diff(self, other=diff.Diffable.Index, paths=None, create_patch=False, **kwargs):\r\n # index against index is always empty\r\n if other is self.Index:\r\n return diff.DiffIndex()\r\n\r\n # index against anything but None is a reverse diff with the respective\r\n # item. Handle existing -R flags properly. Transform strings to the object\r\n # so that we can call diff on it\r\n if isinstance(other, basestring):\r\n other = self.repo.rev_parse(other)\r\n # END object conversion\r\n\r\n if isinstance(other, Object):\r\n # invert the existing R flag\r\n cur_val = kwargs.get('R', False)\r\n kwargs['R'] = not cur_val\r\n return other.diff(self.Index, paths, create_patch, **kwargs)\r\n # END diff against other item handlin\r\n\r\n # if other is not None here, something is wrong\r\n if other is not None:\r\n raise ValueError( \"other must be None, Diffable.Index, a Tree or Commit, was %r\" % other )\r\n\r\n # diff against working copy - can be handled by superclass natively\r\n return super(IndexFile, self).diff(other, paths, create_patch, **kwargs)",
"def svn_ra_do_diff(*args):\r\n return _ra.svn_ra_do_diff(*args)",
"def make_actions_from_diff(self, diff):\n (p1,\n p2,\n m_shared,\n m_to_be_deleted,\n m_to_be_added,\n parameter_changes,\n c_shared,\n c_to_be_deleted,\n c_to_be_added) = (diff.p1,\n diff.p2,\n diff.v1andv2,\n diff.v1only,\n diff.v2only,\n diff.paramchanged,\n diff.c1andc2,\n diff.c1only,\n diff.c2only)\n\n p1_c = copy.copy(p1)\n result = []\n\n module_id_remap = Bidict()\n module_id_remap.update(m_shared)\n\n connection_id_remap = Bidict()\n connection_id_remap.update(c_shared)\n \n for ((m_id_from, m_id_to), _) in parameter_changes:\n module_id_remap[m_id_from] = m_id_to\n\n # First all the modules to get the remap\n for p2_m_id in m_to_be_added:\n add_module = AddModuleAction()\n add_module.module = copy.copy(p2.modules[p2_m_id])\n add_module.module.id = p1_c.fresh_module_id()\n module_id_remap[add_module.module.id] = p2_m_id\n result.append(add_module)\n add_module.perform(p1_c)\n\n\n # Then all the connections using the remap\n for p2_c_id in c_to_be_added:\n c2 = p2.connections[p2_c_id]\n add_connection = AddConnectionAction()\n new_c = copy.copy(c2)\n add_connection.connection = new_c\n new_c.id = p1_c.fresh_connection_id()\n new_c.sourceId = module_id_remap.inverse[c2.sourceId]\n new_c.destinationId = module_id_remap.inverse[c2.destinationId]\n connection_id_remap[c2.id] = new_c.id\n result.append(add_connection)\n add_connection.perform(p1_c)\n\n\n # Now delete all connections:\n delete_conns = DeleteConnectionAction()\n delete_conns.ids = copy.copy(c_to_be_deleted)\n if len(delete_conns.ids) > 0:\n delete_conns.perform(p1_c)\n result.append(delete_conns)\n\n # And then all the modules\n delete_modules = DeleteModuleAction()\n delete_modules.ids = copy.copy(m_to_be_deleted)\n if len(delete_modules.ids) > 0:\n delete_modules.perform(p1_c)\n result.append(delete_modules)\n\n # From now on, module_id_remap is not necessary, we can act\n # on p1 ids without worry. (they still exist)\n\n # Now move everyone\n move_action = MoveModuleAction()\n for (p1_m_id, p2_m_id) in m_shared.iteritems():\n delta = p2.modules[p2_m_id].location - p1.modules[p1_m_id].location\n move_action.addMove(p1_m_id, delta.x, delta.y)\n move_action.perform(p1_c)\n result.append(move_action)\n\n # Now change parameters\n def make_param_change(fto_name, fto_params,\n m_id, f_id, m):\n action = ChangeParameterAction()\n for (p_id, param) in enumerate(fto_params):\n p_name = m.functions[f_id].params[p_id].name\n p_alias = m.functions[f_id].params[p_id].alias\n (p_type, p_value) = param\n action.addParameter(m_id, f_id, p_id, fto_name,\n p_name, p_value, p_type, p_alias)\n return action\n \n if len(parameter_changes):\n # print parameter_changes\n for ((m_from_id, m_to_id), plist) in parameter_changes:\n m_from = p1.modules[m_to_id]\n for ((ffrom_name, ffrom_params),\n (fto_name, fto_params)) in plist:\n for (f_id, f) in enumerate(m_from.functions):\n if f.name != fto_name: continue\n new_action = make_param_change(fto_name,\n fto_params,\n m_from_id,\n f_id,\n m_from)\n new_action.perform(p1_c)\n result.append(new_action)\n\n return (result,\n module_id_remap,\n connection_id_remap)",
"def svn_ra_plugin_invoke_do_diff(*args):\r\n return _ra.svn_ra_plugin_invoke_do_diff(*args)",
"def update_actions(self):\n pass",
"def diff(self, *args):\n return _almathswig.Transform_diff(self, *args)",
"def diff(cls, request):\n run_pager(request.diff())",
"def update_actions(self):\r\n super().update_actions()",
"def diff():\n print('SVN diff')",
"def diff(\n self,\n other: Union[Type[\"Index\"], \"Tree\", \"Commit\", None, str, object] = Index,\n paths: Union[PathLike, List[PathLike], Tuple[PathLike, ...], None] = None,\n create_patch: bool = False,\n **kwargs: Any,\n ) -> \"DiffIndex\":\n args: List[Union[PathLike, Diffable, Type[\"Diffable.Index\"], object]] = []\n args.append(\"--abbrev=40\") # we need full shas\n args.append(\"--full-index\") # get full index paths, not only filenames\n\n # remove default '-M' arg (check for renames) if user is overriding it\n if not any(x in kwargs for x in (\"find_renames\", \"no_renames\", \"M\")):\n args.append(\"-M\")\n\n if create_patch:\n args.append(\"-p\")\n else:\n args.append(\"--raw\")\n args.append(\"-z\")\n\n # in any way, assure we don't see colored output,\n # fixes https://github.com/gitpython-developers/GitPython/issues/172\n args.append(\"--no-color\")\n\n if paths is not None and not isinstance(paths, (tuple, list)):\n paths = [paths]\n\n if hasattr(self, \"Has_Repo\"):\n self.repo: \"Repo\" = self.repo\n\n diff_cmd = self.repo.git.diff\n if other is self.Index:\n args.insert(0, \"--cached\")\n elif other is NULL_TREE:\n args.insert(0, \"-r\") # recursive diff-tree\n args.insert(0, \"--root\")\n diff_cmd = self.repo.git.diff_tree\n elif other is not None:\n args.insert(0, \"-r\") # recursive diff-tree\n args.insert(0, other)\n diff_cmd = self.repo.git.diff_tree\n\n args.insert(0, self)\n\n # paths is list here or None\n if paths:\n args.append(\"--\")\n args.extend(paths)\n # END paths handling\n\n kwargs[\"as_process\"] = True\n proc = diff_cmd(*self._process_diff_args(args), **kwargs)\n\n diff_method = Diff._index_from_patch_format if create_patch else Diff._index_from_raw_format\n index = diff_method(self.repo, proc)\n\n proc.wait()\n return index",
"def svn_ra_do_diff2(*args):\r\n return _ra.svn_ra_do_diff2(*args)",
"def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value",
"def PostProcessDiff(self, diff):\r\n return diff",
"def _render_diff(self, req, ticket, data, text_fields):\r\n new_version = int(req.args.get('version', 1))\r\n old_version = int(req.args.get('old_version', new_version))\r\n if old_version > new_version:\r\n old_version, new_version = new_version, old_version\r\n\r\n # get the list of versions having a description change\r\n history = self._get_history(req, ticket)\r\n changes = {}\r\n descriptions = []\r\n old_idx = new_idx = -1 # indexes in descriptions\r\n for change in history:\r\n version = change['version']\r\n changes[version] = change\r\n if any([f in text_fields for f in change['fields']]):\r\n if old_version and version <= old_version:\r\n old_idx = len(descriptions)\r\n if new_idx == -1 and new_version and version >= new_version:\r\n new_idx = len(descriptions)\r\n descriptions.append((version, change))\r\n\r\n # determine precisely old and new versions\r\n if old_version == new_version:\r\n if new_idx >= 0:\r\n old_idx = new_idx - 1\r\n if old_idx >= 0:\r\n old_version, old_change = descriptions[old_idx]\r\n else:\r\n old_version, old_change = 0, None\r\n num_changes = new_idx - old_idx\r\n if new_idx >= 0:\r\n new_version, new_change = descriptions[new_idx]\r\n else:\r\n raise TracError(_('No differences to show'))\r\n\r\n tnew = ticket.resource(version=new_version)\r\n told = ticket.resource(version=old_version)\r\n\r\n req.perm(tnew).require('TICKET_VIEW')\r\n req.perm(told).require('TICKET_VIEW')\r\n\r\n # determine prev and next versions\r\n prev_version = old_version\r\n next_version = None\r\n if new_idx < len(descriptions) - 1:\r\n next_version = descriptions[new_idx+1][0]\r\n\r\n # -- old properties (old_ticket) and new properties (new_ticket)\r\n\r\n # assume a linear sequence of change numbers, starting at 1, with gaps\r\n def replay_changes(values, old_values, from_version, to_version):\r\n for version in range(from_version, to_version+1):\r\n if version in changes:\r\n for k, v in changes[version]['fields'].iteritems():\r\n values[k] = v['new']\r\n if old_values is not None and k not in old_values:\r\n old_values[k] = v['old']\r\n\r\n old_ticket = {}\r\n if old_version:\r\n replay_changes(old_ticket, None, 1, old_version)\r\n\r\n new_ticket = dict(old_ticket)\r\n replay_changes(new_ticket, old_ticket, old_version+1, new_version)\r\n\r\n changes = []\r\n\r\n def version_info(t, field=None):\r\n path = 'Ticket #%s' % ticket.id\r\n # TODO: field info should probably be part of the Resource as well\r\n if field:\r\n path = tag(path, Markup(' – '), field)\r\n if t.version:\r\n rev = _('Version %(num)s', num=t.version)\r\n shortrev = 'v%d' % t.version\r\n else:\r\n rev, shortrev = _('Initial Version'), _('initial')\r\n return {'path': path, 'rev': rev, 'shortrev': shortrev,\r\n 'href': get_resource_url(self.env, t, req.href)}\r\n\r\n # -- prop changes\r\n props = []\r\n for k, v in new_ticket.iteritems():\r\n if k not in text_fields:\r\n old, new = old_ticket[k], new_ticket[k]\r\n if old != new:\r\n prop = {'name': k,\r\n 'old': {'name': k, 'value': old},\r\n 'new': {'name': k, 'value': new}}\r\n rendered = self._render_property_diff(req, ticket, k,\r\n old, new, tnew)\r\n if rendered:\r\n prop['diff'] = tag.li('Property ', tag.strong(k),\r\n ' ', rendered)\r\n props.append(prop)\r\n changes.append({'props': props, 'diffs': [],\r\n 'new': version_info(tnew),\r\n 'old': version_info(told)})\r\n\r\n # -- text diffs\r\n diff_style, diff_options, diff_data = get_diff_options(req)\r\n diff_context = 3\r\n for option in diff_options:\r\n if option.startswith('-U'):\r\n 
diff_context = int(option[2:])\r\n break\r\n if diff_context < 0:\r\n diff_context = None\r\n\r\n for field in text_fields:\r\n old_text = old_ticket.get(field)\r\n old_text = old_text and old_text.splitlines() or []\r\n new_text = new_ticket.get(field)\r\n new_text = new_text and new_text.splitlines() or []\r\n diffs = diff_blocks(old_text, new_text, context=diff_context,\r\n ignore_blank_lines='-B' in diff_options,\r\n ignore_case='-i' in diff_options,\r\n ignore_space_changes='-b' in diff_options)\r\n\r\n changes.append({'diffs': diffs, 'props': [],\r\n 'new': version_info(tnew, field),\r\n 'old': version_info(told, field)})\r\n\r\n # -- prev/up/next links\r\n if prev_version:\r\n add_link(req, 'prev', get_resource_url(self.env, ticket.resource,\r\n req.href, action='diff',\r\n version=prev_version),\r\n _('Version %(num)s', num=prev_version))\r\n add_link(req, 'up', get_resource_url(self.env, ticket.resource,\r\n req.href, action='history'),\r\n 'Ticket History')\r\n if next_version:\r\n add_link(req, 'next', get_resource_url(self.env, ticket.resource,\r\n req.href, action='diff',\r\n version=next_version),\r\n _('Version %(num)s', num=next_version))\r\n\r\n prevnext_nav(req, _('Change'), _('Ticket History'))\r\n add_stylesheet(req, 'common/css/diff.css')\r\n add_script(req, 'common/js/diff.js')\r\n\r\n data.update({\r\n 'title': _('Ticket Diff'),\r\n 'resource': ticket.resource,\r\n 'old_version': old_version, 'new_version': new_version,\r\n 'changes': changes, 'diff': diff_data,\r\n 'num_changes': num_changes, 'change': new_change,\r\n 'old_ticket': old_ticket, 'new_ticket': new_ticket,\r\n 'longcol': '', 'shortcol': ''\r\n })\r\n\r\n return 'diff_view.html', data, None",
"def gitDiff(self, event: Event = None) -> None:\n GitDiffController(c=self.c).git_diff(rev1='HEAD')",
"def run(self, obj, diff):\n for a in AlertRule.objects.all():\n a = json.loads(a.alert_rule)\n\n if a[\"object\"] == obj.__class__.__name__:\n if a[\"attribute\"] in diff:\n if diff[a[\"attribute\"]] == a[\"changed_to\"]:\n # Criteria Satisfied. Run Alert Action\n\n subject = Template(a[\"action\"][\"subject\"]).render(c)\n msg = Template(a[\"action\"][\"message\"]).render(c)\n\n if \"type\" == \"email\":\n # Fill out subject/message Template\n c = Context({\n \"object\": obj,\n \"diff\": diff\n })\n\n if a[\"action\"][\"type\"] == \"email\":\n send_mail(\n subject,\n msg,\n settings.DEFAULT_FROM_EMAIL,\n [a[\"action\"][\"to\"]],\n fail_silently=False,\n )\n\n # TODO Add More Alert Types (phone, text, im)",
"def on_diffRevisionsButton_clicked(self):\n items = self.logTree.selectedItems()\n if len(items) != 2:\n self.diffRevisionsButton.setEnabled(False)\n return\n \n rev2 = int(items[0].text(0))\n rev1 = int(items[1].text(0))\n \n itm = self.logTree.topLevelItem(0)\n if itm is None:\n self.diffPreviousButton.setEnabled(False)\n return\n peg_rev = int(itm.text(0))\n \n self.__diffRevisions(min(rev1, rev2), max(rev1, rev2), peg_rev)",
"def __diffRevisions(self, rev1, rev2, peg_rev):\n if self.sbsCheckBox.isEnabled() and self.sbsCheckBox.isChecked():\n self.vcs.svnSbsDiff(self.filename,\n revisions=(str(rev1), str(rev2)))\n else:\n if self.diff is None:\n from .SvnDiffDialog import SvnDiffDialog\n self.diff = SvnDiffDialog(self.vcs)\n self.diff.show()\n self.diff.raise_()\n QApplication.processEvents()\n self.diff.start(self.filename, [rev1, rev2], pegRev=peg_rev)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method to scan a product. Adds the scanned product to the current order.
|
def scan(self, product_code):
self.order.add_product(product_code)
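A minimal sketch of the ``order`` collaborator the method above delegates to; this ``Order`` class is illustrative, not taken from the original code base.

class Order(object):
    def __init__(self):
        self.product_codes = []

    def add_product(self, product_code):
        # scan() delegates here, so the order keeps the running list of scanned codes
        self.product_codes.append(product_code)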
|
[
"def add_product(self, product):\n self.products.append(product)",
"def add(self, product):\n pass",
"def add_product(self, product):\n return self._inventory.append(product)",
"def orderWatch(self, order):\r\n\t\tself.orders.append(order)",
"def process_orders(self, file_name: str) -> None:\n op = OrderProcessor()\n for an_order in op.process_data(file_name):\n\n product_id = an_order.product_id\n if product_id not in self.item_dic:\n self.item_dic[product_id] = []\n\n # if the order contains more than current inventory place\n # 100 more in inventory.\n if an_order.is_valid and \\\n len(self.item_dic[product_id]) < an_order.quantity:\n if an_order.item.lower() == 'candy':\n for i in range(0, 100):\n self.item_dic[product_id].append(\n an_order.factory.create_candy(\n **an_order.item_details))\n elif an_order.item.lower() == 'stuffedanimal':\n for i in range(0, 100):\n self.item_dic[product_id].append(\n an_order.factory.create_stuffed_animal(\n **an_order.item_details))\n elif an_order.item.lower() == 'toy':\n for i in range(0, 100):\n self.item_dic[product_id].append(\n an_order.factory.create_toy(\n **an_order.item_details))\n\n # subtract the order amount from inventory\n self.item_dic[product_id] = self.item_dic[product_id][\n :-an_order.quantity]\n self.orders.append(an_order)",
"def place_order(self, order):\n # check if input is a filepath\n if type(order) == str:\n with open(order, \"r\") as data:\n ordered = data.read().split('\\n')[0].split(\" \")\n\n else:\n ordered = order\n\n # check if all items are part of the warehouses stock\n for item in ordered:\n if item not in ordered:\n return []\n\n # check if enough of each product is in stock\n uniques = set(ordered)\n occurrences = Counter(ordered)\n for item in uniques:\n if occurrences[item] > self.availability(item):\n return []\n\n # if everything checks out return list of sets of what psus carry each item\n else:\n return [list(set(self.look_up(item))) for item in ordered]",
"def orderWatch(self, order):\r\n\t\tself.pair.orders.append(order)",
"def add_product(self, product: Product) -> None:\n self.__check_short_section_name(product.short_name())\n self.__products.append(product)",
"def product(self, product):\n self._product = product",
"def insert(self, product):\n pass",
"def payload_add_products(self, payload: dict, order: Order, language: str):\n order_lines: [OrderLine] = OrderLine.objects.filter(order=order.id)\n items: [dict] = []\n\n area = resolve_area(order)\n\n # Additional product orders doesn't have berth product\n if hasattr(order, \"product\") and order.product:\n product = order.product\n int_tax = int(order.tax_percentage)\n assert (\n int_tax == product.tax_percentage\n ) # make sure the tax is a whole number\n with override(language):\n lease = order.lease\n place = (\n lease.berth\n if hasattr(lease, \"berth\")\n else lease.place\n if hasattr(lease, \"place\") and lease.place\n else lease.section\n if hasattr(lease, \"section\") and lease.section\n else area\n )\n product_name = f\"{product.name}: {place}\"\n items.append(\n {\n \"id\": get_talpa_product_id(product.id, area),\n \"title\": product_name,\n \"price\": price_as_fractional_int(order.price),\n \"pretax_price\": price_as_fractional_int(order.pretax_price),\n \"tax\": int_tax,\n \"count\": 1,\n \"type\": 1,\n }\n )\n\n for order_line in order_lines:\n product: AdditionalProduct = order_line.product\n int_tax = int(product.tax_percentage)\n assert (\n int_tax == product.tax_percentage\n ) # make sure the tax is a whole number\n with override(language):\n product_name = product.name\n items.append(\n {\n \"id\": get_talpa_product_id(\n product.id,\n area,\n is_storage_on_ice=product.service\n == ProductServiceType.STORAGE_ON_ICE,\n ),\n \"title\": product_name,\n \"price\": price_as_fractional_int(order_line.price),\n \"pretax_price\": price_as_fractional_int(order_line.pretax_price),\n \"tax\": int_tax,\n \"count\": order_line.quantity,\n \"type\": 1,\n }\n )\n payload[\"amount\"] = price_as_fractional_int(order.total_price)\n payload[\"products\"] = items",
"def product(self, product):\n\n self._product = product",
"def add_order(self, order, verbose):\n order.toid = self.get_quote_id()\n self.increment_quote_id()\n\n if verbose:\n print(f'QUID: order.quid={order.qid} self.quote.id={self.quote_id}')\n\n if order.otype == 'Bid':\n response = self.bids.book_add(order)\n best_price = self.bids.lob_anon[-1][0]\n self.bids.best_price = best_price\n self.bids.best_tid = self.bids.lob[best_price][1][0][2]\n else:\n response = self.asks.book_add(order)\n best_price = self.asks.lob_anon[0][0]\n self.asks.best_price = best_price\n self.asks.best_tid = self.asks.lob[best_price][1][0][2]\n return [order.toid, response]",
"def add_order(self, order):\n\n self.orders.add(order, bulk=False)\n self.capacity -= order.weight\n\n self.save()",
"def process_orders(self):\n\n try:\n cont = True\n while cont:\n user = input(\"Enter the name of the excel file to be processed(.xlsx): \")\n if user == '':\n print(\"Invalid file name.\")\n else:\n cont = False\n finally:\n pass\n\n # orders holds all valid orders, self._print_list holds all orders regardless of their status\n (orders, self._print_list) = OrderProcessor.read_file_to_orders(user + \".xlsx\")\n\n for i in orders:\n self._orders.append(orders[i])\n kwargs = {'name': orders[i][1].name,\n 'desc': orders[i][1].details['description'],\n 'product_id': orders[i][1].product_id,\n 'has_batteries': orders[i][1].details['has_batteries'],\n 'min_age': orders[i][1].details['min_age'],\n 'dimensions': orders[i][1].details['dimensions'],\n 'num_rooms': orders[i][1].details['num_rooms'],\n 'speed': orders[i][1].details['speed'],\n 'jump_height': orders[i][1].details['jump_height'],\n 'has_glow': orders[i][1].details['has_glow'],\n 'spider_type': orders[i][1].details['spider_type'],\n 'num_sound': orders[i][1].details['num_sound'],\n 'colour': orders[i][1].details['colour'],\n 'has_lactose': orders[i][1].details['has_lactose'],\n 'has_nuts': orders[i][1].details['has_nuts'],\n 'variety': orders[i][1].details['variety'],\n 'pack_size': orders[i][1].details['pack_size'],\n 'stuffing': orders[i][1].details['stuffing'],\n 'size': orders[i][1].details['size'],\n 'fabric': orders[i][1].details['fabric']\n }\n\n item = orders[i][1].factory.create(orders[i][1].factory, **kwargs)\n\n # Initializing inventory quantity\n self.restock_inv(orders, item, i, kwargs)\n\n # Handling file not found errors\n if Path(user+ \".xlsx\").is_file():\n print(\"Successfully processed orders.\")\n else:\n print(\"Unsuccessful, file was not found.\")",
"def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)",
"def add_product(self, product, producer_id):\n self.products.append({\"product\": product, \"producer_id\": producer_id})",
"def on_scan(self, product):\n self.new_product = product\n if self.active:\n self.sm.on_state_event(self.events.SCAN)",
"def place_order(self, new_order: Order):\n\n if len(self.orders_list) >= 0:\n new_order.order_id = len(self.orders_list)\n else:\n new_order.order_id = 0\n self.orders_list.append(new_order)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Attribute which calculates the total amount on the order after deducting discounts.
|
def total(self):
    total_price = self.get_total_amount()
    discounts = self.get_total_discount()
    return total_price - discounts
|
[
"def total_order_discounts(order):\n total_discounts = D('0.0')\n\n discounts = order.basket_discounts\n\n for discount in discounts:\n total_discounts += discount.amount\n\n return total_discounts",
"def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount",
"def total_items_discount(self):\n return self._total_items_discount",
"def debt_amount(self):\n return self._debt_amount",
"def basket_total_before_discounts_excl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_excl_tax\"))\n return result[\"total\"]",
"def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price",
"def discount_amount(self):\n return self._discount_amount",
"def amount(self):\n return self.subtotal + self.tax_subtotal + self.shipping",
"def _compute_amount(self):\n for line in self:\n subtotal = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n line.update({\n 'price_subtotal': subtotal * line.product_uom_qty\n })",
"def total_donated(self):\n if not hasattr(self, 'dynamic_total'):\n agg = self.donations.aggregate(Sum('amount'))\n self.dynamic_total = agg['amount__sum']\n return self.current + (self.dynamic_total or 0)",
"def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))",
"def calculate(self, amount):\n if self.discount_deduct is not None:\n # Don't apply to amounts that would be negative after deduction.\n if self.discount_deduct < amount:\n return self.discount_deduct\n elif self.discount_percent is not None:\n return amount / Decimal(\"100\") * self.discount_percent\n return 0",
"def update_on_delete(sender, instance, **kwargs):\n instance.order.update_grand_total()",
"def basket_total_before_discounts_incl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_incl_tax\"))\n return result[\"total\"]",
"def discount(self):\n return self._discount",
"def _compute_amount(self):\r\n\t\tfor line in self:\r\n\t\t\tprice = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\r\n\t\t\ttaxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\r\n\t\t\tline.update({\r\n\t\t\t\t'price_tax': taxes['total_included'] - taxes['total_excluded'],\r\n\t\t\t\t'price_total': round(taxes['total_included'], 1),\r\n\t\t\t\t'price_subtotal': round(taxes['total_excluded'], 1),\r\n\t\t\t})",
"def _compute_amount(self):\n\t\tfor line in self:\n\n\t\t\tprice = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n\t\t\t\n\t\t\ttaxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_id)\n\t\t\t\n\t\t\tline.update({\n\t\t\t\t'price_tax': taxes['total_included'] - taxes['total_excluded'],\n\t\t\t\t\n\t\t\t\t'price_total': taxes['total_included'],\n\n\t\t\t\t'price_subtotal': taxes['total_excluded'],\n\t\t\t})",
"def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100",
"def update_on_delete(sender, instance, **kwargs):\n instance.order.update_total()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculates total discount applicable on this order.
|
def get_total_discount(self):
    total_discount = 0.00
    for promotion in self.pricing_rules:
        discount = promotion.get_discount(self.order)
        total_discount += discount
    return total_discount
|
[
"def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount",
"def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price",
"def discount_amount(self):\n if self._promotion:\n return int(self.price * self._promotion.discount_percent / 100.0)\n return 0",
"def discounted_price(self):\n if self._promotion:\n return int(self.price - self.discount_amount)\n return self.price",
"def total_items_discount(self):\n return self._total_items_discount",
"def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))",
"def total_price(self) -> float:\n\n discount = self.invoice_obj.discount\n total_price = 0\n\n for record in self.raw_records:\n discount_price = self._discount_price(record, discount)\n if discount_price:\n total_price += discount_price\n else:\n return None\n\n return total_price",
"def discount(self, discount):\n return self.price * (1 - discount)",
"def get_total_discount(self, report_df):\n\n return report_df.loc[:, 'full_price_amount'].sum() - report_df.loc[:, 'discounted_amount'].sum()",
"def total(self):\n total_price = self.get_total_amount()\n discounts = self.get_total_discount()\n\n return total_price - discounts",
"def discount_amount(self):\n return self._discount_amount",
"def discount(self):\n return self._discount",
"def total_order_discounts(order):\n total_discounts = D('0.0')\n\n discounts = order.basket_discounts\n\n for discount in discounts:\n total_discounts += discount.amount\n\n return total_discounts",
"def calculate(self, amount):\n if self.discount_deduct is not None:\n # Don't apply to amounts that would be negative after deduction.\n if self.discount_deduct < amount:\n return self.discount_deduct\n elif self.discount_percent is not None:\n return amount / Decimal(\"100\") * self.discount_percent\n return 0",
"def discount_rate(self):\n assert self.res\n if self.cost_orig == 0:\n return 0\n return self.cost_opt / self.cost_orig",
"def compute_price(self):\n self.np_payoffs = np.array(self.payoffs, dtype=float)\n self.np_Vi = self.discount_factor * self.np_payoffs\n self.price = np.average(self.np_Vi)",
"def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100",
"def apply_discount(self):\n self.flag = False\n count = dict(Counter(self.total_items))\n\n #Checks for discount BOGO\n if 'CF1' in count.keys():\n if count['CF1'] % 2 == 0:\n self.discount_amount = (count['CF1']/2)*self.price_dict['CF1']\n self.price = self.price - self.discount_amount\n self.flag = True\n else:\n self.discount_amount = (int(count['CF1']/2)*self.price_dict['CF1'])\n self.price = self.price - self.discount_amount\n self.flag = True\n self.discount_token = 'BOGO'\n\n #Checks for discount APPL\n if 'AP1' in count.keys():\n if count['AP1'] == 3 and self.total_items[-1] == 'AP1':\n self.price = self.price - 4.50\n self.discount_token, self.discount_amount = 'APPL', 4.50\n self.flag = True\n\n #Checks for discount CHMK\n if 'CH1' in count.keys() and 'MK1' in count.keys():\n if count['MK1'] == 1:\n self.price = self.price - self.price_dict['MK1']\n self.discount_token, self.discount_amount = 'CHMK', self.price_dict['MK1']\n self.flag = True\n\n #Checks for discount APOM\n if 'OM1' in count.keys() and self.total_items[-1] == 'OM1':\n if count['OM1'] > 1:\n last_occ = max(loc for loc, val in enumerate(self.total_items) if val == 'OM1')\n temp_list = self.total_items[last_occ:-1]\n if 'AP1' in temp_list:\n new_count = temp_list.count('AP1')\n self.discount_amount = (self.price_dict['AP1']*new_count)/2\n self.price, self.discount_token = self.price - self.discount_amount, 'APOM'\n self.flag = True\n elif 'AP1' in count.keys():\n self.discount_amount = (self.price_dict['AP1']*count['AP1'])/2\n self.price, self.discount_token = self.price - self.discount_amount, 'APOM'\n self.flag = True",
"def apply_discount(self, price):\n discount = getattr(self.client, 'discount', None)\n now = arrow.utcnow().datetime\n skip = False\n\n if not discount or not price:\n return price\n\n if not discount.remaining_uses:\n skip = True\n\n if discount.start_date and discount.start_date > now:\n skip = True\n\n if discount.end_date and discount.end_date < now:\n skip = True\n\n if discount == self.discount:\n skip = False\n\n if skip:\n return price\n\n price = price * (100 - discount.percentage_discount) / 100\n if discount != self.discount:\n discount.usage_count += 1\n self.discount = discount\n discount.save()\n\n return price"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return total but in a pretty format with Euro sign.
|
def get_total_display(self):
    total = self.total
    return '%.2f\N{euro sign}' % total
|
[
"def formatted_price(self) -> str:\n return fmt_money(self.price)",
"def display_price(self):\n return '$ '+str(self.price)",
"def total_str(self, total):\n return \"{:>{}} {:{}.2f}\".format(\n \"Total:\", self._an + self._ac + self._ap + 2,\n total, self._ast)",
"def currency(self, commas=True):\n sign, digits, exp = self.quantize(Decimal('0.01')).as_tuple()\n digits = list(map(unicode, digits))\n result = []\n for i in range(2):\n result.append(digits.pop() if digits else u'0')\n result.append(u'.')\n if not digits:\n result.append(u'0')\n count = 0\n while digits:\n result.append(digits.pop())\n count += 1\n if count == 3 and digits and commas:\n count = 0\n result.append(u',')\n result.append(u'-' if sign else u'')\n return u''.join(reversed(result))",
"def eur(value):\n float(value)\n return f\"€{value:,.2f}\"",
"def format_usd(my_price):\n return f\"${my_price:,.2f}\"",
"def formatted_string(self) -> str:\n return babel.numbers.format_currency(self, CURRENCY_FORMAT,\n locale=LOCALE)",
"def Total(self):\n return Decimal(self.Amount + self.VAT).quantize(Decimal('.01'))",
"def euro(value):\n try:\n val = u\"%.2f\" % (float(value))\n except:\n return u''\n return val.replace('.', ',')",
"def euro_string(value: int) -> str:\n result = \"€ {:.2f}\".format(round(value / 100, 2))\n return result",
"def money_format(ammount):\n\td = Decimal(ammount) / Decimal(\"100\")\n\treturn u'£%s' % d.quantize(Decimal(\"0.01\"))",
"def html_euro(value):\n \n try:\n value = float(value)\n except ValueError:\n return value\n except TypeError:\n return value\n return mark_safe('%.2f €' % (value))",
"def to_usd(my_price): \n\n return f\"${my_price:,.2f}\" \n ## Taken from shopping-cart project",
"async def get_total_amount_message(self):\n rub_total = self.database[\"rub\"][\"balance\"]\n for currency in self.foreign_currency_names:\n rub_total += self.database[currency][\"rate\"] * self.database[currency][\"balance\"]\n message = f\"\\n sum {rub_total:.2f} rub /\"\n for currency in self.foreign_currency_names:\n message += f\" {rub_total / self.database[currency]['rate']:.2f} {currency} /\"\n return message",
"def format_as_usd(value):\n return f\"${value:,.2f}\"",
"def cart_summary_as_string(self):\n total_num_items = self.shopping_cart_items.aggregate(\n total_num_items=Sum('num_items'),\n ).get('total_num_items', 0)\n\n total_price = self.shopping_cart_items.aggregate(\n total_price=Sum(\n F('product__price')*F('num_items')\n )\n ).get('total_price', 0.00)\n\n if total_num_items is None:\n total_num_items = 0\n if total_price is None:\n total_price = Decimal(\"0.00\")\n\n summary_string = f\"{total_num_items} items | {total_price} €\"\n return summary_string",
"def dollar_format(amount):\n # create a string\n return \"$\" + str(int(amount))",
"def format_currency(amount):\n pretty_amount = str(amount)\n\n if amount < 0:\n pretty_amount = pretty_amount[:1] + \"$\" + pretty_amount[1:]\n else:\n pretty_amount = \"$%s\" % pretty_amount\n\n return pretty_amount",
"def format_currency(money):\n money = str(money)\n cents = money[-2:]\n dollars = money[0:-2]\n return f\"${dollars}.{cents}\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
MessagingCampaign - a model defined in Swagger
|
def __init__(self):
    self.swagger_types = {
        'id': 'str',
        'name': 'str',
        'date_created': 'datetime',
        'date_modified': 'datetime',
        'version': 'int',
        'division': 'DomainEntityRef',
        'campaign_status': 'str',
        'callable_time_set': 'DomainEntityRef',
        'contact_list': 'DomainEntityRef',
        'dnc_lists': 'list[DomainEntityRef]',
        'always_running': 'bool',
        'contact_sorts': 'list[ContactSort]',
        'messages_per_minute': 'int',
        'errors': 'list[RestErrorDetail]',
        'sms_config': 'SmsConfig',
        'self_uri': 'str'
    }
    self.attribute_map = {
        'id': 'id',
        'name': 'name',
        'date_created': 'dateCreated',
        'date_modified': 'dateModified',
        'version': 'version',
        'division': 'division',
        'campaign_status': 'campaignStatus',
        'callable_time_set': 'callableTimeSet',
        'contact_list': 'contactList',
        'dnc_lists': 'dncLists',
        'always_running': 'alwaysRunning',
        'contact_sorts': 'contactSorts',
        'messages_per_minute': 'messagesPerMinute',
        'errors': 'errors',
        'sms_config': 'smsConfig',
        'self_uri': 'selfUri'
    }
    self._id = None
    self._name = None
    self._date_created = None
    self._date_modified = None
    self._version = None
    self._division = None
    self._campaign_status = None
    self._callable_time_set = None
    self._contact_list = None
    self._dnc_lists = None
    self._always_running = None
    self._contact_sorts = None
    self._messages_per_minute = None
    self._errors = None
    self._sms_config = None
    self._self_uri = None
|
[
"def _measurement_campaign():\n return {\n 'type' : 'class',\n 'name' : 'measurement_campaign',\n 'base' : 'activity.activity',\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n # todo - clarify type\n ('duration', 'int', '1.1', None),\n # todo - resolve circular dependencies.\n #('experiments', 'activity.experiment', '1.N', None),\n ],\n }",
"def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'conversation': 'Conversation',\n 'read': 'bool',\n 'audio_recording_duration_seconds': 'int',\n 'audio_recording_size_bytes': 'int',\n 'created_date': 'datetime',\n 'modified_date': 'datetime',\n 'deleted_date': 'datetime',\n 'caller_address': 'str',\n 'caller_name': 'str',\n 'caller_user': 'User',\n 'deleted': 'bool',\n 'note': 'str',\n 'user': 'User',\n 'group': 'Group',\n 'queue': 'Queue',\n 'copied_from': 'VoicemailCopyRecord',\n 'copied_to': 'list[VoicemailCopyRecord]',\n 'delete_retention_policy': 'VoicemailRetentionPolicy',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'conversation': 'conversation',\n 'read': 'read',\n 'audio_recording_duration_seconds': 'audioRecordingDurationSeconds',\n 'audio_recording_size_bytes': 'audioRecordingSizeBytes',\n 'created_date': 'createdDate',\n 'modified_date': 'modifiedDate',\n 'deleted_date': 'deletedDate',\n 'caller_address': 'callerAddress',\n 'caller_name': 'callerName',\n 'caller_user': 'callerUser',\n 'deleted': 'deleted',\n 'note': 'note',\n 'user': 'user',\n 'group': 'group',\n 'queue': 'queue',\n 'copied_from': 'copiedFrom',\n 'copied_to': 'copiedTo',\n 'delete_retention_policy': 'deleteRetentionPolicy',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._conversation = None\n self._read = None\n self._audio_recording_duration_seconds = None\n self._audio_recording_size_bytes = None\n self._created_date = None\n self._modified_date = None\n self._deleted_date = None\n self._caller_address = None\n self._caller_name = None\n self._caller_user = None\n self._deleted = None\n self._note = None\n self._user = None\n self._group = None\n self._queue = None\n self._copied_from = None\n self._copied_to = None\n self._delete_retention_policy = None\n self._self_uri = None",
"def test_get_sms_campaign(self):\n pass",
"def test_create_sms_campaign(self):\n pass",
"def post(self):\n json_data = request.get_json()\n json_data[\"sender_id\"] = current_user.id\n try:\n new_campaign = self.schema.load(json_data)\n except ValidationError as err:\n return {\"message\": err.messages}, HTTPStatus.BAD_REQUEST\n if Campaign.query.filter_by(mailchimp_id=new_campaign.mailchimp_id).first() is not None:\n return {\"message\": \"Campaign already exists.\"}, HTTPStatus.CONFLICT\n db.session.add(new_campaign)\n db.session.commit()\n return self.schema.dump(new_campaign), HTTPStatus.CREATED",
"def test_get_sms_campaigns(self):\n pass",
"def test_create_campaign(self):\n campaign = self.campaign\n\n self.assertTrue(isinstance(campaign, Campaign))\n self.assertEqual(campaign.name, \"Test Campaign\")",
"def __init__(self):\n self.swagger_types = {\n 'id': 'int',\n 'create_date': 'datetime',\n 'modify_date': 'datetime',\n 'scheduled_plan_id': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'status': 'str',\n 'message': 'str',\n 'link_url': 'str',\n 'link_text': 'str',\n 'custom_fields': 'dict(str, object)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'create_date': 'createDate',\n 'modify_date': 'modifyDate',\n 'scheduled_plan_id': 'scheduledPlanId',\n 'start_time': 'startTime',\n 'end_time': 'endTime',\n 'status': 'status',\n 'message': 'message',\n 'link_url': 'linkURL',\n 'link_text': 'linkText',\n 'custom_fields': 'customFields'\n }\n\n self._id = None\n self._create_date = None\n self._modify_date = None\n self._scheduled_plan_id = None\n self._start_time = None\n self._end_time = None\n self._status = None\n self._message = None\n self._link_url = None\n self._link_text = None\n self._custom_fields = None",
"def load(payload):\n payload, errors = Campaign.CAMPAIGN_SCHEMA.load(to_python(payload))\n if errors:\n raise ValidationError(errors)\n\n return Campaign(**payload)",
"def __init__(__self__, *,\n action: pulumi.Input['CampaignUpdateCampaignAction'],\n collection_scheme: pulumi.Input[Union['CampaignCollectionScheme0PropertiesArgs', 'CampaignCollectionScheme1PropertiesArgs']],\n signal_catalog_arn: pulumi.Input[str],\n target_arn: pulumi.Input[str],\n compression: Optional[pulumi.Input['CampaignCompression']] = None,\n data_destination_configs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['CampaignDataDestinationConfig0PropertiesArgs', 'CampaignDataDestinationConfig1PropertiesArgs']]]]] = None,\n data_extra_dimensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n diagnostics_mode: Optional[pulumi.Input['CampaignDiagnosticsMode']] = None,\n expiry_time: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n post_trigger_collection_duration: Optional[pulumi.Input[float]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n signals_to_collect: Optional[pulumi.Input[Sequence[pulumi.Input['CampaignSignalInformationArgs']]]] = None,\n spooling_mode: Optional[pulumi.Input['CampaignSpoolingMode']] = None,\n start_time: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['CampaignTagArgs']]]] = None):\n pulumi.set(__self__, \"action\", action)\n pulumi.set(__self__, \"collection_scheme\", collection_scheme)\n pulumi.set(__self__, \"signal_catalog_arn\", signal_catalog_arn)\n pulumi.set(__self__, \"target_arn\", target_arn)\n if compression is not None:\n pulumi.set(__self__, \"compression\", compression)\n if data_destination_configs is not None:\n pulumi.set(__self__, \"data_destination_configs\", data_destination_configs)\n if data_extra_dimensions is not None:\n pulumi.set(__self__, \"data_extra_dimensions\", data_extra_dimensions)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if diagnostics_mode is not None:\n pulumi.set(__self__, \"diagnostics_mode\", diagnostics_mode)\n if expiry_time is not None:\n pulumi.set(__self__, \"expiry_time\", expiry_time)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if post_trigger_collection_duration is not None:\n pulumi.set(__self__, \"post_trigger_collection_duration\", post_trigger_collection_duration)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if signals_to_collect is not None:\n pulumi.set(__self__, \"signals_to_collect\", signals_to_collect)\n if spooling_mode is not None:\n pulumi.set(__self__, \"spooling_mode\", spooling_mode)\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'start_date': 'datetime',\n 'length_minutes': 'int',\n 'activities': 'list[BuAgentScheduleActivity]',\n 'manually_edited': 'bool',\n 'schedule': 'BuScheduleReference'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'start_date': 'startDate',\n 'length_minutes': 'lengthMinutes',\n 'activities': 'activities',\n 'manually_edited': 'manuallyEdited',\n 'schedule': 'schedule'\n }\n\n self._id = None\n self._start_date = None\n self._length_minutes = None\n self._activities = None\n self._manually_edited = None\n self._schedule = None",
"def test_update_sms_campaign(self):\n pass",
"def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'contact_list': 'DialerContactlistfilterConfigChangeUriReference',\n 'contact_list_columns': 'list[str]',\n 'clauses': 'list[DialerContactlistfilterConfigChangeFilterClause]',\n 'filter_type': 'str',\n 'additional_properties': 'object'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'contact_list': 'contactList',\n 'contact_list_columns': 'contactListColumns',\n 'clauses': 'clauses',\n 'filter_type': 'filterType',\n 'additional_properties': 'additionalProperties'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._contact_list = None\n self._contact_list_columns = None\n self._clauses = None\n self._filter_type = None\n self._additional_properties = None",
"def create_campaign(req):\n dict = {}\n return render_to_response('create_campaign.html', dict)",
"def create_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)",
"def __init__(self, campaign, campaign_email, *args, **kwargs):\n super(TrackedEmailMessage, self).__init__(*args, **kwargs)\n\n self._set_campaign(campaign)\n self._set_campaign_email(campaign_email)",
"def __init__(self, ChannelModel):\n self.Channel = ChannelModel",
"def testAddCampaigns(self):\n operations = [\n {\n 'operator': 'ADD',\n 'operand': {\n 'name': 'Campaign #%s' % Utils.GetUniqueName(),\n 'status': 'PAUSED',\n 'biddingStrategy': {\n 'type': 'ManualCPC'\n },\n 'endDate': date(date.today().year + 1,\n 12, 31).strftime('%Y%m%d'),\n 'budget': {\n 'period': 'DAILY',\n 'amount': {\n 'microAmount': '1000000'\n },\n 'deliveryMethod': 'STANDARD'\n }\n }\n },\n {\n 'operator': 'ADD',\n 'operand': {\n 'name': 'Campaign #%s' % Utils.GetUniqueName(),\n 'status': 'PAUSED',\n 'biddingStrategy': {\n 'type': 'ManualCPC'\n },\n 'endDate': date(date.today().year + 1,\n 12, 31).strftime('%Y%m%d'),\n 'budget': {\n 'period': 'DAILY',\n 'amount': {\n 'microAmount': '2000000'\n },\n 'deliveryMethod': 'STANDARD'\n }\n }\n }\n ]\n campaigns = self.__class__.service.Mutate(operations)\n self.__class__.campaign1 = campaigns[0]['value'][0]\n self.__class__.campaign2 = campaigns[0]['value'][1]\n self.assert_(isinstance(campaigns, tuple))",
"def __init__(self):\n self.swagger_types = {\n 'id_template_notificacao': 'int',\n 'destinatarios': 'list[str]',\n 'anexos': 'list[AnexoNotificacaoEmailRequest]',\n 'parametros_conteudo': 'dict(str, object)'\n }\n\n self.attribute_map = {\n 'id_template_notificacao': 'idTemplateNotificacao',\n 'destinatarios': 'destinatarios',\n 'anexos': 'anexos',\n 'parametros_conteudo': 'parametrosConteudo'\n }\n\n self._id_template_notificacao = None\n self._destinatarios = None\n self._anexos = None\n self._parametros_conteudo = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the date_created of this MessagingCampaign.
|
def date_created(self, date_created):
    self._date_created = date_created
|
[
"def date_created(self, date_created: datetime):\n\n self._date_created = date_created",
"def date_created(self, date_created):\n\n self._date_created = date_created",
"def datecreated(self, datecreated):\n\n self._datecreated = datecreated",
"def created_date(self, created_date):\n \n self._created_date = created_date",
"def created_date(self, created_date):\n\n self._created_date = created_date",
"def created_date(self, created_date):\n self._created_date = created_date",
"def SetDateCreated(self, date):\n self.datecreated = str(date)",
"def created_datetime(self, created_datetime):\n self._created_datetime = created_datetime",
"def created_date_time(self, created_date_time):\n\n self._created_date_time = created_date_time",
"def set_createddate(self, date: str):\r\n\r\n self.metadata['common']['createddate'] = date",
"def set_account_created_date(self, account_created_date):\n self.account_created_date = account_created_date",
"def create_date(self, create_date):\n\n self._create_date = create_date",
"def creation_date(self, creation_date):\n\n self._creation_date = creation_date",
"def created(self, created):\n\n self._created = created",
"def set_account_created_date_formatted(self, account_created_date_formatted):\n self.account_created_date_formatted = account_created_date_formatted",
"def create_datetime(self, create_datetime):\n\n self._create_datetime = create_datetime",
"def created_time(self, created_time):\n\n self._created_time = created_time",
"def setCreatedDate(self, *args):\n return _libsbml.ModelHistory_setCreatedDate(self, *args)",
"def created_at(self, created_at):\n\n self._created_at = created_at"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the date_modified of this MessagingCampaign.
|
def date_modified(self):
    return self._date_modified
|
[
"def date_modified(self) -> datetime:\n return self._date_modified",
"def modified_date(self):\n return self._modified_date",
"def last_modified(self) -> datetime:\n return self._last_modified",
"def modification_date_time(self):\n return self._modification_date_time",
"def date_modified(self):\n return self.phasset.modificationDate()",
"def modified_at(self):\n return self._modified_at",
"def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)",
"def modified_time(self) -> str:\n return pulumi.get(self, \"modified_time\")",
"def get_inbound_statement_details_last_modified_date(self):\n return self.get_text_from_element(self.inbound_statements_details_last_modified_date_locator, False)",
"def modificationDate(self):\r\n if hasattr(aq_base(self.context), 'modified'):\r\n modifiedDate = self.context.modified()\r\n\r\n translationService = getToolByName(self.context, 'translation_service')\r\n return translationService.ulocalized_time(modifiedDate,\r\n context=self.context,\r\n domain='plonelocales'\r\n )\r\n return None",
"def modified_timestamp(self) -> str:\n return pulumi.get(self, \"modified_timestamp\")",
"def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)",
"def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")",
"def last_modified_time(self) -> float:\n return self._last_modified_time",
"def last_modified(self):\n last_changed_file = Session.query(sa.func.max(Entity.last_modified_date)).filter_by(project=self).first()[0]\n\n if last_changed_file:\n return max(self.last_modified_date, last_changed_file)\n \n return self.last_modified_date",
"def modified_time(self) -> float:\n return self._modified_time",
"def last_modify_time(self):\n return self._last_modify_time",
"def last_plan_modified_date(self) -> str:\n return pulumi.get(self, \"last_plan_modified_date\")",
"def last_modified_at(self):\n return self.viztrail.last_modified_at"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the date_modified of this MessagingCampaign.
|
def date_modified(self, date_modified):
    self._date_modified = date_modified
|
[
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def modified_date(self, modified_date):\n \n self._modified_date = modified_date",
"def modified_date(self, modified_date):\n\n self._modified_date = modified_date",
"def last_modified_date(self, last_modified_date):\n\n self._last_modified_date = last_modified_date",
"def modified_at(self, modified_at):\n\n self._modified_at = modified_at",
"def setModifiedDate(self, *args):\n return _libsbml.ModelHistory_setModifiedDate(self, *args)",
"def modify_date(self, modify_date):\n\n self._modify_date = modify_date",
"def last_modified_date_time(self, last_modified_date_time):\n\n self._last_modified_date_time = last_modified_date_time",
"def modify_date(self, modify_date):\n self._modify_date = modify_date",
"def modified(self, modified):\n\n self._modified = modified",
"def last_modification_date(self, last_modification_date):\n\n self._last_modification_date = last_modification_date",
"def last_modified(self, last_modified):\n\n self._last_modified = last_modified",
"def modified_time(self, modified_time):\n self._modified_time = modified_time",
"def last_modified(self, last_modified: datetime):\n if last_modified is None:\n raise ValueError(\"Invalid value for `last_modified`, must not be `None`\") # noqa: E501\n\n self._last_modified = last_modified",
"def set_modification_date(self, modification_date):\n\t\t\n\t\tif (modification_date.__class__ != str or modification_date ==\"\") and (modification_date.__class__ != time.struct_time or len(modification_date) != 9 ):\n\t\t\traise InvalidParameterError(\"modification_date\", \"modification_date is not in a proper format\")\n\t\ttry:\n\t\t\tif modification_date.__class__ == str:\n\t\t\t\ttmp_md = time.strptime(modification_date, '%S %M %H %d %m %Y')\n\t\t\telif modification_date.__class__ == time.struct_time:\n\t\t\t\ttmp_md = modification_date\n\t\t\tself.__modification_date = datetime(tmp_md[0], tmp_md[1], tmp_md[2], tmp_md[3], tmp_md[4], tmp_md[5])\t\n\t\texcept:\n\t\t\traise InvalidDate, \"date is not valid modification_date is not in a proper format\"",
"def modification_date_time(self, modification_date_time):\n self._modification_date_time = modification_date_time",
"def last_modified(self, last_modified):\n self._last_modified = last_modified",
"def last_modified_on(self, last_modified_on):\n\n self._last_modified_on = last_modified_on",
"def date_modified(self) -> datetime:\n return self._date_modified"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the version of this MessagingCampaign. Required for updates, must match the version number of the most recent update
|
def version(self, version):
    self._version = version
|
[
"def version(self, version):\n self._version = version",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def version(self, version):\n\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version):\n self._version = utils.VersionParser().parse(version)",
"def update_version(self, version):",
"def _set_version(self, msg, vers):\n v = vers if vers else self.default_version\n if (self.version_cap and not\n rpc_common.version_is_compatible(self.version_cap, v)):\n raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)\n msg['version'] = v",
"def _set_package_version_values_on_self(self):\n self.version_id = self.upload[\"MetadataPackageVersionId\"]\n version = self._get_one_record(\n (\n \"SELECT MajorVersion, \"\n \"MinorVersion, \"\n \"PatchVersion, \"\n \"BuildNumber, \"\n \"ReleaseState \"\n \"FROM MetadataPackageVersion \"\n f\"WHERE Id='{self.version_id}'\"\n ),\n f\"Version {self.version_id} not found\",\n )\n version_parts = [str(version[\"MajorVersion\"]), str(version[\"MinorVersion\"])]\n if version[\"PatchVersion\"]:\n version_parts.append(str(version[\"PatchVersion\"]))\n\n self.version_number = \".\".join(version_parts)\n\n if version[\"ReleaseState\"] == \"Beta\":\n self.version_number += f\" (Beta {version['BuildNumber']})\"",
"def SetVersion(self, addonVersion):\n self._addonVersion = addonVersion",
"def builder_version(self, builder_version):\n\n self._builder_version = builder_version",
"def version(self, newVersion=None):\n pass",
"def version(self, newVersion=None):\n if newVersion != None:\n self._setValue('version', newVersion)\n return self._getValue('version')",
"def _set_version(self, version):\n with self.db.atomic():\n JambiModel.delete().execute()\n JambiModel.create(ref=str(version))\n self.logger.debug('Set jambi version to {}'.format(version))",
"def model_version(self, model_version):\n\n self._model_version = model_version",
"def api_version(self, api_version):\n self._api_version = api_version",
"def app_version(self, app_version):\n self._app_version = app_version",
"def hxdp_build_version(self, hxdp_build_version):\n\n self._hxdp_build_version = hxdp_build_version",
"def meta_version(self, meta_version):\n\n self._meta_version = meta_version",
"def ces_version(self, ces_version):\n self._ces_version = ces_version"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the division of this MessagingCampaign. The division this entity belongs to.
|
def division(self):
    return self._division
|
[
"def get_group(self):\n return self._group",
"def get_division(id_, cache_time=5):\n s = session(cache_time)\n res = s.get(f'{BASE_URL}/division?id={id_}')\n return check_network_response(res)",
"def get_sent_folder(self):\n try:\n return self.get_sent_folders()[0]\n except IndexError:\n return",
"def get_blb_division_by_division_id(self, division_id=None):\n\n division = self.session.query(BLBDivision).get(division_id)\n if division is not None:\n raise gen.Return(division.to_dict())",
"def getDivider(self):\n return _libsbml.CompModelPlugin_getDivider(self)",
"def domain(self):\n return self._pci_address['segment']",
"def campaign_id(self):\n\n return self._parent_id",
"def get_local_group(self):\n if self.local_group is None:\n call_campaign = self.get_object()\n self.local_group = call_campaign.local_group\n return self.local_group",
"def get_group(cls):\n return cls.get(\"group\", \"general\")",
"def district(self) -> str:\n return pulumi.get(self, \"district\")",
"def get_divide(self, ):\n return self.get_parameter('divide')",
"def group(self):\n return self.__group",
"def _getConversationsFolder(self):\n if not self.hasObject('conversations'):\n log.warn(\"The chatservice's 'Conversations' folder did not exist, \"\n \"and has been automatically recreated.\")\n manage_addBTreeFolder(self, 'conversations', 'Conversations')\n\n return self._getOb('conversations')",
"def group(self):\n return self._group",
"def getCommission(self):\n return self.__commission",
"def get_divergence_hor(self):\n for focus_mode in self.focus_modes:\n if focus_mode['modeName'] == self.active_focus_mode:\n return focus_mode['diverg'][0]",
"def get_part(self):\n return self.get_object()",
"def conversation(self):\n return self._conversation",
"def get_domain_id(self):\r\n return self.__domain_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the division of this MessagingCampaign. The division this entity belongs to.
|
def division(self, division):
    self._division = division
|
[
"def division(self, division):\n\n self._division = division",
"def setSyncDiv(self, syncDivider):\n self._settings['syncDivider'] = syncDivider\n self._api.setSyncDiv(syncDivider)",
"def set_divide(self, a_divide):\n self.set_parameter('divide', a_divide)\n return self",
"def participation(self, participation):\n\n self._participation = participation",
"def setBoundary(self, boundary):\n self.boundary = boundary\n return",
"def setSyncDiv(self, syncDivider):\n tryfunc(th260lib.TH260_SetSyncDiv(\n self._device_number, ct.c_int(syncDivider)))\n # Note: after Init or SetSyncDiv allow 150 ms\n # for valid count rate readings\n time.sleep(thdef.INIT_WAIT_TIME)",
"def set_part(self, connection_part):\n self.part = connection_part",
"def setSplit(self,split):\n self.split=split",
"def campaign(self, campaign):\n\n self._campaign = campaign",
"def domain_group(self, domain_group):\n\n self._domain_group = domain_group",
"def _set_campaign(self, campaign):\n if isinstance(campaign, str):\n campaign = TrackedCampaign.objects.create(name=campaign)\n\n campaign.save()\n\n self.campaign = campaign",
"async def set_chat_discussion_group(\n self,\n chat_id: int,\n discussion_chat_id: int,\n *,\n request_id: str = None,\n request_timeout: int = None,\n skip_validation: bool = False\n ) -> Ok:\n _constructor = SetChatDiscussionGroup.construct if skip_validation else SetChatDiscussionGroup\n\n return await self.client.request(\n _constructor(\n chat_id=chat_id,\n discussion_chat_id=discussion_chat_id,\n ),\n request_id=request_id,\n request_timeout=request_timeout,\n )",
"def dividend(self, dividend):\n\n self._dividend = dividend",
"def setSplit(self, split):\r\n self.split = split",
"def district(self, district):\n\n self._district = district",
"def set_divisions(self, nx=1, ny=1):\n\n self.nx = nx\n self.ny = ny",
"def division(self):\n return self._division",
"def create_division(self, division_title):\n request = post(url=self.base_url + 'api/services/etender/division/CreateDivision',\n headers=self.headers,\n data=json.dumps({\"title\": division_title}))\n self.division = json.loads(request.content).get('result')\n print('Created division:', self.division)\n return self.division",
"def register_division_trigger(self, division_trigger, division_trigger_parameters={}):\n \n self.division_trigger = division_trigger\n self.division_trigger_parameters = division_trigger_parameters"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the campaign_status of this MessagingCampaign. The current status of the messaging campaign. A messaging campaign may be turned 'on' or 'off'.
|
def campaign_status(self):
    return self._campaign_status
|
[
"def get_status(self):\n return self._status",
"def campaign_status(self, campaign_status):\n allowed_values = [\"on\", \"stopping\", \"off\", \"complete\", \"invalid\"]\n if campaign_status.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for campaign_status -> \" + campaign_status)\n self._campaign_status = \"outdated_sdk_version\"\n else:\n self._campaign_status = campaign_status",
"def getgamestatus(self):\n return self._status",
"def claim_status(self):\n return self._claim_status",
"def get_service_status(self):\n return self.make_request(\"GetServiceStatus\")",
"def migration_project_status(self):\n return self._migration_project_status",
"def getStatus(self):\n return self.battleDelegate.status",
"def status(self):\n return self.job_proto.status",
"def job_status(self, job_id):\n return self.api_client.job(job_id).status()",
"def status(self):\n return self.__status",
"def consumable_status(self) -> ConsumableStatus:\n return ConsumableStatus(self.send(\"get_consumable\")[0])",
"def campaign_status(self, campaign_status):\n\n self._campaign_status = campaign_status",
"def get_status(self):\n\n return str(self.le_status.text())",
"def Status(self):\n if self.Claim:\n return self.Claim.Status\n return 'Draft'",
"def status(self) -> ALDBStatus:\n return self._status",
"def get_status(self, identifier, **kwargs):\n return self.get(identifier, **kwargs).status",
"def connection_status(self):\n return self._connection_status",
"def custom_status(self):\n return self._custom_status",
"def rule_status(self) -> pulumi.Output[Optional['AutomationRuleRuleStatus']]:\n return pulumi.get(self, \"rule_status\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the campaign_status of this MessagingCampaign. The current status of the messaging campaign. A messaging campaign may be turned 'on' or 'off'.
|
def campaign_status(self, campaign_status):
    allowed_values = ["on", "stopping", "off", "complete", "invalid"]
    if campaign_status.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for campaign_status -> " + campaign_status)
        self._campaign_status = "outdated_sdk_version"
    else:
        self._campaign_status = campaign_status
|
[
"def campaign_status(self, campaign_status):\n\n self._campaign_status = campaign_status",
"def campaign_status(self):\n return self._campaign_status",
"def set_status(self, status):\n self.status = status\n self.save()",
"def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status",
"def job_status(self, job_status):\n\n self._job_status = job_status",
"def SetStatus(self, status):\r\n self.status = status",
"def set_status(cls, actor_id, status, status_message=None):\n logger.debug(\"top of set_status for status: {}\".format(status))\n actors_store.update(actor_id, 'status', status)\n if status_message:\n actors_store.update(actor_id, 'status_message', status_message)",
"def set_custom_status(self, status: Any):\n self._custom_status = status",
"def set_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.connection_status = connection_status\n self.publish(self.key_gen(\"connection_status\"), connection_status)",
"def setStatus(self, status):\n self.battleDelegate.status = status",
"def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)",
"def set_status(self, status):\n self.set_attr('procstatus', status)",
"def status(self, status):\n\n self._status = status",
"async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")",
"def card_status(self, card_status):\n\n self._card_status = card_status",
"def setPeerStatus(self, status):\n self.status = status",
"def status(self, status):\n self._status = status",
"def migration_project_status(self, migration_project_status):\n self._migration_project_status = migration_project_status",
"def set_bounced(self, status: bool):\n self._bounced = status"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the callable_time_set of this MessagingCampaign. The callable time set for this messaging campaign.
|
def callable_time_set(self):
    return self._callable_time_set
|
[
"def callable_time_set(self, callable_time_set):\n \n self._callable_time_set = callable_time_set",
"def getScheduleOnset(self):\n return DPxGetMicSchedOnset()",
"def schedule_times(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"schedule_times\")",
"def getScheduleOnset(self):\n return DPxGetDinSchedOnset()",
"def get_scheduled_tasks(self):\n return getattr(self, 'SCHEDULED_TASKS', {})",
"def get_last_tasks(self):\n set_TO = set()\n [set_TO.add(case[-1][\"concept:name\"]) for case in self.log]\n return set_TO",
"def _get_timers(self):\n return self.__timers",
"def ScheduledVentilationSetpoint(self):\n return self._scheduled_ventilation_setpoint",
"def scheduledTimes(self, runnable):\n events = self.store.query(\n TimedEvent, TimedEvent.runnable == runnable)\n return (event.time for event in events if not event.running)",
"def schedule(self):\n return self._schedule",
"def getCallSets(self):\n return [self._callSetIdMap[id_] for id_ in self._callSetIds]",
"def queue_times(self):\r\n queue_times = []\r\n for task in self.__tasks.values():\r\n if task.complete():\r\n queue_times.append(task.queued_time())\r\n return queue_times",
"def get_all_times(self):\n times = set()\n for d in self.doctors:\n times.update(d.get_all_times(self.start_time))\n times = list(times)\n times.sort()\n return times",
"def queue_times(self):\r\n return [task.scheduler_launch_time - self.__arrival_time\r\n for task in self.__tasks.values() if task.complete()]",
"def get_time(self):\n return [self.hours, self.mins, self.secs]",
"def get_last_set(self):\n return self.set",
"def get_times_collection():\n return client[\"timetracker\"][\"times\"]",
"def get_call_set(self, call_set_id):\n return self._run_get_request(\n \"callsets\", protocol.CallSet, call_set_id)",
"def get_schedules(self):\n return self.data['schedules'];"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the callable_time_set of this MessagingCampaign. The callable time set for this messaging campaign.
|
def callable_time_set(self, callable_time_set):
    self._callable_time_set = callable_time_set
|
[
"def callable_time_set(self):\n return self._callable_time_set",
"def setScheduleOnset(self, onset):\n DPxSetMicSchedOnset(onset)",
"def set_time(self, set_time):\n\n self._set_time = set_time",
"def setScheduleOnset(self, onset):\n DPxSetDinSchedOnset(onset)",
"def set_scheduled_call_list(self):\n request = RQ_SCH_CALL_LIST\n data = None\n message = (self.operator_id, request, data)\n self.scheduled_call_list = client(message)",
"def setShowCallables(self, show_callables):\n logger.debug(\"setShowCallables: {}\".format(show_callables))\n self._show_callables = show_callables\n self.invalidateFilter()",
"def collection_time(self, collection_time):\n\n self._collection_time = collection_time",
"def set_enqueue_time(self, time):\n self.enqueue_time = time\n for task in self.tasks:\n task.enqueue_time = time",
"def conference_call_time(self, conference_call_time):\n\n self._conference_call_time = conference_call_time",
"def test_set_notifications_tasks(self, redis_set, get_seconds_until_midnight):\n redis_set.return_value = True\n expected_result = set_notifications_tasks({'test_key': 'test_value'})\n self.assertTrue(get_seconds_until_midnight.called)\n self.assertTrue(redis_set.called)\n self.assertTrue(expected_result)\n\n redis_set.return_value = False\n expected_result = set_notifications_tasks({'test_key': 'test_value'})\n self.assertTrue(get_seconds_until_midnight.called)\n self.assertTrue(redis_set.called)\n self.assertFalse(expected_result)",
"def queue_time(self, queue_time):\n\n self._queue_time = queue_time",
"def set_exec_time(self, time):\n for task in self.tasks:\n task.exec_time = time",
"def scheduled_at(self, scheduled_at):\n\n self._scheduled_at = scheduled_at",
"def scheduled(self, scheduled):\n\n self._scheduled = scheduled",
"def set_set_later(self, value):\r\n self.set_later = value",
"def set_time_function(self, function):\n if isinstance(function, types.FunctionType):\n self.get_time = function\n else:\n raise ValueError(\"Invalid value for DUT time function\")",
"def time_utc(self, time_utc):\n\n self._time_utc = time_utc",
"def scheduled_reset_at(self, scheduled_reset_at):\n\n self._scheduled_reset_at = scheduled_reset_at",
"def new_should_trigger(self, callable_):\n self.should_trigger = callable_"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
|
def contact_list(self):
    return self._contact_list
|
[
"def get_contacts(self):\n\n\t\treturn self.__contacts",
"def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]",
"def getContacts(self):\n return self._getContactsFromList(FORWARD_LIST)",
"def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)",
"def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts",
"def contacts(self):\n return ContactCollection(self.request)",
"def get_contacts(self, directory_id, mailing_list_id):\n return self.get(\n f\"directories/{directory_id}/mailinglists/{mailing_list_id}/contacts\"\n )",
"async def member_list(self) -> List[Contact]:\n log.info('Get room <%s> all members', self)\n\n member_ids = await self.puppet.room_members(self.room_id)\n contacts = [\n self.wechaty.Contact.load(member_id)\n for member_id in member_ids\n ]\n\n return contacts",
"def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names",
"def contact(self):\n return self._contact",
"def get_mailing_lists(self):\n return self.get(\"mailinglists\")",
"def get_contact_persons(self):\n return self.technic_contact",
"def get_message_list(self):\n (resp_message, mail_list, octets) = self.server_connection.list()\n return mail_list",
"def get_recipient_list(self):\n if self.recipient_list:\n return self.recipient_list\n else:\n raise NotImplementedError('You must send recipient_list or recipient as argument to __init__, '\n 'or override get_recipient_list().')",
"async def get_contacts(self, **kwargs) -> List[CertificateContact]:\n contacts = await self._client.get_certificate_contacts(\n vault_base_url=self._vault_url, **kwargs\n )\n return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]",
"def get_cached_contacts(self):\n return list(self._replacement_cache)",
"def get_active_contact_no_subscriber(self):\n # The list of active contacts that doesn't\n # exist in SMSCampaignSubscriber\n\n #TODO : This might kill performance on huge phonebook...\n query = \\\n 'SELECT dc.id, dc.phonebook_id, dc.contact, dc.last_name, \\\n dc.first_name, dc.email, dc.city, dc.description, \\\n dc.status, dc.additional_vars, dc.created_date, dc.updated_date \\\n FROM dialer_contact as dc \\\n INNER JOIN dialer_phonebook ON \\\n (dc.phonebook_id = dialer_phonebook.id) \\\n INNER JOIN sms_campaign_phonebook ON \\\n (dialer_phonebook.id = sms_campaign_phonebook.phonebook_id) \\\n WHERE sms_campaign_phonebook.smscampaign_id = %s \\\n AND dc.status = 1 \\\n AND dc.id NOT IN \\\n (SELECT sms_campaign_subscriber.contact_id \\\n FROM sms_campaign_subscriber \\\n WHERE sms_campaign_subscriber.sms_campaign_id = %s)' % \\\n (str(self.id), str(self.id),)\n\n raw_contact_list = Contact.objects.raw(query)\n return raw_contact_list",
"def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]",
"def list_contacts(self):\n print('List of contancts: ')\n if self.contacts == []:\n print('-- Empty --')\n else:\n for i, contact in enumerate(self.contacts):\n new_msg_flag = self.chats[contact.mob_num][0].new_msg == self\n if new_msg_flag:\n print(\" ({0})-> {1} -- {2} \".format(i+1, contact.username, contact.mob_num) + 5*'- - ' + \"New Msg\")\n else:\n print(\" ({0})-> {1} -- {2} \".format(i+1, contact.username, contact.mob_num)) \n print('\\n')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
|
def contact_list(self, contact_list):
    self._contact_list = contact_list
|
[
"def contacts(self, contacts):\n\n self._contacts = contacts",
"def contact_list(self):\n return self._contact_list",
"def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list",
"def contact_list_columns(self, contact_list_columns):\n \n self._contact_list_columns = contact_list_columns",
"def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact",
"async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]",
"def _set_contact(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"contact\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/module-catalog', defining_module='openconfig-module-catalog', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"contact must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"contact\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/module-catalog', defining_module='openconfig-module-catalog', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__contact = t\n if hasattr(self, '_set'):\n self._set()",
"def customer_list(self, customer_list):\n\n self._customer_list = customer_list",
"def contact_points(self, contact_points: object):\n\n self._contact_points = contact_points",
"def contact(self, contact):\n\n self._contact = contact",
"def support_contacts(self, support_contacts):\n self._support_contacts = support_contacts",
"def populate_test_contacts(self, list_id):\n for i in range(0,5):\n contact_data = {\n \"first_name\" : \"Testy\",\n \"last_name\" : \"McTest%s\" % str(i),\n \"email\" : \"test123+%s@vlrst.com\" % str(i),\n \"tags\" : \"test\",\n \"p[%s]\"%list_id : list_id\n }\n r = self.post(\"contact_sync\", contact_data)",
"def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)",
"def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')",
"def client_contact(self, client_contact):\n\n self._client_contact = client_contact",
"def set_certificate_contacts(\n self, vault_base_url, id=None, contact_list=None, custom_headers=None, raw=False, **operation_config):\n contacts = models.Contacts(id=id, contact_list=contact_list)\n\n # Construct URL\n url = '/certificates/contacts'\n path_format_arguments = {\n 'vaultBaseUrl': self._serialize.url(\"vault_base_url\", vault_base_url, 'str', skip_quote=True)\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.config.api_version\", self.config.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct body\n body_content = self._serialize.body(contacts, 'Contacts')\n\n # Construct and send request\n request = self._client.put(url, query_parameters)\n response = self._client.send(\n request, header_parameters, body_content, **operation_config)\n\n if response.status_code not in [200]:\n raise models.KeyVaultErrorException(self._deserialize, response)\n\n deserialized = None\n\n if response.status_code == 200:\n deserialized = self._deserialize('Contacts', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized",
"def send_mass_messages(self, recipient_list, sender, message=\"\", subject=\"\"):\n try:\n for s in recipient_list:\n self.send_message(to=s, sender=sender, message=message, subject=subject)\n except TypeError:\n return -1\n return 1",
"def contact_point(self, contact_point: object):\n\n self._contact_point = contact_point",
"def update_phone_lists(self, xform, case_list):\n raise NotImplementedError()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the dnc_lists of this MessagingCampaign. The dnc lists to check before sending a message for this messaging campaign.
|
def dnc_lists(self):
    return self._dnc_lists
|
[
"def get_mailing_lists(self):\n return self.get(\"mailinglists\")",
"def get_dmarc_messages(self):\n messages = []\n try:\n if self.opt_use_ssl:\n self.server = poplib.POP3_SSL(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"])\n else:\n self.server = poplib.POP3(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"])\n except Exception as e:\n raise Exception(\n \"Error connecting to %s with exception %s\" %\n (self.opt_pop3_server, str(e)))\n else:\n self.helper.log_debug(\n 'get_dmarc_messages: successfully connected to %s' %\n self.opt_pop3_server)\n messages = self.byte2str(self.server.uidl()[1])\n self.helper.log_info(\n 'get_dmarc_messages: %d messages' %\n len(messages))\n return messages",
"def get_message_list(self):\n (resp_message, mail_list, octets) = self.server_connection.list()\n return mail_list",
"def contact_list(self):\n return self._contact_list",
"def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))",
"def _get_isns_discovery_domain_list(self):\n return self.__isns_discovery_domain_list",
"def get_message_list(self):\n \n result = requests.get(\n url = root_url + '/{}'.format(\"message\"),\n headers = { 'Authorization': api_key },\n )\n\n message_list = result.json()\n\n self.message_list = message_list",
"def dns_list(self):\n return self._dns_list",
"def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))",
"def get_messages(self):\n with self.lock:\n return list(self.messages)",
"def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names",
"def get_auto_lists(self):\n result = []\n \n try:\n properties = getToolByName(self, \"portal_properties\")\n lists = properties.mailman_properties.getProperty(\"lists\", [])\n lists = list(lists)\n \n # Browse through all mailing lists.\n for mailing_list in lists:\n list_data = mailing_list.split(\":\")\n \n # Verify that this is a correct list definition.\n if len(list_data) < 4:\n continue\n \n # Verify that this is an auto list.\n if list_data[0] == \"auto\":\n result.append(list_data[1])\n except:\n result = [self.plone_utils.exceptionString()]\n \n return result",
"def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)",
"def get_messages(self):\n return self._messenger.get_messages(self._messenger._project_name, self._messenger._design_name)",
"def conversations(self):\n return list(self._conversations)",
"def get_member_clients(self) -> List[Client]:\n\t\tmember_clients = []\n\t\tfor member_aid in self.member_aids:\n\t\t\tmember_client = self.synergy.connected_clients.get(member_aid, None)\n\t\t\tif member_client is not None:\n\t\t\t\tmember_clients.append(member_client)\n\t\treturn member_clients",
"def getContacts(self):\n return self._getContactsFromList(FORWARD_LIST)",
"def get_contacts(self, directory_id, mailing_list_id):\n return self.get(\n f\"directories/{directory_id}/mailinglists/{mailing_list_id}/contacts\"\n )",
"def ad_domains(self):\n return self._ad_domains"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the dnc_lists of this MessagingCampaign. The dnc lists to check before sending a message for this messaging campaign.
|
def dnc_lists(self, dnc_lists):
    self._dnc_lists = dnc_lists
|
[
"def dns_list(self, dns_list):\n self._dns_list = dns_list",
"def SetDomainsList(self, domainsList) :\n\t\t...",
"def communications_campaign_names(self, communications_campaign_names):\n\n self._communications_campaign_names = communications_campaign_names",
"def checklists(self, checklists):\n\n self._checklists = checklists",
"def setWfsCcdList(self, wfsCcdList):\n\n self._wfsCcd = wfsCcdList",
"def send_to_lists(self, **kwargs):\n kwargs['list_ids'] = kwargs.get('list_ids', None)\n response = self._post(\n path='/do/send/', params=kwargs)\n return response",
"def fdsid_list(self, fdsid_list):\n\n self._fdsid_list = fdsid_list",
"def contact_list(self, contact_list):\n \n self._contact_list = contact_list",
"def campaigns(self, campaigns):\n\n self._campaigns = campaigns",
"def neighbor_distribute_lists(self, neighbor_distribute_lists):\n\n self._neighbor_distribute_lists = neighbor_distribute_lists",
"def dnc_lists(self):\n return self._dnc_lists",
"def SetStringList(self, stringList):\n callResult = self._Call(\"SetStringList\", stringList)",
"def _set_isns_discovery_domain_list(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"isns_discovery_domain_name\",isns_discovery_domain_list.isns_discovery_domain_list, yang_name=\"isns-discovery-domain-list\", rest_name=\"isns-discovery-domain-list\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='isns-discovery-domain-name', extensions=None), is_container='list', yang_name=\"isns-discovery-domain-list\", rest_name=\"isns-discovery-domain-list\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"isns_discovery_domain_list must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"isns_discovery_domain_name\",isns_discovery_domain_list.isns_discovery_domain_list, yang_name=\"isns-discovery-domain-list\", rest_name=\"isns-discovery-domain-list\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='isns-discovery-domain-name', extensions=None), is_container='list', yang_name=\"isns-discovery-domain-list\", rest_name=\"isns-discovery-domain-list\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__isns_discovery_domain_list = t\n if hasattr(self, '_set'):\n self._set()",
"def device_reset_list(self, device_reset_list):\n\n self._device_reset_list = device_reset_list",
"def cc_groups(self, cc_groups):\n\n self._cc_groups = cc_groups",
"def messages(self, messages: List[Message]):\n\n self._messages = messages",
"def agent_id_list(self, agent_id_list):\n self._agent_id_list = agent_id_list",
"def whitelists(self, whitelists):\n\n self._whitelists = whitelists",
"def set_server_list(self, server_list):\r\n if callable(server_list):\r\n self.server_list = list(server_list())\r\n else:\r\n self.server_list = list(server_list)\r\n\r\n random.shuffle(self.server_list)\r\n self._list_position = 0\r\n self._notify_on_server_list(self.server_list)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the always_running of this MessagingCampaign. Whether this messaging campaign is always running
|
def always_running(self):
    return self._always_running
|
[
"def get_running(self):\n return self.running",
"def isScheduleRunning(self):\n if DPxIsMicSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running",
"def running(self):\n return self.scheduler.running",
"def always_on(self) -> bool:\n return pulumi.get(self, \"always_on\")",
"def running(self):\n return self._running",
"def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running",
"def always_running(self, always_running):\n \n self._always_running = always_running",
"def is_running(self):\n return self.type_id == STATE_RUNNING",
"def is_running(self):\n return self._stream_running",
"def running(self):\n return self.state == service_states.RUNNING",
"def running(self):\n return self._state == RUNNING_STATE",
"def is_running(self):\n return self.current_state == self.States.RUNNING",
"def is_running(self):\n return self._task.running()",
"def is_running(self):\n return self.status().state == ApplicationState.RUNNING",
"def isRunning(self):\n return _yarp.Thread_isRunning(self)",
"def running(self):\n return self.window.is_running()",
"def IsCaptureRunning(self):\n return self._get_attribute('isCaptureRunning')",
"def get_started(self):\n return self.start is not None",
"def sequence_running(self) -> bool:\n return self.is_sequence and self._sequence_run.running"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the always_running of this MessagingCampaign. Whether this messaging campaign is always running
|
def always_running(self, always_running):
    self._always_running = always_running
|
[
"def always_running(self):\n return self._always_running",
"def set_running(self, boolean):\n self.running = boolean",
"def always_on(self) -> bool:\n return pulumi.get(self, \"always_on\")",
"def always_run(self, always_run: bool = True) -> Self:\n self._always_run = always_run\n return self",
"def set_as_running(self):\n with self._running_condition:\n assert self._state == PENDING_STATE\n self._state = RUNNING_STATE\n self._running_condition.notify()",
"def set_always_build(self, always_build = 1):\n self.always_build = always_build",
"def isScheduleRunning(self):\n if DPxIsMicSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running",
"def mark_running(self):\n with self._lock:\n self._set_state(self._RUNNING, self._PAUSED)",
"def running(self, running):\n\n self._running = running",
"def running(self, value):\n self.window.set_is_running(value)",
"def is_always(self, is_always):\n\n self._is_always = is_always",
"def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running",
"def mark_started(self):\n self._update_state(JobState.RUNNING)",
"def is_running(self):\n return self.type_id == STATE_RUNNING",
"def set_running(self):\n\t\tif self.state is not State.Run:\n\t\t\tself.animation.set_sprite_animation(self.sprite.index, seq_pack.sequences[\"seq_run\"], 0)\n\t\t\tself.state = State.Run",
"def always_on_requested(self, always_on_requested):\n\n self._always_on_requested = always_on_requested",
"def sequence_start(self, stime: datetime) -> bool:\n result = (\n self.is_sequence\n and self.is_running(stime)\n and not self._sequence_run.running\n )\n if result:\n self._sequence_run.running = True\n return result",
"def is_running(self, flag: bool):\n self._lock.acquire()\n self._is_running = flag\n self._lock.release()",
"def is_running(self):\n return self.current_state == self.States.RUNNING"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the contact_sorts of this MessagingCampaign. The order in which to sort contacts for dialing, based on up to four columns.
|
def contact_sorts(self):
    return self._contact_sorts
|
[
"def contact_sorts(self, contact_sorts):\n \n self._contact_sorts = contact_sorts",
"def sortByDistance(self, contact_list):\n ExpensiveSort(contact_list, self.distance.to_contact).sort()",
"def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts",
"def __getSortedSortDataList(self):\n sortDataList = []\n for ycName in self.__nameToOidAndDepthMap:\n oidAndDepth = self.__nameToOidAndDepthMap[ycName]\n sortDataList.append(self.__SORT_DATA(depth=oidAndDepth.depth,\n oid=oidAndDepth.oid, name=ycName))\n return sorted(sortDataList)",
"def sort_orders(self):\n return self._sort_orders",
"def sort_contacts(contacts):\n \n key_list = list(contacts.keys()) #get keys\n key_list.sort() #sort key_list\n sorted_list = [] #initialize sorted list\n for key in key_list:\n contact = (key, contacts[key][0], contacts[key][1]) #create tuple\n sorted_list += [contact] #add tuple to list\n \n return(sorted_list)",
"def sort_fields(self):\n return self._sort_fields",
"def sorted_fields(self) -> List[MessageField]:\n return sorted(self.fields(), key=lambda field: field.number)",
"def sort_contacts(self, method, order):\n \n method_l = method.lower()\n order_l = order.lower()\n \n if method_l == 'name' and order_l == 'asc':\n name_sort = sorted(self.contacts, key=lambda x: x[0])\n for x in name_sort:\n print(x)\n return name_sort\n elif method_l == 'name' and order_l == 'desc':\n name_sort = sorted(self.contacts, key=lambda x: x[0], reverse=True)\n for x in name_sort:\n print(x)\n return name_sort \n \n elif method_l == 'zipcode' and order_l == 'asc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3])\n for x in zip_sort:\n print(x)\n return zip_sort\n elif method_l == 'zipcode' and order_l == 'desc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3],reverse=True)\n for x in zip_sort:\n print(x)\n return zip_sort",
"def get_sort_fields(self) -> List[int]:\n return list(range(len(self.key_fields)))",
"def query_sort(self):\n if self.sort_order_by and self.sort_asc:\n result = {}\n # Motor规定find里sort的格式为Tuple[]\n result_tuple = []\n # i为索引/index,val为i位上的值\n for i, attr in enumerate(self.sort_order_by):\n result[attr] = self.sort_asc[i]\n result_tuple += [(attr, self.sort_asc[i])]\n return result, result_tuple\n return {}, []",
"def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]",
"def contact_list_columns(self):\n return self._contact_list_columns",
"def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]",
"def _sort_records(self):\n\n idx_start = self._columns.index('start')\n idx_end = self._columns.index('end')\n for c in TranscriptDB.allowed_chroms:\n if c in self._records:\n self._records[c] = sorted(self._records[c], key=itemgetter(idx_start, idx_end))",
"def listSortFields():",
"def sort_orders(self) -> List[FrameworkSortOrder]:\n return self._sort_orders",
"def order_by_columns(self):\n # Ensure the labels are generated\n if not self._labels:\n list(self.labeled_columns)\n\n if self.group_by_strategy == \"labels\":\n if self.ordering == \"desc\":\n suffix = \" DESC\"\n else:\n suffix = \"\"\n\n return [\n text(f\"{lbl}{suffix}\")\n for _, lbl in reversed(list(zip(self.columns, self._labels)))\n ]\n else:\n if self.ordering == \"desc\":\n return [col.desc() for col in reversed(self.columns)]\n else:\n return reversed(self.columns)",
"def sortOptions(self):\n return list(self.sort) if self.sort is not None else []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the contact_sorts of this MessagingCampaign. The order in which to sort contacts for dialing, based on up to four columns.
|
def contact_sorts(self, contact_sorts):
    self._contact_sorts = contact_sorts
|
[
"def contact_sorts(self):\n return self._contact_sorts",
"def sort_fields(self, sort_fields):\n\n self._sort_fields = sort_fields",
"def sortByDistance(self, contact_list):\n ExpensiveSort(contact_list, self.distance.to_contact).sort()",
"def sort_orders(self, sort_orders):\n\n self._sort_orders = sort_orders",
"def sort_values(self, sort_values):\n\n self._sort_values = sort_values",
"def set_sort_order(self, sort_list):\n sort_attrs = []\n for attr in sort_list:\n if type(attr) != str or len(attr) == 0:\n raise ValueError(\"All element of sort_list must be a non empty string.\")\n if attr[0] == '-':\n sort_attrs.append((attr[1:], True))\n else:\n sort_attrs.append((attr, False))\n if len(sort_list) > len(set(map(lambda x: x[0].lower, sort_attrs))):\n raise ValueError(\"Attribute names must be different from each other.\")\n self.__sort_attrs = sort_attrs",
"def sort_orders(self, sort_orders: List[FrameworkSortOrder]):\n\n self._sort_orders = sort_orders",
"def sort(self, sort):\n\n self._sort = sort",
"def contacts(self, contacts):\n\n self._contacts = contacts",
"def _sort_records(self):\n\n idx_start = self._columns.index('start')\n idx_end = self._columns.index('end')\n for c in TranscriptDB.allowed_chroms:\n if c in self._records:\n self._records[c] = sorted(self._records[c], key=itemgetter(idx_start, idx_end))",
"def _date_sort(self):\n if len(self.case_names) > 0:\n obj_list_attrs = [\n self.__getattribute__(attr)\n for attr in self._all_attrs\n if isinstance(self.__getattribute__(attr), list)\n ]\n zipped = list(zip(*obj_list_attrs))\n zipped.sort(reverse=True)\n i = 0\n obj_list_attrs = list(zip(*zipped))\n for attr in self._all_attrs:\n if isinstance(self.__getattribute__(attr), list):\n self.__setattr__(attr, obj_list_attrs[i][:])\n i += 1",
"def sort_order(self, sort_order):\n self._sort_order = sort_order",
"def sort_order(self, sort_order: int):\n\n self._sort_order = sort_order",
"def sort_order(self, sort_order):\n\n self._sort_order = sort_order",
"def _sort_columns(self, order):\n unknown = set(self._columns) - set(order)\n if unknown:\n names = \", \".join(str(name) for name in unknown)\n raise ValueError(f\"Unknown columns: {names}\")\n\n cols = [self.column_location(column) for column in order]\n\n self._columns = [self._columns[col] for col in cols]\n self._data = [[row[col] for col in cols] for row in self._data]",
"def prepare_sorting_fields(self):\n if self.sorting_parameter_name in self.request.query_params:\n self._sorting_fields = []\n for one_field in self.request.query_params.get(self.sorting_parameter_name).split(','):\n sorting_field = one_field.strip()\n if sorting_field.strip():\n self._sorting_fields.append(sorting_field)\n\n if self._sorting_fields:\n # Create a list of sorting parameters. Each parameter is a tuple: (field:str, descending:bool)\n self._sorting_fields = [\n (self.sorting_fields_map.get(field.lstrip('-'), field.lstrip('-')), field[0] == '-')\n for field in self._sorting_fields\n ]",
"def sort_contacts(self, method, order):\n \n method_l = method.lower()\n order_l = order.lower()\n \n if method_l == 'name' and order_l == 'asc':\n name_sort = sorted(self.contacts, key=lambda x: x[0])\n for x in name_sort:\n print(x)\n return name_sort\n elif method_l == 'name' and order_l == 'desc':\n name_sort = sorted(self.contacts, key=lambda x: x[0], reverse=True)\n for x in name_sort:\n print(x)\n return name_sort \n \n elif method_l == 'zipcode' and order_l == 'asc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3])\n for x in zip_sort:\n print(x)\n return zip_sort\n elif method_l == 'zipcode' and order_l == 'desc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3],reverse=True)\n for x in zip_sort:\n print(x)\n return zip_sort",
"def sort_by(self, sort_by):\n self._sort_by = sort_by",
"def _setSortingEnabled(self):\r\n\r\n self._controller.setSortingEnabled(True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the messages_per_minute of this MessagingCampaign. How many messages this messaging campaign will send per minute.
|
def messages_per_minute(self):
    return self._messages_per_minute
|
[
"def messages_per_minute(self, messages_per_minute):\n \n self._messages_per_minute = messages_per_minute",
"def requests_per_minute(self) -> int:\n if len(self.requests) == 0 or self.total_time == 0:\n return 0\n return round(60 * len(self.requests) / self.total_time)",
"def query_plans_per_minute(self) -> int:\n return pulumi.get(self, \"query_plans_per_minute\")",
"def getNumOfMsgSend_interval(self):\n return self.MsgSendCount_interval",
"def _get_duration_minutes(self):\n return self.duration_seconds / 60.0",
"def minutes(self) -> int:\n return pulumi.get(self, \"minutes\")",
"def get_total_minutes(self):\n minutes = 0\n for t in self:\n minutes += t.duration_minutes\n return minutes",
"def get_timer_duration_in_minutes(self):\n return self.timer_duration_in_minutes",
"def get_minute_count(self, emote):\n self.__update_record()\n return self.emoteCount.get(emote, 0)",
"def get_message_count(self):\n messages_raw = self._api_get(\"/sms/sms-count\")\n return {\n 'count': int(messages_raw['LocalInbox']),\n 'siminbox': int(messages_raw['SimInbox']),\n 'simoutbox': int(messages_raw['SimOutbox']),\n 'newmsg': int(messages_raw['NewMsg']),\n 'unread': int(messages_raw['LocalUnread']),\n 'simunread': int(messages_raw['SimUnread']),\n 'deleted': int(messages_raw['LocalDeleted']),\n 'localmax': int(messages_raw['LocalMax']),\n 'simmax': int(messages_raw['SimMax']),\n 'simdraft': int(messages_raw['SimDraft']),\n 'localdraft': int(messages_raw['LocalDraft']),\n 'outbox': int(messages_raw['LocalOutbox'])\n }",
"def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit",
"def unit_ms(self):\n return (self.time_base / 1000.0) / 60.0",
"def length_minutes(self):\n return self._length_minutes",
"def calculate_fetch_size(minutes: int):\n return round(minutes / CONF.interval) if minutes >= CONF.interval else 1",
"def calcMaxNumMsgs(self):\n self.maxNumMsgs = 0\n for participant, spiel in self.conversation.items():\n if len(spiel)>self.maxNumMsgs and participant[1] in self.participants:\n self.maxNumMsgs = len(spiel)",
"def units_per_minute(self) -> typing.Union[int, Fraction, None]:\n return self._units_per_minute",
"def getMinuteCount(self, emote):\n self.__updateRecord()\n return self.emoteCount.get(emote, 0)",
"def message_count(self): \n return len(self.list_of_messages)",
"def minutesPerTrajectory(self, connections):\n \tminutes = 0\n \tif len(connections) <= 56:\n \t\tminutes = 120\n \telse:\n \t\tminutes = 180\n\n \treturn minutes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the messages_per_minute of this MessagingCampaign. How many messages this messaging campaign will send per minute.
|
def messages_per_minute(self, messages_per_minute):
    self._messages_per_minute = messages_per_minute
|
[
"def messages_per_minute(self):\n return self._messages_per_minute",
"def set_limit_per_second(self, rate_limit_per_second):\n pass",
"def minutes(self, minutes):\n\n self._minutes = minutes",
"def kills_per_min(self, kills_per_min):\n\n self._kills_per_min = kills_per_min",
"def set_fan_timer_duration(self, minutes: int = 5):\r\n self._fan_timer_duration = timedelta(minutes=minutes)\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"fan_timer_duration\", minutes))",
"def drive_time_minutes(self, drive_time_minutes):\n\n self._drive_time_minutes = drive_time_minutes",
"def set_timer_duration_in_minutes(self, timer_duration_in_minutes):\n self.timer_duration_in_minutes = timer_duration_in_minutes",
"def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit",
"def cooldown_minutes(self, cooldown_minutes):\n\n self._cooldown_minutes = cooldown_minutes",
"def minute(self, minute):\n assert minute >= 0 and minute < 60, \"'분'은 0-59 사이의 정수여야 합니다.\"\n self.__minute = minute",
"def report_minute_distribution(self):\n self.histogram_granularities.add(histogram_granularity.MINUTE)\n return self",
"def mt_settings_per_language_list(self, mt_settings_per_language_list):\n\n self._mt_settings_per_language_list = mt_settings_per_language_list",
"def minute(self, minute):\n if minute is None:\n raise ValueError(\"Invalid value for `minute`, must not be `None`\") # noqa: E501\n\n self._minute = minute",
"def run_for_mins(bot, nr_mins):\n for i in range(1, nr_mins+1):\n time.sleep(60)\n bot.send_msg('It has been {} minutes.'.format(i))",
"def query_plans_per_minute(self) -> int:\n return pulumi.get(self, \"query_plans_per_minute\")",
"def miter_limit(self, millimeters):\n self._miter_limit = normalize(millimeters)\n self._viewport.setMiterLimit(self._miter_limit)",
"def length_minutes(self, length_minutes):\n \n self._length_minutes = length_minutes",
"def set_report_interval_in_ms(self, interval_ms):\n interval_ms = Utils.convert_number(interval_ms)\n if interval_ms:\n if 'progressReport' not in self.data['videoItem']['stream']:\n self.data['videoItem']['stream']['progressReport'] = {}\n self.data['videoItem']['stream']['progressReport']['progressReportIntervalInMilliseconds'] = int(interval_ms)",
"def requests_per_minute(self) -> int:\n if len(self.requests) == 0 or self.total_time == 0:\n return 0\n return round(60 * len(self.requests) / self.total_time)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
|
def errors(self, errors):
    self._errors = errors
|
[
"def errors(self, errors):\n\n self._errors = errors",
"def errors(self, errors):\n self._errors = errors",
"def validation_errors(self, validation_errors):\n\n self._validation_errors = validation_errors",
"def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors",
"def errors(self) -> Sequence['outputs.BatchAIErrorResponse']:\n return pulumi.get(self, \"errors\")",
"def overwrite_errors(self, errors_dictionary):\n assert errors_dictionary.keys() == self.__errors.keys(), \"The dictionary keys must match.\"\n self.__errors = errors_dictionary",
"def errors(self):\n errs = {}\n for results in self.results.itervalues():\n for result in results:\n if result.status == punc.model.Result.STATUS_ERROR:\n err_msg = result.error_message()\n if err_msg is not None:\n name = result.device_name()\n if name in errs:\n errs[name].add(err_msg)\n else:\n errs[name] = set([err_msg])\n return errs",
"def on_errors(self, on_errors):\n self._on_errors = on_errors",
"def getErrorsList(self):\n return self.__errors",
"def validation_errors(self):\n return self._validation_errors",
"def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors",
"def errors(self):\n return tuple(self._errors)",
"def artifact_import_errors(self, artifact_import_errors):\n\n self._artifact_import_errors = artifact_import_errors",
"def errors(self):\n self.addedPrograms()\n self.addedLibraries()\n self.addedVariables()\n self.modifiedVariables()\n return self._errors",
"def import_errors(self, import_errors):\n\n self._import_errors = import_errors",
"def ReportErrors(self, ReportErrors=True): \n self._ReportErrors = ReportErrors",
"def io_errors(self, io_errors):\n self._io_errors = io_errors",
"def errors(self) -> List[str]:\n return [e.get('message')\n for e in self._error.response.json().get('errors', [])]",
"def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the sms_config of this MessagingCampaign. Configuration for this messaging campaign to send SMS messages.
|
def sms_config(self):
    return self._sms_config
|
[
"def sms_config(self, sms_config):\n \n self._sms_config = sms_config",
"def get_smtp_config(self):\n\t\treturn self.smtp_config",
"def sms(self):\n return self._sms",
"def get_smtp_config(self):\n if self.smtp:\n config = {}\n config['mailfrom'] = self.smtp.mailfrom\n config['mailto'] = self.smtp.mailto\n config['host'] = self.smtp.host\n config['port'] = self.smtp.port\n config['ssl'] = self.smtp.ssl\n config['username'] = self.smtp.username\n config['password'] = self.smtp.password\n config['subject'] = self.smtp.subject\n return config\n\n return None",
"def sms_region_config(self) -> pulumi.Output['outputs.GoogleCloudIdentitytoolkitAdminV2SmsRegionConfigResponse']:\n return pulumi.get(self, \"sms_region_config\")",
"def sms_region_config(self) -> Optional[pulumi.Input['GoogleCloudIdentitytoolkitAdminV2SmsRegionConfigArgs']]:\n return pulumi.get(self, \"sms_region_config\")",
"def configdict(self) -> Dict[str, Any]:\n val: Dict[str, Any] = (_ba.app.config.setdefault('Campaigns',\n {}).setdefault(\n self._name, {}))\n assert isinstance(val, dict)\n return val",
"def sms(self):\r\n return sms.SMS(self)",
"def getReceiverConfig(self):\n \n return self.receiver_config",
"def get_config(self) -> Configuration:\n return self.config",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def get_sentence_configuration(self, sentence_number=None):\n if sentence_number == None:\n return self.sentence_configuration\n return self.sentence_configuration.get(sentence_number)",
"def configuration(self):\n if self.integration is None:\n return None\n return self.integration.configuration",
"def mail_config(self):\n config = {\n 'MAIL_USERNAME': self.mail_username,\n 'MAIL_PASSWORD': self.decrypt_mail_password(),\n 'MAIL_SERVER': self.mail_server,\n 'MAIL_USE_TLS': self.mail_encryption == Encryption.TLS,\n 'MAIL_USE_SSL': self.mail_encryption == Encryption.SSL,\n 'MAIL_DEFAULT_SENDER': self.mail_default_sender or self.mail_username,\n 'MAIL_PORT': self.mail_port\n }\n return config",
"def get_running_sms_campaign(self):\n kwargs = {}\n kwargs['status'] = SMS_CAMPAIGN_STATUS.START\n tday = datetime.utcnow().replace(tzinfo=utc)\n kwargs['startingdate__lte'] = datetime(tday.year, tday.month,\n tday.day, tday.hour, tday.minute, tday.second, tday.microsecond).replace(tzinfo=utc)\n kwargs['expirationdate__gte'] = datetime(tday.year, tday.month,\n tday.day, tday.hour, tday.minute, tday.second, tday.microsecond).replace(tzinfo=utc)\n\n s_time = str(tday.hour) + \":\" + str(tday.minute) + \":\" + str(tday.second)\n kwargs['daily_start_time__lte'] = datetime.strptime(s_time, '%H:%M:%S')\n kwargs['daily_stop_time__gte'] = datetime.strptime(s_time, '%H:%M:%S')\n\n # weekday status 1 - YES\n # self.model._meta.get_field(tday.strftime(\"%A\").lower()).value()\n kwargs[tday.strftime(\"%A\").lower()] = 1\n\n return SMSCampaign.objects.filter(**kwargs)",
"def get_config(self):\n\n # make sure that the config reflects the state of the underlying logic\n self.logic_to_config()\n # and then return the config struct.\n return self._config",
"def get_configuration(self) -> Dict[str, Any]:\n config = self.get_raw_configuration\n\n if self.envelope:\n self.logger.debug(\"Envelope enabled; extracting data from config\", extra={\"envelope\": self.envelope})\n config = jmespath_utils.extract_data_from_envelope(\n data=config, envelope=self.envelope, jmespath_options=self.jmespath_options\n )\n\n return config",
"def config(self):\n return self.manager.config",
"def rabbitmq_settings(self):\n return self.application.settings.get('rabbitmq', dict)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the sms_config of this MessagingCampaign. Configuration for this messaging campaign to send SMS messages.
|
def sms_config(self, sms_config):
    self._sms_config = sms_config
|
[
"def sms_config(self):\n return self._sms_config",
"def sms(self, sms):\n\n self._sms = sms",
"def sms_enabled(self, sms_enabled):\n\n self._sms_enabled = sms_enabled",
"def send_sms(self, sms):\n pass",
"def sms_phone_number(self, sms_phone_number):\n\n self._sms_phone_number = sms_phone_number",
"def ucsm_config(self, ucsm_config):\n\n self._ucsm_config = ucsm_config",
"def sms_disabled(self, sms_disabled):\n\n self._sms_disabled = sms_disabled",
"def sms_code(self, sms_code):\n\n self._sms_code = sms_code",
"def _send_sms_api(self, account, number, message, sms_id):\n if account.provider != \"smsdev\":\n return super(SmsApi, self)._send_sms_api(\n account, number, message, sms_id)\n\n params = self._prepare_smsdev_params(account, number, message)\n\n url = 'https://api.smsdev.com.br/v1/send'\n # Ambiente de homologacao\n if account.smsdev_type == \"1\":\n url = \"http://localhost/smsdev/homolog\"\n\n response = requests.post(url, params=params)\n\n if response:\n result = json.loads(response.content)\n sms_id.message_id = result.get(\"id\")\n\n if result.get(\"situacao\") == \"OK\":\n sms_id.set_sent()\n\n print(response.content)\n return {\n \"sid\": result.get(\"id\"),\n \"state\": result.get(\"situacao\")\n }",
"def can_send_envelopes_via_sms(self, can_send_envelopes_via_sms):\n\n self._can_send_envelopes_via_sms = can_send_envelopes_via_sms",
"def sendSMS(self,subject,msg):\n\n\n self.sendMsg(self.cellNumber+self.smsGateway,subject,msg)",
"def send_sms(self, body):\n message = self.twilio_client.sms.messages.create(to=self.to_num, from_=self.from_num, body=body)",
"def send_sms(self, context, sms_payload):\n client = util.plivo_client(context[\"headers\"])\n sms_entity = PlivoSendsms(**sms_payload)\n response =client.messages.create(\n src=sms_entity.source_number,\n dst=sms_entity.destination_number,\n text=sms_entity.text).__dict__\n \n return response",
"def set_sms_telephone_number(self, telephone_number, email):\n ngo_user_profile = NGOUserProfile.objects.get(user__email=email)\n org_setting = OrganizationSetting.objects.get(organization__org_id=ngo_user_profile.org_id)\n smsc = SMSC(vumi_username=\"smsc\")\n smsc.save()\n outgoing_number = OutgoingNumberSetting(phone_number=telephone_number, smsc=smsc)\n outgoing_number.save()\n org_setting.sms_tel_number = telephone_number\n org_setting.outgoing_number = outgoing_number\n org_setting.save()",
"def sms(self):\r\n return sms.SMS(self)",
"def delete_sms(self, sms_id: int) -> SetResponseType:\n return self._connection.post_set('sms/delete-sms', {'Index': sms_id})",
"def sms_region_config(self) -> Optional[pulumi.Input['GoogleCloudIdentitytoolkitAdminV2SmsRegionConfigArgs']]:\n return pulumi.get(self, \"sms_region_config\")",
"def sms(self, study, alerts, **kwargs):\n print(\"'sms' not implemented\")",
"def can_enable_for_sms(self, can_enable_for_sms):\n\n self._can_enable_for_sms = can_enable_for_sms"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Serializes C{result} to JSON and writes it to C{request}.
|
def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK):
    response = {
        u'code': code.value,
        u'result': result}
    request.setHeader('content-type', 'application/json')
    request.setResponseCode(status)
    request.write(json.dumps(response))
    request.finish()
|
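A minimal usage sketch for the helper above inside a Twisted web resource (it assumes the same CODE constants and twisted.web imports already used by the snippet; the resource class itself is hypothetical):

from twisted.web import resource, server

class PingResource(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        # The helper sets the header, status code and JSON body, then calls request.finish().
        _writeJSONResponse({u'ping': u'pong'}, request)
        return server.NOT_DONE_YET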
[
"def _convert_to_JSON(result):\n response = make_response(json.dumps(result))\n response.headers['Access-Control-Allow-Origin'] = \"*\"\n response.mimetype = \"application/json\"\n return response",
"def make_response(request, result):\n response = request.response\n response.text = json.dumps(result)\n return response",
"def json_result(self):\n response_dict = {\"Request_id\": self.request_id, \"Response\": self.response, \"Reason\": self.reason}\n\n json_obj = json.dumps(response_dict)\n #json_obj = json.loads(json_dump)\n return json_obj",
"def send_rpc_result(req, result):",
"def _post_exec(self, result):\n result_dict = zeep.helpers.serialize_object(result, target_cls=dict)\n return result_dict",
"def post(self, request, *args, **kwargs):\n result = simplejson.loads(request.POST['result'])\n pk = self.fetch_result(result)\n return http.HttpResponse(pk, status=201)",
"def send_result_data(request):\n result = dkredis.pop_pyval(\"CLIENT-TOKEN-\" + request.REQUEST['access_token'])\n r = http.HttpResponse(json.dumps(result, indent=4))\n r['Content-Type'] = 'application/json'\n return r",
"def send_result(result, encode=None):\n # encode result if requested\n if encode == 'json':\n result = json.dumps(result)\n elif not encode is None:\n raise Exception('No such encoder: %s' % encode)\n \n # send \"result\" IPC message to manager\n AMQPProcessManagerIPC._send_ipc_message(['result', result])",
"def render_result(self, result, request):\n # if result is not None:\n length = len(result)\n request.setHeader(\"Content-Length\", length)\n request.write(result)\n \n # Close the request and write to log\n request.finish()",
"def _to_response(result: Union[Dict, Response]) -> Response:\n if isinstance(result, Response):\n return result\n\n logger.debug(\"Simple response detected, serializing return before constructing final response\")\n return Response(\n status_code=200,\n content_type=\"application/json\",\n body=json.dumps(result, separators=(\",\", \":\"), cls=Encoder),\n )",
"def _store(self, result, status):\n response = Response(\n host=result._host.get_name(),\n status=status,\n task=result._task.get_name(),\n payload=result._result)\n self._bucket.append(response)",
"def serialize_save(result):\n serializer = BasicSerializer(data=result)\n if serializer.is_valid():\n serializer.save()\n # removing from the queue\n del task_queue[task_queue.index(result['VIN'])]\n # Django creates a new connection per thread and this needs to be manually closed:\n connection.close()",
"def normalize_transaction_result(cls, result: JSON) -> JSON:\n ...",
"def return_with_json(self, request):\n return JsonResponse({\"result\": True, \"data\": [], \"message\": \"xxx\", \"code\": \"1500200\"})",
"def _send_response(self, result, peer):\n try:\n response = json.dumps(result).encode()\n self._socket.sendto(response, peer)\n except (ConnectionRefusedError, FileNotFoundError, PermissionError,\n TypeError):\n pass",
"def upload_result():\n if len(request.files) == 0:\n return jsonify(success=False), 400\n\n file = next(request.files.values())\n filename = secure_filename(file.filename)\n file.save(op.join(RESULTS_FOLDER, filename))\n\n result = Result()\n result.file = op.join(RESULTS_FOLDER, filename)\n\n result.detector_start_time = datetime.fromtimestamp(float(request.form[\"detector_start_time\"]))\n result.detector_end_time = datetime.fromtimestamp(float(request.form[\"detector_end_time\"]))\n\n db.session.add(result)\n db.session.commit()\n\n return jsonify(success=True, result_id=result.id), 200",
"def put_result(self, result):\n self._result_queue.put(result)\n self.__class__.finished_objects.put(self)",
"def setResult(result):",
"def jsonify(query_result):\n result = [i.to_dict() for i in query_result]\n return flask.jsonify(result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Maps a L{CODE} constant to a HTTP code.
|
def _mapErrorCodeToStatus(code):
    if code == 103:
        return http.NOT_FOUND
    return http.INTERNAL_SERVER_ERROR
|
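Taken together with the previous helper, this mapping would typically be used when writing an error response. A short hedged sketch, assuming a request object is in scope and an illustrative error payload shape:

status = _mapErrorCodeToStatus(103)  # -> http.NOT_FOUND
_writeJSONResponse({u'error': u'not found'}, request, status=status)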
[
"def mapStatusCode(self, status_code):\n # Calibrator uses LSF status codes natively, so do nothing here.\n return status_code",
"def mapStatusCode(self, status_code):\n return self.STATUS_MAP[status_code]",
"def setResponseCode(code, message=None):",
"def http_status(code):\n return \"200 OK\" if code == 200 else \"404 Not Found\"",
"def _get_request_code(self, data) -> int:\n return int(self._request_code)",
"def _get_code(self, req):\n args = req.args\n error = args.getfirst('error')\n state = args.getfirst('state', '')\n code = args.getfirst('code')\n expected_state = req.session.pop(self.STATE_SKEY, None)\n\n if error is not None:\n raise AuthenticationFailed(error)\n elif not expected_state or strings_differ(state, expected_state):\n raise AuthenticationError(\"incorrect 'state' in redirect\")\n elif not code:\n raise AuthenticationError(\"no 'code' returned in redirect\")\n return code",
"def http_return_code(res_data) -> (int, str):\n\n start = re.search(\"[0-9]{3}\", res_data).start()\n end_of_line = res_data.find(\"\\r\\n\")\n code = int(res_data[start:start+3])\n if end_of_line == -1:\n end_of_line = len(res_data)\n meaning = res_data[start+4:end_of_line]\n return code, meaning",
"def set_code(self, code):\n self.__code = code",
"def reply_with_code(self, code: int) -> None:",
"def code(self, code):\n self._code = code",
"def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)",
"def build_status(code: int) -> str:\n\n status = http.HTTPStatus(code)\n\n def _process_word(_word: str) -> str:\n if _word == \"OK\":\n return _word\n return _word.capitalize()\n\n reason = \" \".join(_process_word(word) for word in status.name.split(\"_\"))\n\n text = f\"{code} {reason}\"\n return text",
"def error(request, code=404):\n # 之前上课我说过不要用数字来作为字典的 key\n # 但是在 HTTP 协议中 code 都是数字似乎更方便所以打破了这个原则\n e = {\n 404: b'HTTP/1.x 404 NOT FOUND\\r\\n\\r\\n<h1>NOT FOUND</h1>',\n }\n return e.get(code, b'')",
"def set_status( code ):",
"def from_code(code):\n if not (10 <= code <= 23):\n raise GeogotchiError(\"invalid code: %s\" % code)\n return _geonames_error_order[code - 10]",
"def code_to_name(code):\n upper_code = code.upper()\n if upper_code in code_dict:\n return code_dict[upper_code]\n else:\n return code",
"def code(self):\n code = getattr(self, '_code', None)\n if not code:\n if self.has_body():\n code = 200\n else:\n code = 204\n\n return code",
"def code_map(self) -> dict[str, str | NAType]:\n code_map = {code: code for code in self.df[\"code\"]}\n code_map.update(self.code_fixes)\n code_map.update({code: pd.NA for code in self.ignored_codes})\n return code_map",
"def response(code):\n\n def decorator(func):\n func.wsgi_code = code\n return func\n return decorator"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify PayPal IPN data.
|
def verify(self, request):
    paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
    if not self.SANDBOX:
        paypalURL = 'https://www.paypal.com/cgi-bin/webscr'
    def _cb(response):
        if response == 'INVALID':
            raise PaypalError(
                'IPN data invalid. data: %s', (data,))
        elif response == 'VERIFIED':
            return True
        else:
            raise PaypalError('Unrecognized verification response: %s', (response,))
    data = request.content.read()
    params = '?cmd=_notify-validate&' + data
    d = getPage(paypalURL+params, method='POST')
    d.addCallback(_cb)
    return d
|
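For comparison, the same IPN postback check can be sketched synchronously outside Twisted. This is a hypothetical standalone helper using the requests library, not part of the class above; it mirrors the original's approach of prepending cmd=_notify-validate to the raw notification body:

import requests

def verify_ipn_sync(raw_body, sandbox=True):
    # Post the original IPN body back to PayPal and check the one-word reply.
    host = 'https://www.sandbox.paypal.com' if sandbox else 'https://www.paypal.com'
    resp = requests.post(host + '/cgi-bin/webscr',
                         data='cmd=_notify-validate&' + raw_body,
                         headers={'Content-Type': 'application/x-www-form-urlencoded'})
    return resp.text == 'VERIFIED'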
[
"def verify_ipn(data):\n data = dict(data)\n data['cmd'] = '_notify-validate'\n resp = requests.post(app.config['PAYPAL']['endpoint'], data=data)\n if resp.text == 'VERIFIED':\n return True\n return False",
"def test_paypal_email_check(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('obj=paypal_test ids=0 inv=test_invoice_1 '\n 'pp=test@test.com usr=user@test.com'),\n 'receiver_email': 'test@test.com'\n }\n )\n self.assertNotEqual(\n settings.DEFAULT_PAYPAL_EMAIL, params['receiver_email']\n )\n self.paypal_post(params)\n\n ipn = PayPalIPN.objects.first()\n self.assertEqual(ipn.payment_status, 'Completed')\n\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n self.assertEqual(email.to, ['user@test.com', settings.SUPPORT_EMAIL])\n self.assertEqual(\n email.subject,\n '{} Payment processed for test payment to PayPal email '\n 'test@test.com'.format(\n settings.ACCOUNT_EMAIL_SUBJECT_PREFIX\n )\n )",
"def validate_with_paypal(request, validate_type):\n if validate_type == 'PDT':\n # we are on return url\n # need to verify if payment is completed\n # MERCHANT_TXN_KEY is your PDT identity token\n params = {\n 'cmd': '_notify-synch',\n 'tx': request.GET.get('tx', ''),\n 'at': settings.MERCHANT_TXN_KEY\n }\n data = urllib.urlencode(params)\n\n # Sample response:\n # SUCCESS\n # first_name=Jane+Doe\n # last_name=Smith\n # payment_status=Completed payer_email=janedoesmith%40hotmail.com\n # payment_gross=3.99\n # mc_currency=USD custom=For+the+purchase+of+the+rare+book+Green+Eggs+%26+Ham\n\n # If the response is FAIL, PayPal recommends making sure that:\n # The Transaction token is not bad.\n # The ID token is not bad.\n # The tokens have not expired.\n\n else: # IPN\n data = 'cmd=_notify-validate&%s' % request.POST.urlencode()\n\n # The response is one single-word: VERIFIED or INVALID\n\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n 'encoding': 'utf-8',\n \"Accept\": \"text/plain\"}\n request = urllib2.Request(settings.PAYPAL_POST_URL,\n data,\n headers)\n response = urllib2.urlopen(request)\n data = response.read()\n\n if validate_type == 'PDT':\n return parse_pdt_validation(data)\n else:\n return data.strip('\\n').lower() == 'verified', None",
"def test_confirm_ipn(request):\n content = \"VERIFIED\"\n # content = \"INVALID\"\n return HttpResponse(content=content)",
"def test_paypal_email_check_unexpected_status(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('obj=paypal_test ids=0 inv=test_invoice_1 '\n 'pp=test@test.com usr=user@test.com'),\n 'receiver_email': 'test@test.com',\n 'payment_status': 'Voided',\n }\n )\n self.assertNotEqual(\n settings.DEFAULT_PAYPAL_EMAIL, params['receiver_email']\n )\n self.paypal_post(params)\n\n ipn = PayPalIPN.objects.first()\n self.assertEqual(ipn.payment_status, 'Voided')\n\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n self.assertEqual(email.to, ['user@test.com', settings.SUPPORT_EMAIL])\n\n self.assertEqual(\n email.subject,\n '{} Unexpected payment status VOIDED '\n 'for test payment to PayPal email test@test.com'.format(\n settings.ACCOUNT_EMAIL_SUBJECT_PREFIX\n ),\n )",
"def test_paypal_notify_with_invalid_voucher_code(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n ev_type = baker.make_recipe('booking.event_type_PC')\n\n user = baker.make_recipe('booking.user')\n booking = baker.make_recipe(\n 'booking.booking_with_user', event__event_type=ev_type,\n event__name='pole level 1', user=user,\n event__paypal_email=settings.DEFAULT_PAYPAL_EMAIL\n )\n pptrans = helpers.create_booking_paypal_transaction(\n booking.user, booking\n )\n\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('obj=booking ids={} usr={} cde=invalid_code apd={}'.format(\n booking.id, booking.user.email, booking.id\n )),\n 'invoice': b(pptrans.invoice_id),\n 'txn_id': b'test_txn_id',\n }\n )\n self.assertIsNone(pptrans.transaction_id)\n self.paypal_post(params)\n self.assertEqual(PayPalIPN.objects.count(), 1)\n ppipn = PayPalIPN.objects.first()\n self.assertFalse(ppipn.flag)\n self.assertEqual(ppipn.flag_info, '')\n\n booking.refresh_from_db()\n self.assertTrue(booking.paid)\n\n # email to user, studio, and support email\n self.assertEqual(len(mail.outbox), 3)\n support_email = mail.outbox[2]\n self.assertEqual(support_email.to, [settings.SUPPORT_EMAIL])\n self.assertEqual(\n support_email.subject,\n '{} There was some problem processing payment for booking '\n 'id {}'.format(settings.ACCOUNT_EMAIL_SUBJECT_PREFIX, booking.id)\n )\n self.assertIn(\n 'The exception raised was \"EventVoucher matching query does '\n 'not exist.',\n support_email.body\n )",
"def test_paypal_email_check_pending_status(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('obj=paypal_test ids=0 inv=test_invoice_1 '\n 'pp=test@test.com usr=user@test.com'),\n 'receiver_email': 'test@test.com',\n 'payment_status': 'Pending',\n }\n )\n self.assertNotEqual(\n settings.DEFAULT_PAYPAL_EMAIL, params['receiver_email']\n )\n self.paypal_post(params)\n\n ipn = PayPalIPN.objects.first()\n self.assertEqual(ipn.payment_status, 'Pending')\n\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n self.assertEqual(email.to, ['user@test.com', settings.SUPPORT_EMAIL])\n self.assertEqual(\n email.subject,\n '{} Payment status PENDING for test payment to PayPal email '\n 'test@test.com'.format(\n settings.ACCOUNT_EMAIL_SUBJECT_PREFIX\n )\n )",
"def validate_ipn(view_func):\n def _wrapped_view_func(request, *args, **kwargs):\n if VALIDATE_IPN:\n response, content = Http().request(endpoint % request.body)\n if not content == 'VERIFIED':\n return HttpResponseForbidden()\n return view_func(request, *args, **kwargs)\n return _wrapped_view_func",
"def test_payment_received_with_duplicate_txn_flag(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n booking = baker.make_recipe(\n 'booking.booking_with_user',\n event__paypal_email=settings.DEFAULT_PAYPAL_EMAIL\n )\n pptrans = helpers.create_booking_paypal_transaction(\n booking.user, booking\n )\n # make an existing completed paypal ipn\n baker.make(PayPalIPN, txn_id='test_txn_id', payment_status='Completed')\n self.assertEqual(PayPalIPN.objects.count(), 1)\n\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('obj=booking ids={}'.format(booking.id)),\n 'invoice': b(pptrans.invoice_id),\n 'txn_id': 'test_txn_id'\n }\n )\n self.paypal_post(params)\n booking.refresh_from_db()\n ppipn = PayPalIPN.objects.all()[0]\n ppipn1 = PayPalIPN.objects.all()[1]\n\n self.assertFalse(ppipn.flag)\n self.assertTrue(ppipn1.flag)\n self.assertEqual(ppipn1.flag_info, 'Duplicate txn_id. (test_txn_id)')\n\n # even if the postback is verified, it is flagged and processed as\n # invalid\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(\n mail.outbox[0].subject,\n 'WARNING! Invalid Payment Notification received from PayPal'\n )",
"def test_receive_ipn_renewal(self):\n user = baker.make(\"User\", username=\"foobar\")\n user_plan = baker.make(\"UserPlan\", user=user)\n baker.make(\"RecurringUserPlan\", user_plan=user_plan)\n order = baker.make(\n \"Order\", user=user, status=Order.STATUS.COMPLETED, tax=12, amount=100\n )\n baker.make(\"BillingInfo\", user=user)\n order.user.save()\n pricing = baker.make(\"Pricing\")\n ipn = baker.make(\n \"PayPalIPN\",\n txn_type=\"subscr_payment\",\n payment_status=ST_PP_COMPLETED,\n receiver_email=\"fake@email.com\",\n mc_gross=112.00,\n custom=\"{\"\n f\"'first_order_id': {order.id},\"\n f\"'user_plan_id': {user_plan.id},\"\n f\"'pricing_id': {pricing.id},\"\n \"}\",\n )\n paypal_payment = receive_ipn(ipn)\n self.assertEqual(paypal_payment.paypal_ipn, ipn)\n self.assertEqual(Order.objects.count(), 2)\n self.assertNotEqual(paypal_payment.order, order)\n self.assertEqual(paypal_payment.order.amount, 100.00)\n self.assertEqual(paypal_payment.order.tax, 12.0)\n self.assertEqual(paypal_payment.order.total(), 112.00)\n user.userplan.refresh_from_db()\n new_recurring_plan = user.userplan.recurring\n self.assertEqual(new_recurring_plan.amount, Decimal(\"100.00\"))\n self.assertEqual(new_recurring_plan.tax, 12.0)\n invoice = Invoice.objects.get(type=Invoice.INVOICE_TYPES.INVOICE)\n self.assertEqual(invoice.total, 112.00)\n self.assertEqual(invoice.total_net, 100.00)\n self.assertEqual(invoice.tax_total, 12.0)\n self.assertEqual(invoice.tax, 12.0)",
"def test_receive_ipn_renewal_wrong_amount(self):\n user = baker.make(\"User\", username=\"foobar\")\n user_plan = baker.make(\"UserPlan\", user=user)\n baker.make(\"RecurringUserPlan\", user_plan=user_plan)\n order = baker.make(\n \"Order\", user=user, status=Order.STATUS.COMPLETED, tax=12, amount=100\n )\n order.user.save()\n pricing = baker.make(\"Pricing\")\n ipn = baker.make(\n \"PayPalIPN\",\n txn_type=\"subscr_payment\",\n payment_status=ST_PP_COMPLETED,\n receiver_email=\"fake@email.com\",\n mc_gross=123.45,\n custom=\"{\"\n f\"'first_order_id': {order.id},\"\n f\"'user_plan_id': {user_plan.id},\"\n f\"'pricing_id': {pricing.id},\"\n \"}\",\n )\n with self.assertRaisesRegex(Exception, \"Received amount doesn't match\"):\n receive_ipn(ipn)",
"def test_paypal_notify_url_with_invalid_date(self):\n self.assertFalse(PayPalIPN.objects.exists())\n self.paypal_post(\n {\n \"payment_date\": b\"2015-10-25 01:21:32\",\n 'charset': b(CHARSET),\n 'txn_id': 'test',\n }\n )\n ppipn = PayPalIPN.objects.first()\n self.assertTrue(ppipn.flag)\n self.assertEqual(\n ppipn.flag_info,\n 'Invalid form. (payment_date: Invalid date format '\n '2015-10-25 01:21:32: not enough values to unpack (expected 5, got 2))'\n )\n\n self.assertEqual(mail.outbox[0].to, [settings.SUPPORT_EMAIL])\n self.assertEqual(\n mail.outbox[0].subject,\n 'WARNING! Error processing Invalid Payment Notification from PayPal'\n )\n self.assertEqual(\n mail.outbox[0].body,\n 'PayPal sent an invalid transaction notification while attempting '\n 'to process payment;.\\n\\nThe flag info was \"Invalid form. '\n '(payment_date: Invalid date format '\n '2015-10-25 01:21:32: not enough values to unpack (expected 5, got 2))\"'\n '\\n\\nAn additional error was raised: Unknown object type for '\n 'payment'\n )",
"def has_paypal(self):\n from django.core.validators import validate_email\n try:\n validate_email(self.paypal_email)\n return True\n except ValidationError:\n return False",
"def test_receive_ipn_completed_order_completed(self):\n user = baker.make(\"User\", username=\"foobar\")\n user_plan = baker.make(\"UserPlan\", user=user)\n order = baker.make(\n \"Order\", user=user, status=Order.STATUS.COMPLETED, amount=100\n )\n order.user.save()\n pricing = baker.make(\"Pricing\")\n ipn = baker.make(\n \"PayPalIPN\",\n txn_type=\"subscr_payment\",\n payment_status=ST_PP_COMPLETED,\n receiver_email=\"fake@email.com\",\n mc_gross=100.00,\n custom=\"{\"\n f\"'first_order_id': {order.id},\"\n f\"'user_plan_id': {user_plan.id},\"\n f\"'pricing_id': {pricing.id},\"\n \"}\",\n )\n paypal_payment = receive_ipn(ipn)\n self.assertEqual(paypal_payment.paypal_ipn, ipn)\n self.assertNotEqual(paypal_payment.order, order)",
"def test_receive_ipn_cancellation(self):\n user = baker.make(\"User\", username=\"foobar\")\n user_plan = baker.make(\"UserPlan\", user=user)\n baker.make(\"RecurringUserPlan\", user_plan=user_plan, token=1234)\n # recurring_up = baker.make(\"RecurringUserPlan\", user_plan=user_plan)\n order = baker.make(\"Order\", user=user, status=Order.STATUS.COMPLETED)\n order.user.save()\n pricing = baker.make(\"Pricing\")\n ipn = baker.make(\n \"PayPalIPN\",\n txn_type=\"subscr_cancel\",\n receiver_email=\"fake@email.com\",\n subscr_id=1234,\n custom=\"{\"\n f\"'first_order_id': {order.id},\"\n f\"'user_plan_id': {user_plan.id},\"\n f\"'pricing_id': {pricing.id},\"\n \"}\",\n )\n paypal_payment = receive_ipn(ipn)\n self.assertEqual(paypal_payment, None)\n user_plan.refresh_from_db()\n self.assertFalse(hasattr(user_plan, \"recurring\"))",
"def is_unverified(self):\n return self.get_status() == self.STATUS_UNVERIFIED",
"def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned",
"def test_paypal_notify_url_with_refunded_status(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n booking = baker.make_recipe(\n 'booking.booking_with_user', payment_confirmed=True, paid=True,\n event__paypal_email=settings.DEFAULT_PAYPAL_EMAIL, status=\"CANCELLED\"\n )\n pptrans = helpers.create_booking_paypal_transaction(\n booking.user, booking\n )\n pptrans.transaction_id = \"test_trans_id\"\n pptrans.save()\n\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('obj=booking ids={}'.format(booking.id)),\n 'invoice': b(pptrans.invoice_id),\n 'payment_status': b'Refunded'\n }\n )\n self.paypal_post(params)\n booking.refresh_from_db()\n self.assertFalse(booking.payment_confirmed)\n self.assertFalse(booking.paid)\n\n self.assertEqual(\n len(mail.outbox), 1,\n \"NOTE: Fails if SEND_ALL_STUDIO_EMAILS!=True in env/test settings\"\n )\n # emails sent to studio and support\n self.assertEqual(\n mail.outbox[0].to,\n [settings.DEFAULT_STUDIO_EMAIL, settings.SUPPORT_EMAIL],\n )",
"def verify_totp(self, code):\n return self.totp.verify(code)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve a list of recent donations.
|
def recent(self, limit):
def _cb(players, donations):
donators = []
for donation in donations:
player = players[donation.donator.steamID].copy()
player['date'] = donation.date.asPOSIXTimestamp()
player['amount'] = str(donation.amount)
donators.append(player)
return donators
donations = []
steamids = set()
for donation in self.store.query(Donation,
AND(Donation.donator == Donator.storeID,
Donator.anonymous == False,
Donator.steamID != None),
limit=limit,
sort=Donation.date.descending):
steamids.add(donation.donator.steamID)
donations.append(donation)
d = self.getPlayerSummaries(steamids)
d.addCallback(_cb, donations)
return d
|
[
"def donation(self):\n donations = self.donations.filter(expiration__gte=datetime.date.today()).order_by('-amount')\n return donations[0] if donations else None",
"def list_of_donations():\n try:\n database.connect()\n query = (Donors\n .select(Donors, Donations)\n .join(Donations, JOIN.LEFT_OUTER))\n ls=[(item.donor_name, item.amount, item.date, item.firstname,\n item.lastname) for item in query.objects()]\n return ls\n except Exception as e:\n logger.info(f'Error getting all donations and for all')\n logger.info(e)\n\n finally:\n database.close()",
"def get_list_of_donations():\n try:\n logger.info('opening get_list_of_donations database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n query_results = (Donations.select(Donations.id, Donations.donation_date,\n Donations.donation_amount, Donations.donated_by_id.alias('fullname')))\n return query_results\n except Exception as e:\n logger.info(f'Error getting list of donors')\n logger.info(e)\n\n finally:\n logger.info('closing get_list_of_donations database call')\n database.close()",
"def donations(self):\n return self.caller.player.Dominion.assets.donations.all().order_by(\"amount\")",
"async def api_get_donations(g: WalletTypeInfo = Depends(get_key_type)):\n user = await get_user(g.wallet.user)\n wallet_ids = user.wallet_ids if user else []\n donations = []\n for wallet_id in wallet_ids:\n new_donations = await get_donations(wallet_id)\n donations += new_donations if new_donations else []\n return [donation.dict() for donation in donations] if donations else []",
"def GetAllDateOfPaymentOfCost():\n\n logs.logger.debug(\n \"Start to get back all payment date of Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all payment date of Cost objects from database.\")\n return [CostItems.dateOfPayment for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def list_donations():\n donor = input('Input Donor Name: ')\n controller.display_donor_donations(donor)",
"def get_list_of_donors():\n try:\n logger.info('opening get_list_of_donors database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n return Donors.select()\n\n except Exception as e:\n logger.info(e)\n\n finally:\n database.close()",
"def list_donations(self, caller):\n msg = \"{wDonations:{n\\n\"\n table = PrettyTable([\"{wGroup{n\", \"{wTotal{n\"])\n for donation in self.donations:\n table.add_row([str(donation.receiver), donation.amount])\n msg += str(table)\n caller.msg(msg)",
"def GetAllDifferentDateOfPaymentOfCost():\n\n logs.logger.debug(\n \"Start to get back all different payment date of \"\n \"Cost objects from database.\")\n try:\n ListOfAllDifferentDateOfPaymentOfCost = []\n searchedCostsItems = GetAllDateOfPaymentOfCost()\n for item in searchedCostsItems:\n if item not in ListOfAllDifferentDateOfPaymentOfCost:\n ListOfAllDifferentDateOfPaymentOfCost.append(item)\n logs.logger.info(\n \"Get back all different payment date of \"\n \"Cost objects from database.\")\n return ListOfAllDifferentDateOfPaymentOfCost\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def show_all():\n donations = Donation.select()\n return render_template('donations.jinja2', donations=donations)",
"def get_latest_transactions(self):",
"def get_donor_data():\n return [\n {\"name\": \"Ken Lenning\", \"donations\": [1325, 1232.24, 16325.5]},\n {\"name\": \"Thomas Timmy\",\"donations\": [521.3, 869.14]},\n {\"name\": \"Jane Chung\", \"donations\": [3259.3, 1282.74, 1525.5]},\n {\"name\": \"Lucy Nguyen\", \"donations\": [5253.82]},\n {\"name\": \"Ben Cormack\", \"donations\": [56230.25, 89532.0, 12025.8]}]",
"def get_max_donation_date_list():\n try:\n logger.info('opening get_max_donation_date_list database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n query_max_date = (Donations\n .select(Donations.donated_by_id.alias('fullname'),\n fn.MAX(Donations.donation_date).alias(\n 'last_donation_date'),\n Donations.donation_amount.alias('last_donation'))\n .group_by(Donations.donated_by_id)\n )\n return query_max_date\n\n except Exception as e:\n logger.info(e)\n\n finally:\n database.close()\n logger.info('closing get_max_donation_date_list database call')",
"def GetAllRegistrationDateOfCost():\n\n logs.logger.debug(\"Start to get back all registration date of\\\n Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all registration date of Cost objects from database.\")\n return [CostItems.registrationDate for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def fetch_review(self):\n c = self.db.cursor()\n c.execute(\"\"\"SELECT * FROM cards\n WHERE date_last_reviewed < (DATETIME('now', 'localtime', '-8 hours'))\n OR correct = 0\"\"\")\n rows = c.fetchall()\n cards = [\n Card(\n id=id,\n card_type=card_type,\n text=text,\n created=created,\n uri=uri,\n updated=updated,\n difficulty=difficulty,\n days_between=days_between,\n date_last_reviewed=date_last_reviewed,\n correct=correct,\n )\n for id, card_type, text, uri, created, updated, difficulty, days_between, date_last_reviewed, correct in rows\n ]\n cards = filter(lambda card: card.percent_overdue >= 1, cards)\n cards = sorted(cards, key=lambda card: card.percent_overdue)\n\n return cards[:20]",
"def populate_donations():\n\n logger.info('Adding donations to database...')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for donor, donations in donor_list.items():\n with database.transaction():\n for item in donations:\n donation = Donation.create(\n donation_amount=item,\n donor_name=donor\n )\n donation.save()\n logger.info('Database add successful.')\n logger.info('Printing records just added...')\n for saved_donation in Donation:\n logger.info(f'Donation in the amount of {saved_donation.donation_amount} from {saved_donation.donor_name}.')\n\n except Exception as e:\n logger.info(f'Unable to add donation to database.')\n logger.info(e)\n\n finally:\n logger.info('Closing database.')\n database.close()",
"def print_donor_list():\n print('Below are the existing donors: ')\n for donor in donors_data:\n print('\\t- ', donor[\"name\"], ' ', donor[\"donations\"])",
"def GetAllDifferentRegistrationDateOfCost():\n\n logs.logger.debug(\n \"Start to get back all different registration date of \"\n \"Cost objects from database.\")\n try:\n ListOfAllDifferentRegistrationDateOfCost = []\n searchedCostsItems = GetAllRegistrationDateOfCost()\n for item in searchedCostsItems:\n if item not in ListOfAllDifferentRegistrationDateOfCost:\n ListOfAllDifferentRegistrationDateOfCost.append(item)\n logs.logger.info(\n \"Get back all different registration date of \"\n \"Cost objects from database.\")\n return ListOfAllDifferentRegistrationDateOfCost\n except Exception as e:\n logs.logger.error(e, exc_info=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks that certain pipeline files are not modified from template output. Iterates through the pipeline's directory content and compares specified files against output generated from the template using the pipeline's metadata. The file content must be neither modified nor missing.
|
def files_unchanged(self):
passed = []
failed = []
ignored = []
fixed = []
could_fix = False
# Check that we have the minimum required config
required_pipeline_config = {"manifest.name", "manifest.description", "manifest.author"}
missing_pipeline_config = required_pipeline_config.difference(self.nf_config)
if missing_pipeline_config:
return {"ignored": [f"Required pipeline config not found - {missing_pipeline_config}"]}
try:
prefix, short_name = self.nf_config["manifest.name"].strip("\"'").split("/")
except ValueError:
log.warning(
"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'"
)
short_name = self.nf_config["manifest.name"].strip("\"'")
prefix = "nf-core"
# NB: Should all be files, not directories
# List of lists. Passes if any of the files in the sublist are found.
files_exact = [
[".gitattributes"],
[".prettierrc.yml"],
["CODE_OF_CONDUCT.md"],
["LICENSE", "LICENSE.md", "LICENCE", "LICENCE.md"], # NB: British / American spelling
[os.path.join(".github", ".dockstore.yml")],
[os.path.join(".github", "CONTRIBUTING.md")],
[os.path.join(".github", "ISSUE_TEMPLATE", "bug_report.yml")],
[os.path.join(".github", "ISSUE_TEMPLATE", "config.yml")],
[os.path.join(".github", "ISSUE_TEMPLATE", "feature_request.yml")],
[os.path.join(".github", "PULL_REQUEST_TEMPLATE.md")],
[os.path.join(".github", "workflows", "branch.yml")],
[os.path.join(".github", "workflows", "linting_comment.yml")],
[os.path.join(".github", "workflows", "linting.yml")],
[os.path.join("assets", "email_template.html")],
[os.path.join("assets", "email_template.txt")],
[os.path.join("assets", "sendmail_template.txt")],
[os.path.join("assets", f"nf-core-{short_name}_logo_light.png")],
[os.path.join("docs", "images", f"nf-core-{short_name}_logo_light.png")],
[os.path.join("docs", "images", f"nf-core-{short_name}_logo_dark.png")],
[os.path.join("docs", "README.md")],
[os.path.join("lib", "nfcore_external_java_deps.jar")],
[os.path.join("lib", "NfcoreTemplate.groovy")],
]
files_partial = [
[".gitignore", ".prettierignore", "pyproject.toml"],
]
# Only show error messages from pipeline creation
logging.getLogger("nf_core.create").setLevel(logging.ERROR)
# Generate a new pipeline with nf-core create that we can compare to
tmp_dir = tempfile.mkdtemp()
# Create a template.yaml file for the pipeline creation
template_yaml = {
"name": short_name,
"description": self.nf_config["manifest.description"].strip("\"'"),
"author": self.nf_config["manifest.author"].strip("\"'"),
"prefix": prefix,
}
template_yaml_path = os.path.join(tmp_dir, "template.yaml")
with open(template_yaml_path, "w") as fh:
yaml.dump(template_yaml, fh, default_flow_style=False)
test_pipeline_dir = os.path.join(tmp_dir, f"{prefix}-{short_name}")
create_obj = nf_core.create.PipelineCreate(
None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path
)
create_obj.init_pipeline()
# Helper functions for file paths
def _pf(file_path):
"""Helper function - get file path for pipeline file"""
return os.path.join(self.wf_path, file_path)
def _tf(file_path):
"""Helper function - get file path for template file"""
return os.path.join(test_pipeline_dir, file_path)
# Files that must be completely unchanged from template
for files in files_exact:
# Ignore if file specified in linting config
ignore_files = self.lint_config.get("files_unchanged", [])
if any([f in ignore_files for f in files]):
ignored.append(f"File ignored due to lint config: {self._wrap_quotes(files)}")
# Ignore if we can't find the file
elif not any([os.path.isfile(_pf(f)) for f in files]):
ignored.append(f"File does not exist: {self._wrap_quotes(files)}")
# Check that the file has an identical match
else:
for f in files:
try:
if filecmp.cmp(_pf(f), _tf(f), shallow=True):
passed.append(f"`{f}` matches the template")
else:
if "files_unchanged" in self.fix:
# Try to fix the problem by overwriting the pipeline file
shutil.copy(_tf(f), _pf(f))
passed.append(f"`{f}` matches the template")
fixed.append(f"`{f}` overwritten with template file")
else:
failed.append(f"`{f}` does not match the template")
could_fix = True
except FileNotFoundError:
pass
# Files that can be added to, but that must contain the template contents
for files in files_partial:
# Ignore if file specified in linting config
ignore_files = self.lint_config.get("files_unchanged", [])
if any([f in ignore_files for f in files]):
ignored.append(f"File ignored due to lint config: {self._wrap_quotes(files)}")
# Ignore if we can't find the file
elif not any([os.path.isfile(_pf(f)) for f in files]):
ignored.append(f"File does not exist: {self._wrap_quotes(files)}")
# Check that the file contains the template file contents
else:
for f in files:
try:
with open(_pf(f), "r") as fh:
pipeline_file = fh.read()
with open(_tf(f), "r") as fh:
template_file = fh.read()
if template_file in pipeline_file:
passed.append(f"`{f}` matches the template")
else:
if "files_unchanged" in self.fix:
# Try to fix the problem by overwriting the pipeline file
with open(_tf(f), "r") as fh:
template_file = fh.read()
with open(_pf(f), "w") as fh:
fh.write(template_file)
passed.append(f"`{f}` matches the template")
fixed.append(f"`{f}` overwritten with template file")
else:
failed.append(f"`{f}` does not match the template")
could_fix = True
except FileNotFoundError:
pass
# cleaning up temporary dir
shutil.rmtree(tmp_dir)
return {"passed": passed, "failed": failed, "ignored": ignored, "fixed": fixed, "could_fix": could_fix}
|
[
"def validate_modified_files(self, modified_files, tag='master'): # noqa: C901\n click.secho(\"\\n================= Running validation on modified files =================\", fg='bright_cyan')\n _modified_files = set()\n for mod_file in modified_files:\n if isinstance(mod_file, tuple):\n continue\n if not any(non_permitted_type in mod_file.lower() for non_permitted_type in ALL_FILES_VALIDATION_IGNORE_WHITELIST):\n if 'releasenotes' not in mod_file.lower():\n if 'readme' not in mod_file.lower():\n _modified_files.add(mod_file)\n changed_packs = self.get_packs(_modified_files)\n for file_path in modified_files:\n old_file_path = None\n # modified_files are returning from running git diff.\n # If modified file was renamed\\moved, file_path could be a tuple containing original path and new path\n if isinstance(file_path, tuple):\n old_file_path, file_path = file_path\n file_type = find_type(file_path)\n if file_type:\n file_type = file_type.value\n pack_name = get_pack_name(file_path)\n ignored_errors_list = self.get_error_ignore_list(pack_name)\n # unified files should not be validated\n if file_path.endswith('_unified.yml'):\n continue\n print('\\nValidating {}'.format(file_path))\n self.check_for_spaces_in_file_name(file_path)\n if not checked_type(file_path):\n print_warning('- Skipping validation of non-content entity file.')\n continue\n\n if re.search(TEST_PLAYBOOK_REGEX, file_path, re.IGNORECASE):\n continue\n\n elif 'README' in file_path:\n readme_validator = ReadMeValidator(file_path, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not readme_validator.is_valid_file():\n self._is_valid = False\n continue\n\n structure_validator = StructureValidator(file_path, old_file_path=old_file_path,\n ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors, tag=tag)\n if not structure_validator.is_valid_file():\n self._is_valid = False\n\n if self.validate_id_set:\n if not self.id_set_validator.is_file_valid_in_set(file_path):\n self._is_valid = False\n\n elif checked_type(file_path, YML_INTEGRATION_REGEXES) or file_type == 'integration':\n integration_validator = IntegrationValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors,\n skip_docker_check=self.skip_docker_checks)\n if self.is_backward_check and not integration_validator.is_backward_compatible():\n self._is_valid = False\n\n if not integration_validator.is_valid_file(skip_test_conf=self.skip_conf_json):\n self._is_valid = False\n\n elif file_type == 'betaintegration':\n integration_validator = IntegrationValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors,\n skip_docker_check=self.skip_docker_checks)\n if not integration_validator.is_valid_beta_integration():\n self._is_valid = False\n\n elif checked_type(file_path, [PACKS_SCRIPT_NON_SPLIT_YML_REGEX]):\n script_validator = ScriptValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors,\n skip_docker_check=self.skip_docker_checks)\n if self.is_backward_check and not script_validator.is_backward_compatible():\n self._is_valid = False\n if not script_validator.is_valid_file():\n self._is_valid = False\n elif checked_type(file_path, PLAYBOOKS_REGEXES_LIST) or file_type == 'playbook':\n playbook_validator = PlaybookValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not 
playbook_validator.is_valid_playbook(is_new_playbook=False):\n self._is_valid = False\n\n elif checked_type(file_path, PACKAGE_SCRIPTS_REGEXES):\n unifier = Unifier(os.path.dirname(file_path))\n yml_path, _ = unifier.get_script_or_integration_package_data()\n # Set file path to the yml file\n structure_validator.file_path = yml_path\n script_validator = ScriptValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors,\n skip_docker_check=self.skip_docker_checks)\n if self.is_backward_check and not script_validator.is_backward_compatible():\n self._is_valid = False\n\n if not script_validator.is_valid_file():\n self._is_valid = False\n\n elif re.match(IMAGE_REGEX, file_path, re.IGNORECASE):\n image_validator = ImageValidator(file_path, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not image_validator.is_valid():\n self._is_valid = False\n\n # incident fields and indicator fields are using the same scheme.\n elif checked_type(file_path, JSON_INDICATOR_AND_INCIDENT_FIELDS):\n incident_field_validator = IncidentFieldValidator(structure_validator,\n ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not incident_field_validator.is_valid_file(validate_rn=True):\n self._is_valid = False\n if self.is_backward_check and not incident_field_validator.is_backward_compatible():\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_INDICATOR_TYPES_REGEXES):\n reputation_validator = ReputationValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not reputation_validator.is_valid_file(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_LAYOUTS_CONTAINER_REGEXES):\n layout_validator = LayoutsContainerValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not layout_validator.is_valid_layout(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_LAYOUT_REGEXES):\n layout_validator = LayoutValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not layout_validator.is_valid_layout(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_DASHBOARDS_REGEXES):\n dashboard_validator = DashboardValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not dashboard_validator.is_valid_dashboard(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_INCIDENT_TYPES_REGEXES):\n incident_type_validator = IncidentTypeValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not incident_type_validator.is_valid_incident_type(validate_rn=True):\n self._is_valid = False\n if self.is_backward_check and not incident_type_validator.is_backward_compatible():\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_CLASSIFIER_REGEXES) and file_type == 'mapper':\n error_message, error_code = Errors.invalid_mapper_file_name()\n if self.handle_error(error_message, error_code, file_path=file_path):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_CLASSIFIER_REGEXES_5_9_9):\n classifier_validator = ClassifierValidator(structure_validator, new_classifier_version=False,\n ignored_errors=ignored_errors_list,\n 
print_as_warnings=self.print_ignored_errors)\n if not classifier_validator.is_valid_classifier(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_CLASSIFIER_REGEXES):\n classifier_validator = ClassifierValidator(structure_validator, new_classifier_version=True,\n ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not classifier_validator.is_valid_classifier(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, JSON_ALL_MAPPER_REGEXES):\n mapper_validator = MapperValidator(structure_validator, ignored_errors=ignored_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not mapper_validator.is_valid_mapper(validate_rn=True):\n self._is_valid = False\n\n elif checked_type(file_path, CHECKED_TYPES_REGEXES):\n click.secho(f'Could not find validations for file {file_path}', fg='yellow')\n\n else:\n error_message, error_code = Errors.file_type_not_supported()\n if self.handle_error(error_message, error_code, file_path=file_path):\n self._is_valid = False\n\n self.changed_pack_data = changed_packs",
"def verify_files(self):\n\n print 'Testing files...'\n for fil in self.config['files']:\n fname = fil['name']\n\n local_path = os.path.join(os.getcwd(), 'files', fname)\n remote_path = fil['remote_path']\n test_message = 'File contents match %s -> %s' % (local_path, remote_path)\n\n if self.verify_file_contents(remote_path, local_path):\n pass_message(test_message)\n else:\n self.failed = True\n fail_message(test_message)",
"def check_files(flist):\n for f in flist:\n # ignore files that are to be specified using pipeline utils\n if 'PIPELINE' not in os.path.basename(f):\n if os.path.exists(f) is False:\n raise IOError('File does not exist: {}'.format(f))",
"def test_files_are_copied_when_not_matching_pattern_but_switch_was_used(self):\n\n renderings, deletions = self._execute_mocked_task({\n 'source': 'test',\n 'target': '/tmp',\n 'delete_source_files': False,\n 'pattern': '(.*).j2',\n '--exclude-pattern': '(.*).pyc',\n '--copy-not-matching-files': True,\n '--template-filenames': False\n })\n\n flatten_list_as_str = ' '.join(renderings)\n\n with self.subTest('Check --copy-not-matching-files - the non (.*).j2 files should be just copied ' +\n 'instead of rendered'):\n\n self.assertIn('sh(cp -p \"test/test_standardlib_jinja_render_directory.py\" ' +\n '\"/tmp//test_standardlib_jinja_render_directory.py\")', renderings)\n\n with self.subTest('Check --exclude-pattern'):\n self.assertNotIn('.pyc', flatten_list_as_str)",
"def checkAllFilesGenerated(self):\n root = get_exhale_root(self)\n containmentFolder = self.getAbsContainmentFolder()\n for node in root.all_nodes:\n if node.kind in [\"enumvalue\", \"group\"]:\n continue\n gen_file_path = os.path.join(containmentFolder, node.file_name)\n self.assertTrue(\n os.path.isfile(gen_file_path),\n \"File for {kind} node with refid=[{refid}] not generated to [{gen_file_path}]!\".format(\n kind=node.kind, refid=node.refid, gen_file_path=gen_file_path\n )\n )",
"def do_file_check(self):",
"def test_all_files(self):\n _stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n call_command('resolve_static', 'test/file.txt', verbosity='0')\n sys.stdout.seek(0)\n lines = [l.strip() for l in sys.stdout.readlines()]\n finally:\n sys.stdout = _stdout\n self.assertEquals(len(lines), 2)\n self.failUnless('project' in lines[0])\n self.failUnless('apps' in lines[1])",
"def validate_all_files_schema(self):\n click.secho('\\n================= Validates all of Content repo directories according '\n 'to their schemas =================\\n', fg='bright_cyan')\n # go over packs\n for pack_name in os.listdir(PACKS_DIR):\n pack_path = os.path.join(PACKS_DIR, pack_name)\n ignore_errors_list = self.get_error_ignore_list(pack_name)\n\n if not os.path.isdir(pack_path):\n # there are files that are not directories but returned from listdir like Packs/.DS_Store\n # skip them\n continue\n\n for dir_name in os.listdir(pack_path):\n dir_path = os.path.join(pack_path, dir_name)\n\n if dir_name not in CONTENT_ENTITIES_DIRS or \\\n dir_name in [constants.DASHBOARDS_DIR]:\n continue\n\n for file_name in os.listdir(dir_path):\n file_path = os.path.join(dir_path, file_name)\n\n if os.path.isfile(file_path):\n is_yml_file = file_path.endswith('.yml') and \\\n dir_name in (constants.INTEGRATIONS_DIR,\n constants.SCRIPTS_DIR,\n constants.PLAYBOOKS_DIR) and not file_path.endswith('_unified.yml')\n\n is_json_file = file_path.endswith('.json') and \\\n dir_name not in (\n constants.INTEGRATIONS_DIR, constants.SCRIPTS_DIR, constants.PLAYBOOKS_DIR)\n\n if is_yml_file or is_json_file:\n print(\"Validating scheme for {}\".format(file_path))\n self.is_backward_check = False # if not using git, no need for BC checks\n structure_validator = StructureValidator(file_path, ignored_errors=ignore_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not structure_validator.is_valid_scheme():\n self._is_valid = False\n\n else:\n inner_dir_path = file_path\n for inner_file_name in os.listdir(inner_dir_path):\n inner_file_path = os.path.join(inner_dir_path, inner_file_name)\n\n if os.path.isfile(inner_file_path):\n is_yml_file = inner_file_path.endswith('.yml') and \\\n (f'/{constants.INTEGRATIONS_DIR}/' in inner_file_path or\n f'/{constants.SCRIPTS_DIR}/' in inner_file_path or\n f'/{constants.PLAYBOOKS_DIR}/' in inner_file_path) and \\\n not inner_file_path.endswith('_unified.yml')\n\n if is_yml_file:\n print(\"Validating scheme for {}\".format(inner_file_path))\n self.is_backward_check = False # if not using git, no need for BC checks\n structure_validator = StructureValidator(inner_file_path,\n ignored_errors=ignore_errors_list,\n print_as_warnings=self.print_ignored_errors)\n if not structure_validator.is_valid_scheme():\n self._is_valid = False",
"def validate_files(dir, files_to_merge):\r\n for path in files_to_merge:\r\n pathname = dir.joinpath(path)\r\n if not pathname.exists():\r\n raise Exception(\"I18N: Cannot generate because file not found: {0}\".format(pathname))",
"def check_generated_files(out_dir, output_list_file):\n xcpd_dir = os.path.join(out_dir, \"xcp_d\")\n found_files = sorted(glob(os.path.join(xcpd_dir, \"**/*\"), recursive=True))\n found_files = [os.path.relpath(f, out_dir) for f in found_files]\n\n # Ignore figures\n found_files = [f for f in found_files if \"figures\" not in f]\n\n with open(output_list_file, \"r\") as fo:\n expected_files = fo.readlines()\n expected_files = [f.rstrip() for f in expected_files]\n\n if sorted(found_files) != sorted(expected_files):\n expected_not_found = sorted(list(set(expected_files) - set(found_files)))\n found_not_expected = sorted(list(set(found_files) - set(expected_files)))\n\n msg = \"\"\n if expected_not_found:\n msg += \"\\nExpected but not found:\\n\\t\"\n msg += \"\\n\\t\".join(expected_not_found)\n\n if found_not_expected:\n msg += \"\\nFound but not expected:\\n\\t\"\n msg += \"\\n\\t\".join(found_not_expected)\n raise ValueError(msg)",
"def main():\n\n with open(input_license_text, 'r') as inputfile:\n input_text_string = inputfile.read()\n normalized_text_string = normalizer.normalize_template(input_text_string)\n\n for filename in os.scandir(directory):\n file_name = str(filename.path)\n file_name = file_name.replace(str(directory), '')\n\n try:\n with open(filename.path, 'r') as input_file:\n input_template_file = input_file.read()\n input_template_file = normalizer.normalize_template(input_template_file)\n\n y = NormalizeTemplate(normalized_text_string, input_template_file)\n y.normalize_template()\n normalized_template_string = y.get_normalized_template()\n normalized_text_string = y.get_normalized_text()\n except BaseException:\n continue\n\n if (compare_normalized_files(normalized_template_string, normalized_text_string)):\n\n print(\"The Text matches with the Template- \" + file_name)",
"def test_files_not_modified():\n files = [\"example_entries.json\", \"watchlist.json\", \"countries.json\"]\n\n before = [os.path.getmtime(f) for f in files]\n decide(\"example_entries.json\", \"watchlist.json\", \"countries.json\")\n after = [os.path.getmtime(f) for f in files]\n\n assert before == after",
"def test_yaml_file_watch(self):\n # Set initial data\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_1.yml')\n\n with TemplateRenderThread('yaml_file_test.t', 'yaml_file_test.tmp.out') as renderer:\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_1.out')\n\n # Set updated data\n print('Updating file..')\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_2.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_2.out')",
"def test_duo_yaml_files_watch(self):\n # Set initial data\n _setup_template_value('yaml_file_test_values_first.tmp.yml', 'yaml_file_test_values_1.yml')\n _setup_template_value('yaml_file_test_values_second.tmp.yml', 'yaml_file_test_values_2.yml')\n\n with TemplateRenderThread('yaml_file_test_duo.t', 'yaml_file_test_duo.tmp.out') as renderer:\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_duo_expected_1.out')\n\n # Set updated data\n print('Updating first file..')\n _setup_template_value('yaml_file_test_values_first.tmp.yml', 'yaml_file_test_values_2.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_duo_expected_2.out')\n\n # Set updated data\n print('Updating second file..')\n _setup_template_value('yaml_file_test_values_second.tmp.yml', 'yaml_file_test_values_1.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_duo_expected_3.out')",
"def test_collect_files(self):\n test_files = (os.path.join(self.data_dir, 'sdR-12345678.fits'),\n os.path.join(self.data_dir, 'sdR-01234567.fits'),\n os.path.join(self.data_dir, 'spPlate-1234-54321.fits'),\n os.path.join(self.data_dir, 'extraneous.fits'))\n for f in test_files:\n open(f, 'a').close()\n root = os.path.join(os.environ[DM], 'doc', 'examples')\n files = scan_model(root)\n files_to_regexp(self.data_dir, files)\n\n self.assertInLog(log, (\"{0}/doc/examples/badModel.rst has no file \" +\n \"regexp!\").format(os.environ[DM]))\n collect_files(self.data_dir, files)\n self.assertInLog(log, 'Extraneous file detected: {0}'.format(test_files[3]))\n for f in files:\n if os.path.basename(f.filename) == 'badModel.rst':\n self.assertIsNone(f.regexp)\n self.assertIsNone(f._prototypes)\n else:\n self.assertIsNotNone(f.regexp)\n self.assertIsNotNone(f._prototypes)\n for f in test_files:\n os.remove(f)",
"def validate(self):\n for filename in [self.rmd_file, self.blogdown_file]:\n if not path.exists(path.join(self.source_folder, filename)):\n click.secho('Missing file: {}'.format(filename), fg='red')\n sys.exit(1)",
"def check_input_files(files):\r\n\r\n for filename in files:\r\n if not os.path.exists(filename):\r\n raise CommandError(\"{} does not exist\".format(filename))",
"def test_files(self):\r\n\r\n for path in self.get_files():\r\n self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path)) > self.start_time,\r\n msg='File not recently modified: %s' % os.path.basename(path))",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds the files needed for the packet to run. Takes a single parameter, files, fully analogous to the parameter of the same name in AddJob.
|
def AddFiles(self, files, retries=1):
JobPacketInfo(self.conn, self.id).AddFiles(files, retries)
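For orientation, a minimal sketch of how this method might be called. The stub classes below are not the real client; they only echo the delegation shown above, and treating files as a {name: local_path} mapping is an assumption made purely for this example (the excerpt only says it matches AddJob's parameter).

# Illustrative stub only -- not the real client API. It mirrors the call shape
# above, where AddFiles(files, retries) is forwarded to a per-packet info
# object; the {name: path} form of `files` is an assumption for the example.
class _FakePacketInfo:
    def __init__(self, conn, pck_id):
        self.conn, self.pck_id = conn, pck_id

    def AddFiles(self, files, retries):
        # A real client would retry the upload on transient errors.
        print(f"packet {self.pck_id}: uploading {sorted(files)} with up to {retries} retries")


class _FakePacket:
    def __init__(self, conn, pck_id):
        self.conn, self.id = conn, pck_id

    def AddFiles(self, files, retries=1):
        _FakePacketInfo(self.conn, self.id).AddFiles(files, retries)


_FakePacket(conn=None, pck_id="pck-1").AddFiles({"run.sh": "/tmp/run.sh"}, retries=2)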
|
[
"def add_files(self, files):\n pass",
"def associateFiles(self):\n files = WMJob.getFiles(self, type=\"id\")\n\n if len(files) > 0:\n addAction = self.daofactory(classname=\"Jobs.AddFiles\")\n addAction.execute(self[\"id\"], files, conn=self.getDBConn(),\n transaction=self.existingTransaction())\n\n return",
"def addFile(self, fi):",
"def addFiles(self, file_list):\n \n # Add the files to the queue\n for file_name in file_list:\n self.file_queue.put(file_name)\n \n # Write the queue to disk\n self.saveQueue()\n \n # Upload the data\n self.uploadData()",
"def add_files(self, filenames):\n for filename in filenames:\n self.add_file(filename)",
"def addFiles(self, filePaths): \n \n for filePath in filePaths: \n self.addFile(filePath)",
"def add_files(self, paths):\n for path in paths:\n self.add_file(path)",
"def add_file(self, path):\n pass",
"async def handle_add_file(\n self, _vuid: int, _uid: int, parameters: Dict[str, Any]):\n path = Path(parameters['path'])\n if path not in self.files:\n log(f'Client[{self.number}]: Add file: {path}')\n self.files.add(path)\n self.server.add_file(path, parameters['type_name'])",
"def add_file_arg(self, file):\n self.__arguments.append(file)\n self.__input_files.append(file)",
"def update_args_with_file(files, args):\n args['files'] = {}\n for file_name in files:\n file = files[file_name]\n filename = file.filename\n args['files'][file_name] = filename\n return args",
"def add_files_to_resource(self, resource, files_to_add, upload_folder):\n for fl in files_to_add:\n uploaded_file = UploadedFile(file=open(fl, 'rb'),\n name=os.path.basename(fl))\n new_res_file = add_file_to_resource(\n resource, uploaded_file, folder=upload_folder, add_to_aggregation=False\n )\n\n # make each resource file we added part of the logical file\n self.add_resource_file(new_res_file)",
"def add_file(self, src, dst):\n self.data[\"files\"].append((src, dst))",
"def AddFiles( self, files, treeName=None, readHists=False ) :\n\n if not isinstance(files, list) :\n files = [files]\n\n if treeName is not None :\n self.chain = ROOT.TChain(treeName, self.name)\n for file in files :\n self.chain.Add(file)\n\n if readHists :\n for file in files :\n self.ofiles.append( ROOT.TFile.Open( file ) )",
"def add_files(self, files, commit_msg):\n paths = []\n for rpath in files:\n path = os.path.join(self.repodir, rpath)\n paths.append(path)\n with open(path, 'w') as f:\n f.write(files[rpath])\n if paths:\n self.git_cmd(['add'] + paths)\n self.commit(commit_msg)",
"def addFile(content, mimetype, id=None, title=''):",
"def add_files(self,count=None):\n message_buffer =[]\n if count is None:\n count = len(self.files)\n while count:\n count -= 1\n message_buffer.append((count,base64.b64encode(self.files.pop()),0)) # required to maintain compatibility with\n if len(message_buffer) > 9:\n self.queue.write_batch(message_buffer)\n message_buffer = []\n self.queue.write_batch(message_buffer)",
"def add_waveform_files(self, waveform_files):\n if isinstance(waveform_files, basestring):\n waveform_files = [waveform_files]\n for waveform_file in waveform_files:\n if not isinstance(waveform_file, basestring):\n msg = \"%s is not a filename.\" % waveform_file\n warnings.warn(msg)\n continue\n if not os.path.exists(waveform_file):\n msg = \"Warning: File %s does not exists.\"\n warnings.warn(msg)\n continue\n self.waveform_files.append(waveform_file)",
"def add_file(self, key, filename, file, is_path=True, content_type=''):\n file = open(file, 'rb').read() if is_path else file\n self.files.append((key, filename, file, content_type))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns an object for working with the queue named qname (see the Queue class).
|
def Queue(self, qname):
return Queue(self, qname)
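As a side note, here is a small self-contained sketch of the factory pattern this one-liner implements. The class and attribute names are invented stand-ins; only the Queue(self, qname) call shape mirrors the excerpt.

# Illustrative stand-in: the connector hands out lightweight queue handles
# bound to itself. Names below are invented for the example.
class _IllustrativeQueue:
    def __init__(self, conn, name):
        self.conn, self.name = conn, name

    def __repr__(self):
        return f"<Queue {self.name!r}>"


class _IllustrativeConnector:
    def Queue(self, qname):
        return _IllustrativeQueue(self, qname)


print(_IllustrativeConnector().Queue("render-jobs"))  # -> <Queue 'render-jobs'>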
|
[
"def get_queue(self):\n return Queue(self.name, connection=self.connection, serializer=self.serializer)",
"def get_queue(self, name):\n\t\treturn StyxQueue(self.pool, name)",
"def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)",
"def __getitem__(self, name):\n return self.Queues[name]",
"def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue",
"def get_queue():\n\n return multiprocessing.Queue()",
"def get_queue(queue_type):\n con = get_redis_connection()\n queue_type = QueueType(queue_type)\n\n queue = WorkflowQueue(queue_type.value, connection=con)\n\n return queue",
"def get_queue():\n watcher = Watcher()\n watcher.connect()\n queue = watcher.get_queue()\n return queue",
"def get_queue(self):\n return self.queue",
"def queue(self):\n\n\t\treturn self.q",
"def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj",
"def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj",
"def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)",
"def create_queue(self):\n return queue.Queue()",
"def create_queue(self, name: str):",
"def __init__(self):\n self.dequeue = DeQueue()",
"def get_queue_by_key(key):\n return subclass_by_name(key)",
"def query(self):\n uri = common.genuri('lqueue')\n queryobject = nvpquery.QOSQueueQuery(self.connection, uri)\n return queryobject",
"def get_queue(name):\n try:\n queue = sqs.get_queue_by_name(QueueName=name)\n except ClientError as error:\n raise error\n else:\n return queue"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a new packet named pckname. priority: execution priority of the packet. notify_emails: list of e-mail addresses to be notified about errors. wait_tags: list of tags that must be set before the packet may start running. set_tag: tag that is set when the packet finishes. kill_all_jobs_on_error: if one job fails, the remaining jobs stop running. resetable: flag controlling whether a chain of Resets can be propagated through the packet (default True). run_as: user under which the packet's jobs will run by default. Returns a JobPacket object.
|
def Packet(self, pckname, priority=MAX_PRIORITY, notify_emails=[], wait_tags=(), set_tag=None,
check_tag_uniqueness=False, resetable=True, kill_all_jobs_on_error=True, run_as=''):
try:
if isinstance(wait_tags, str):
raise AttributeError("wrong wait_tags attribute type")
return JobPacket(self, pckname, priority, notify_emails, wait_tags, set_tag, check_tag_uniqueness, resetable,
kill_all_jobs_on_error=kill_all_jobs_on_error, packet_name_policy=self.packet_name_policy, run_as=run_as)
except xmlrpc_client.Fault as e:
if 'DuplicatePackageNameException' in e.faultString:
self.logger.error(DuplicatePackageNameException(e.faultString).message)
raise DuplicatePackageNameException(e.faultString)
else:
raise
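To make the parameter list above concrete, here is a hedged usage sketch. Only the keyword names are taken from the code; the throwaway connector stub, packet name, tag names and e-mail address are assumptions introduced for illustration, and the stub's defaults are not the real ones.

# Purely illustrative call shape for Packet(); a stub stands in for the real
# connector so the snippet runs on its own.
class _StubConnector:
    def Packet(self, pckname, priority=0, notify_emails=(), wait_tags=(),
               set_tag=None, check_tag_uniqueness=False, resetable=True,
               kill_all_jobs_on_error=True, run_as=''):
        print(f"creating packet {pckname!r} (priority={priority}, run_as={run_as!r})")
        return object()


pck = _StubConnector().Packet(
    "nightly-rebuild",
    priority=1,
    notify_emails=["oncall@example.com"],
    wait_tags=("data-ready",),       # packet starts only after this tag is set
    set_tag="nightly-rebuild-done",  # tag raised when the packet finishes
    kill_all_jobs_on_error=True,     # one failed job stops the remaining jobs
    resetable=True,                  # allow Reset chains to propagate
    run_as="builder",
)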
|
[
"def prioritize(self, model: str = None, priority: bool = True):\n priority = bool(priority)\n jobs = self._find_jobs(model)\n for job in jobs:\n job.priority = priority",
"def submit_unequal_priority_jobs(default_condor, path_to_sleep):\n cluster = default_condor.submit(\n {\n \"executable\": path_to_sleep,\n \"arguments\": \"1\",\n \"universe\": \"scheduler\",\n \"log\": \"scheduler_priority-unequal.log\",\n \"priority\": \"$(process)\",\n },\n count=NUM_JOBS,\n )\n cluster.wait(condition=ClusterState.all_terminal)\n return cluster",
"def prepare_cron_jobs() -> None:\n\tcron = CronTab(user=\"root\")\n\tinterpreter_location = prepare_environment()\n\tfor cron_job_comment in [\"swap_release_job\"]:\n\t\tif not [i for i in cron.find_comment(cron_job_comment)]: # Thing returns iterable so we have to go through it to check if empty\n\t\t\tlogging.info(\"Cron jobs for running swap_release.py not found. Trying to create them\")\n\t\t\tjob = cron.new(command=f\"cd {pathlib.Path.cwd()} && {interpreter_location} {pathlib.Path.joinpath(pathlib.Path.cwd(), __file__)}\",\n\t\t\t comment=cron_job_comment) # create job\n\t\t\tif \"swap_release_job\" in cron_job_comment:\n\t\t\t\tjob.hour.every(1)\n\t\t\t\tjob.minute.on(1)\n\t\t\tjob.enable() # enable job\n\t\t\tcron.write() # close cron and write changes",
"def check_waiting_jobs(self):\n check_threshold = ((datetime.datetime.now() -\n datetime.timedelta(minutes=self.check_minutes)).\n strftime(\"%Y-%m-%dT%H:%M:%S\"))\n try:\n self.cursor.execute(\n \"select * from jobs where status = 'waiting' and \"\n \"(timestamp is NULL or timestamp < :check_threshold)\",\n {\"check_threshold\": check_threshold})\n jobrows = self.cursor.fetchall()\n except sqlite3.OperationalError:\n self.notify_admin_exception(\"Error checking waiting jobs\")\n raise\n\n for jobrow in jobrows:\n (jobid, email, build, label, runs, tcpdump, video, datazilla,\n prescript, postscript,\n status, started, timestamp) = jobrow\n self.set_job(jobid, email, build, label, runs, tcpdump,\n video, datazilla, prescript, postscript,\n status, started, timestamp)\n\n self.logger.debug(\"checking_waiting_jobs: \"\n \"jobid: %s, email: %s, build: %s, label: %s, \"\n \"runs: %s, tcpdump: %s, video: %s, datazilla: %s, \"\n \"prescript: %s, postscript: %s, status: %s, \"\n \"started: %s, timestamp: %s\" %\n (jobid, email, build, label,\n runs, tcpdump, video, datazilla,\n prescript, postscript, status,\n started, timestamp))\n try:\n buildurl = self.check_build(build)\n except:\n self.notify_admin_exception(\"Build Error\")\n self.notify_user_exception(email,\n \"Build Error\")\n self.purge_job(jobid)\n continue\n\n if buildurl:\n self.job.status = status = \"pending\"\n build = buildurl\n try:\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n self.cursor.execute(\"update jobs set build=:build, \"\n \"status=:status, timestamp=:timestamp \"\n \"where id=:jobid\",\n {\"jobid\": jobid, \"build\": build,\n \"status\": status, \"timestamp\": timestamp})\n self.connection.commit()\n self.notify_user_info(email,\n \"job is pending availability of the build.\")\n except sqlite3.OperationalError:\n self.notify_admin_exception(\"Error updating job\")\n self.notify_user_exception(email,\n \"job failed\")\n self.purge_job(jobid)",
"def test_watch_scheduling_v1_priority_class(self):\n pass",
"def check_automatic_jobs(self):\n now = datetime.datetime.now()\n for aj in self.automatic_jobs:\n aj_datetime = aj[\"datetime\"]\n aj_hour = aj[\"hour\"]\n if (now > aj_datetime and now.day != aj_datetime.day and\n now.hour >= aj_hour):\n self.create_job(aj[\"email\"],\n aj[\"build\"],\n aj[\"label\"],\n aj[\"runs\"],\n aj[\"tcpdump\"],\n aj[\"video\"],\n aj[\"datazilla\"],\n aj[\"prescript\"],\n \"\",\n aj[\"locations\"],\n aj[\"speeds\"],\n aj[\"urls\"],\n aj[\"scripts\"])\n aj[\"datetime\"] = now",
"def test_create_scheduling_v1_priority_class(self):\n pass",
"def forkoff_start( self, job_list, delay_between_submitting_jobs=5 ):\n\n if self.processes_max == None or self.processes_max < 1: # double but just to be clear.\n nTerror(\"Can't do jobs without having processes_max; processes_max: %s\" % self.processes_max )\n return []\n\n ## Check job list for variable type errors\n for job in job_list:\n func = job[0]\n args = ()\n if len(job) > 1:\n args = job[1]\n if type(args) != types.TupleType:\n nTerror(\"given argument not of type Tuple for job: %s\", job)\n return []\n if not ( type(func) == types.FunctionType or\n type(func) == types.BuiltinFunctionType or\n type(func) == types.MethodType ) :\n nTerror(\"given function not of types:\")\n nTmessage(\"(Function, BuiltinFunctionType, or MethodType) for job:\")\n print job\n nTmessage(\"In stead type is : %s\", type(func))\n return []\n\n ## Maximum number of processes to do\n self.processes_todo = len( job_list )\n if self.processes_todo == 0:\n if self.verbosity:\n nTwarning(\"No new processes to do so none to start\")\n return []\n\n if self.verbosity > 1:\n nTmessage(\"Doing %s new process(es)\" % self.processes_todo)\n\n ## Keep making processes until an uncaught Exception occurs\n ## That would be a ctrl-c or so. The ctrl-c is also caught by\n ## subprocesses within python at least.\n ## I have not found a way to use this mechanism for jobs\n ## running in the background of a now killed terminal.\n ## When I read up, I got indications that the signal handlers\n ## for INTerrupt and QUIT might be rerouted and not available\n ## anymore. A hard kill is then needed.\n ##\n try:\n self.do_jobs( job_list, delay_between_submitting_jobs )\n except KeyboardInterrupt:\n if self.verbosity:\n nTwarning(\"Caught interrupt in parent.\")\n nTwarning(\"Trying to finish up by waiting for subprocess(es)\")\n\n ## Finish waiting for subprocesses\n ## Don't make any new!\n self.processes_todo = self.processes_started\n try:\n self.do_jobs( job_list, delay_between_submitting_jobs )\n except KeyboardInterrupt:\n if self.verbosity:\n nTwarning(\"Again caught interrupt in parent. Will start to kill running jobs.\")\n if self.hard_kill_started_jobs():\n nTerror(\"Failed hard killing running jobs\")\n raise KeyboardInterrupt\n\n ## Any subprocesses left\n if self.process_d:\n key_list = self.process_d.keys()\n key_list.sort()\n for pid in key_list:\n nTerror(\"subprocesses with fid [%s] was left behind with pid [%d]\" \\\n % ( self.process_d[ pid ], pid ))\n\n ## Check all were done\n if self.processes_finished != len( job_list ):\n if self.verbosity > 1:\n nTwarning(\"only %s out of %s jobs were started (not all successfully finished perhaps)\" \\\n % ( self.processes_finished, len( job_list ) ))\n\n ## Check if all finished correctly\n if self.processes_finished != self.processes_started:\n strMsg = \"ERROR: Number of processes finished and started do not match\"\n strMsg += repr(self.processes_finished) + \" \" + repr(self.processes_started)\n raise strMsg\n\n if self.verbosity > 1:\n nTmessage(\"Finished %s out of the %s processes successfully\" \\\n % ( len( self.done_jobs_list), self.processes_todo ))\n\n ## List of job numbers that were done.\n return self.done_jobs_list",
"def test_patch_scheduling_v1_priority_class(self):\n pass",
"def testJobKilling(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n locationAction = self.daoFactory(classname=\"Locations.New\")\n locationAction.execute(\"site1\", pnn=\"T2_CH_CERN\")\n\n testWorkflow = Workflow(spec=self.specUrl, owner=\"Steve\",\n name=\"wf001\", task=self.taskName)\n testWorkflow.create()\n testFileset = Fileset(name=\"TestFileset\")\n testFileset.create()\n\n for i in range(4):\n newFile = File(lfn=\"File%s\" % i, locations=set([\"T2_CH_CERN\"]))\n newFile.create()\n testFileset.addFile(newFile)\n\n testFileset.commit()\n testSubscription = Subscription(fileset=testFileset,\n workflow=testWorkflow,\n split_algo=\"FileBased\")\n testSubscription.create()\n\n splitter = SplitterFactory()\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=testSubscription)\n jobGroup = jobFactory(files_per_job=1)[0]\n\n assert len(jobGroup.jobs) == 4, \\\n \"Error: Splitting should have created four jobs.\"\n\n testJobA = jobGroup.jobs[0]\n testJobA[\"user\"] = \"sfoulkes\"\n testJobA[\"group\"] = \"DMWM\"\n testJobA[\"taskType\"] = \"Processing\"\n testJobB = jobGroup.jobs[1]\n testJobB[\"user\"] = \"sfoulkes\"\n testJobB[\"group\"] = \"DMWM\"\n testJobB[\"taskType\"] = \"Processing\"\n testJobC = jobGroup.jobs[2]\n testJobC[\"user\"] = \"sfoulkes\"\n testJobC[\"group\"] = \"DMWM\"\n testJobC[\"taskType\"] = \"Processing\"\n testJobD = jobGroup.jobs[3]\n testJobD[\"user\"] = \"sfoulkes\"\n testJobD[\"group\"] = \"DMWM\"\n testJobD[\"taskType\"] = \"Processing\"\n\n change.persist([testJobA], \"created\", \"new\")\n change.persist([testJobB], \"jobfailed\", \"executing\")\n change.persist([testJobC, testJobD], \"executing\", \"created\")\n\n change.persist([testJobA], \"killed\", \"created\")\n change.persist([testJobB], \"killed\", \"jobfailed\")\n change.persist([testJobC, testJobD], \"killed\", \"executing\")\n\n for job in [testJobA, testJobB, testJobC, testJobD]:\n job.load()\n self.assertEqual(job['retry_count'], 99999)\n self.assertEqual(job['state'], 'killed')\n\n return",
"def lantern_jobs():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not sending Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Sending Lantern jobs\".format(x=dates.now())\n LanternApi.make_new_jobs()",
"def prepare_ss_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index=1, rank='0', extraArgs=''):\n #New: only prepares a single job which allows multiple jobs to be queued\n #We must change the ownership of each of the copasi files to the user running this script\n #\n #We assume that we have write privileges on each of the files through our group, but don't have permission to actually change ownership (must be superuser to do this)\n #Thus, we workaround this by copying the original file, deleting the original, and moving the copy back to the original filename\n \n# import shutil\n# for i in range(len(self.get_optimization_parameters())):\n# for max in (0, 1):\n# copasi_file = os.path.join(self.path, 'auto_copasi_%d.cps' % (2*i + max))\n# temp_file = os.path.join(self.path, 'temp.cps')\n# shutil.copy2(copasi_file, temp_file)\n# os.remove(copasi_file)\n# os.rename(temp_file, copasi_file)\n# os.chmod(copasi_file, 0664) #Set as group readable and writable\n \n ############\n #Build the appropriate .job files for the sensitivity optimization task, write them to disk, and make a note of their locations\n condor_jobs = []\n \n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n \n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles='',\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename",
"def notify(self):\n\n def remind():\n \"\"\"\n this function shows a pop-up using windows notification\n \"\"\"\n ntftion.notify('reminder', f\"{self.notification}:\\n{self.work_name}\\n{self.work_datetime.hour}: \"\n f\"{self.work_datetime.minute} \", app_icon='reminder.ico', timeout=3)\n\n self.eisenhower_priority()\n if self.priority:\n while dt.now().day <= self.time_ntf.day and self.status != \"done\":\n if self.priority == 1 and dt.now().time() >= self.time_ntf.time():\n remind()\n time.sleep(5*60)\n\n elif (self.priority == 2) and ((dt.now().hour == self.time_ntf.hour)\n and (dt.now().time().minute == self.time_ntf.time().minute)):\n remind()\n break\n elif self.priority == 3 and dt.now().time().hour == 18:\n remind()\n time.sleep(24 * 3600)\n elif self.priority == 4 and dt.now().weekday() == 6:\n remind()\n time.sleep(7 * 24 * 3600)\n else:\n pass",
"def create_job(self, job_received, connected_workers):\n # self.created_jobs.append(Job(job_type=job['job_type'], models=job['models'], given=job[\"given\"]))\n\n # job = Job(job_type=job_received['job_type'], models=job_received['models'], given=job_received[\"given\"])\n # for job in self.created_jobs:\n given = list(job_received['given'].keys())[0]\n model = job_received['models'][0]\n logging.info(\"Checking for compatible workers for given: \" + str(given) + \" and model(s): \" + str(model) + \"\")\n logging.info(\"Connected workers: \" + str(connected_workers.copy()))\n logging.info(\"Connected workers keys: \" + str(connected_workers.copy().keys()))\n logging.info(\"Connected workers values: \" + str(connected_workers.copy().values()))\n\n\n compatible_workers = [worker for worker in connected_workers.copy().values() if worker.given == given and model in worker.models]\n logging.info(\"Found compatible workers: \" + str(compatible_workers))\n if compatible_workers is not None:\n for worker in compatible_workers:\n\n if job_received['job_type'] == \"UPDATE\":\n connected_workers[worker.id].user_queue.put(job_received)\n connected_workers[worker.id].status = \"Updating tasks\"\n elif job_received['job_type'] == \"MAINTAIN\":\n connected_workers[worker.id].maintain_queue.put(job_received)\n connected_workers[worker.id].status = \"Updating tasks\"\n\n\n requests.post(worker.location + '/AUGWOP/task', json=job_received)\n # Otherwise, let the frontend know that the request can't be served\n else:\n logging.info(f\"Augur does not have knowledge of any workers that are capable of handing the request: \" + str(job_received) + \". \\nPlease install a worker that can serve requests given `{given}`\")\n # frontend.send_multipart([client, b\"\", b\"NO-WORKERS\"])",
"def _populate_queue_by_priority(self):\n simdocs = list(database.get_docs_by_status(DBCOLLECTIONS.SIMULATION, PROCSTATUS.PENDING,\n projection=['_id', 'priority']))\n skipped = 0\n logger.info('Checking status of {} simulation tasks'.format(len(simdocs)))\n for simdoc in list(simdocs):\n if not database.check_sim_ready(simdoc['_id']):\n skipped += 1\n simdocs.remove(simdoc)\n if skipped:\n logger.info('Skipped {} simulation jobs because prerequisites were not met'.format(skipped))\n\n logger.info('Filling simulation queue with {} new jobs'.format(len(simdocs)))\n for simdoc in simdocs:\n sim_id = simdoc['_id']\n database.update_doc_status(DBCOLLECTIONS.SIMULATION, sim_id, PROCSTATUS.QUEUED)\n\n try:\n priority = int(simdoc['priority'])\n except:\n priority = 9999\n simtask = self.SimTask(simdoc['_id'], priority=priority)\n self.sim_queue.put(simtask)",
"def schedule_jobs() -> None:\n from sni.conf import CONFIGURATION as conf\n\n modules = {\n \"sni.db.jobs\": True,\n \"sni.esi.jobs\": True,\n \"sni.sde.jobs\": True,\n \"sni.uac.jobs\": True,\n \"sni.user.jobs\": True,\n \"sni.index.jobs\": True,\n \"sni.api.jobs\": True,\n \"sni.discord.jobs\": conf.discord.enabled,\n \"sni.teamspeak.jobs\": conf.teamspeak.enabled,\n }\n logging.debug(\"Scheduling jobs\")\n for module, include in modules.items():\n if include:\n import_module(module)",
"def submitted_job_status_sync(self):\n try:\n submitted_job_status_dict = {'submit' : 'unsure','unsure': 'doubt','doubt': 'error','active': 'finished'}\n self.logging.info(\"Now syncing submitted job status\")\n self.cursor.execute(\"SELECT * FROM tasks WHERE status in ('submit','unsure','doubt','active')\")\n self.submit_tasks = self.cursor.fetchone()\n if self.submit_tasks == None:\n self.logging.info(\"No jobs with status: submit,doubt or unsure\")\n else:\n for a_task in self.submit_tasks:\n (id,command,type,drive,sysip,submittime,status,logpath,exe_time,finish_time,exception) = a_task\n cmd = str(\"ps -ef\")\n cmd_out = os.popen(cmd).readlines()\n new_status = submitted_job_status_dict[status]\n for a_line in cmd_out:\n if command in a_line:\n new_status = 'active'\n\n if new_status == 'finished': # finish_time needs to be added to the job when it finishes\n finish_time = time.strftime(\"%Y%m%d-%H%M%S\")\n self.cursor.execute('UPDATE tasks SET status = ? , finish_time = ? WHERE id= ?', (new_status, finish_time, id))\n else: # no need to add finish_time when the job is not finished\n self.cursor.execute('UPDATE tasks SET status = ? WHERE id= ?',(new_status,id))\n self.logging.info('Status for task id: ' + id + ' Changed from : '+ status + ' to: ' + new_status )\n if type == 'segy_qc'and new_status == 'finished': # management of segy_qc_lock\n if os.path.exists(os.path.join(os.getcwd(),'segy_qc_lock')):\n try:\n os.remove(os.path.join(os.getcwd(),'segy_qc_lock'))\n self.logging.info(\"SEGY QC lock removed for task id: \" + id)\n except Exception as e:\n self.logging.error(e)\n\n except Exception as e:\n self.logging.error(e)",
"def testC_CondorTest(self):\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test will not continue\" % (nRunning))\n\n # Get the config and set the removal time to -10 for testing\n config = self.getConfig()\n config.BossAir.removeTime = -10.0\n config.BossAir.pluginNames.append('VanillaCondorPlugin')\n\n nJobs = 10\n\n jobDummies = self.createDummyJobs(nJobs = nJobs)\n\n baAPI = BossAirAPI(config = config)\n\n\n jobPackage = os.path.join(self.testDir, 'JobPackage.pkl')\n f = open(jobPackage, 'w')\n f.write(' ')\n f.close()\n\n sandbox = os.path.join(self.testDir, 'sandbox.box')\n f = open(sandbox, 'w')\n f.write(' ')\n f.close()\n\n jobList = []\n for j in jobDummies:\n tmpJob = {'id': j['id']}\n tmpJob['custom'] = {'location': 'malpaquet'}\n tmpJob['name'] = j['name']\n tmpJob['cache_dir'] = self.testDir\n tmpJob['retry_count'] = 0\n tmpJob['plugin'] = 'VanillaCondorPlugin'\n tmpJob['owner'] = 'mnorman'\n tmpJob['packageDir'] = self.testDir\n tmpJob['sandbox'] = sandbox\n tmpJob['priority'] = None\n jobList.append(tmpJob)\n\n\n info = {}\n #info['packageDir'] = self.testDir\n info['index'] = 0\n info['sandbox'] = sandbox\n\n\n\n baAPI.submit(jobs = jobList, info = info)\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nJobs)\n\n newJobs = baAPI._loadByStatus(status = 'New')\n self.assertEqual(len(newJobs), nJobs)\n\n\n baAPI.track()\n\n newJobs = baAPI._loadByStatus(status = 'New')\n self.assertEqual(len(newJobs), 0)\n\n newJobs = baAPI._loadByStatus(status = 'Idle')\n self.assertEqual(len(newJobs), nJobs)\n\n # Do a second time to make sure that the cache\n # doesn't die on us\n baAPI.track()\n\n newJobs = baAPI._loadByStatus(status = 'New')\n self.assertEqual(len(newJobs), 0)\n\n newJobs = baAPI._loadByStatus(status = 'Idle')\n self.assertEqual(len(newJobs), nJobs)\n\n\n baAPI.kill(jobs = jobList)\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0)\n\n # Try resubmission\n for j in jobList:\n j['retry_count'] = 1\n\n baAPI.submit(jobs = jobList, info = info)\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nJobs)\n\n newJobs = baAPI._loadByStatus(status = 'New')\n self.assertEqual(len(newJobs), nJobs)\n\n\n # See where they are\n baAPI.track()\n\n newJobs = baAPI._loadByStatus(status = 'New')\n self.assertEqual(len(newJobs), 0)\n\n newJobs = baAPI._loadByStatus(status = 'Idle')\n self.assertEqual(len(newJobs), nJobs)\n\n # Now kill 'em manually\n command = ['condor_rm', self.user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n pipe.communicate()\n\n # See what happened\n baAPI.track()\n\n newJobs = baAPI._loadByStatus(status = 'Idle')\n self.assertEqual(len(newJobs), 0)\n\n newJobs = baAPI._loadByStatus(status = 'Removed')\n self.assertEqual(len(newJobs), nJobs)\n\n # Because removal time is -10.0, jobs should remove immediately\n baAPI.track()\n\n # Assert that jobs were listed as completed\n myThread = threading.currentThread()\n newJobs = baAPI._loadByStatus(status = 'Removed', complete = '0')\n self.assertEqual(len(newJobs), nJobs)\n\n return",
"def test_watch_scheduling_v1_priority_class_list(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns an object for working with the tag tagname (see the Tag class)
|
def Tag(self, tagname):
return Tag(self, tagname)
|
[
"def lookup_tag(cls, name):\n tag = cls.query.filter_by(name=name).first()\n return tag",
"def __init__(self, tag):\r\n self.tag = tag.lower()",
"def tag(tag_name, parser):\n return parser >> (lambda x: Tag(tag_name, x))",
"def get(self, tagname):\n return self.tags.setdefault(tagname, ModelTag(tagname))",
"def get_tag(name):\n return query.get_tag(name=name)",
"def get_tag(tag):\r\n from tagging.models import Tag\r\n if isinstance(tag, Tag):\r\n return tag\r\n\r\n try:\r\n if isinstance(tag, types.StringTypes):\r\n return Tag.objects.get(name=tag)\r\n elif isinstance(tag, (types.IntType, types.LongType)):\r\n return Tag.objects.get(id=tag)\r\n except Tag.DoesNotExist:\r\n pass\r\n\r\n return None",
"def get_tag(self, tag_name):\n if not tag_name:\n return None\n for tag in self._tags:\n if tag._name == tag_name:\n return tag\n pass\n new_tag = self._add_tag(tag_name)\n return new_tag",
"def get_tag_name(self):\n\n pass",
"def get_tag(self):\n #if self.key is None:\n #raise UserWarning(\"Key not set yet, use first 'put()' before you use this method.\")\n #self.icon.icon_key = self.key\n# TODO detel icon!\n return TagStructure(name=self.name,\\\n color=self.color,icon_id=getattr(self,'icon_id'))",
"def get_tag_object(self) -> Any:\n return self.tags",
"def tag(self) -> 'Tag':\n # project/lineage must exist so let's fetch it outside of try-except\n project = self.project.key\n lineage = self.lineage.key\n try:\n generation = self.key\n except self.Listing.Empty: # generation doesn't exist\n LOGGER.debug('No previous generations found - using a null tag')\n return NOTAG\n return TAGS(self.registry, project, lineage, generation)",
"def _add_tag(self, tag_name):\n tag = TagInfo()\n tag._name = tag_name\n self._tags.append(tag)\n return tag",
"def get_tag(self, *names):\n while 1:\n tok = self.get_token()\n if tok.type not in [\"starttag\", \"endtag\", \"startendtag\"]:\n continue\n if names:\n if tok.data in names:\n return tok\n else:\n return tok",
"def tag_name(self):\n return self._tag_name",
"def get_tag_name(self, ):\n return self.tag_name",
"def get_tag(self, training_dataset, name):\n return self._tags_api.get(training_dataset, name)[name]",
"def tag(self, tag_id, label=None):\n tag = self.tagset[tag_id]\n if label:\n tag = '{}-{}'.format(tag, label)\n return tag",
"def get_class_for_tag(self, tag):\r\n return self._mapping[tag]",
"def __init__(self, caption, tag):\n self._caption = caption\n self._tag = tag.replace(\" \", \"\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns an object for manipulating a packet (see the JobPacketInfo class); takes one parameter, an object of type JobPacket
|
def PacketInfo(self, packet):
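        # Accept either a JobPacket instance or a packet-id string; anything else is rejected below.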
pck_id = packet.id if isinstance(packet, JobPacket) \
else packet if isinstance(packet, str) \
else None
if pck_id is None:
raise RuntimeError("can't create PacketInfo instance from %r" % packet)
return JobPacketInfo(self, pck_id)
|
[
"def Packet(self, pckname, priority=MAX_PRIORITY, notify_emails=[], wait_tags=(), set_tag=None,\n check_tag_uniqueness=False, resetable=True, kill_all_jobs_on_error=True, run_as=''):\n try:\n if isinstance(wait_tags, str):\n raise AttributeError(\"wrong wait_tags attribute type\")\n return JobPacket(self, pckname, priority, notify_emails, wait_tags, set_tag, check_tag_uniqueness, resetable,\n kill_all_jobs_on_error=kill_all_jobs_on_error, packet_name_policy=self.packet_name_policy, run_as=run_as)\n except xmlrpc_client.Fault as e:\n if 'DuplicatePackageNameException' in e.faultString:\n self.logger.error(DuplicatePackageNameException(e.faultString).message)\n raise DuplicatePackageNameException(e.faultString)\n else:\n raise",
"def get(self, packet: Dict[Any, Any]) -> Optional[BaseMessage]:\n try:\n typ = self.types[packet['type']]\n except KeyError:\n raise TypeError\n\n return typ(packet)",
"def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:",
"def get_packet(self) -> Union[MessagePacket, None]:\r\n if not self._packet_queue:\r\n return None\r\n\r\n # Pop the packet from the queue\r\n packet = self._packet_queue.popleft()\r\n\r\n return packet",
"def parse_buffer(buffer):\n return Packet(buffer)\n pass",
"def get_job_data(self):\n\n return JobData(self.data)",
"async def _process_pkt(self, pkt, label, info):\n plankey = info.plankey\n instance = info.instance\n\n if pkt is NoMessages or instance.process(pkt):\n info.mark_done()\n\n try:\n result = await instance.info()\n except asyncio.CancelledError:\n raise\n except Exception as error:\n hp.add_error(self.error_catcher, error)\n return\n\n if plankey is not None:\n self.session.fill(plankey, instance.serial, result)\n\n return instance.serial, label, result",
"def getPacketObject(self):\n packetObj = {}\n packetObj[\"bytes\"] = self.printByteArray(self.byteArray)\n packetObj[\"header\"] = self.header\n packetPayload = {}\n for key in self.payload:\n if key == \"payloadBytes\":\n packetPayload[\"payloadBytes\"] = self.printByteArray(self.payload[key])\n else:\n packetPayload[key] = self.payload[key]\n packetObj[\"payload\"] = packetPayload\n return packetObj",
"def wait_packet(self, clazz):\n while True:\n pkt = PacketRegistry.decode(self.read())\n if isinstance(pkt, clazz):\n return pkt",
"def _parse_single_job(self, job_dict) -> JobModel:\n return self.single_model(**job_dict)",
"def _get_packet_by_id(self, frame_id):\n if not (0 <= frame_id <= 255):\n raise ValueError(\"Frame ID must be between 0 and 255.\")\n\n queue = self._packet_listener.get_queue()\n\n packet = queue.get_by_id(frame_id, timeout=XBeeDevice.TIMEOUT_READ_PACKET)\n\n return packet",
"def packet_from_xml_packet(xml_pkt, psml_structure=None):\n if not isinstance(xml_pkt, lxml.objectify.ObjectifiedElement):\n parser = lxml.objectify.makeparser(huge_tree=True, recover=True)\n try:\n xml_pkt = lxml.objectify.fromstring(xml_pkt, parser)\n except lxml.etree.XMLSyntaxError:\n res = re.findall(r'<field name=\"num\" pos=\"0\" show=\"(.*?)\"', xml_pkt.decode(), re.S)[0]\n print(f'Packet conversion error from xml to python object for packet number {res}.')\n return\n if psml_structure:\n return _packet_from_psml_packet(xml_pkt, psml_structure)\n return _packet_object_from_xml(xml_pkt)",
"def getDataStructsJob(self):\n job = WMJob(name=self['name'])\n\n # Transfer all simple keys\n for key in self:\n if isinstance(self.get(key), (str, bytes, int, float)):\n job[key] = self[key]\n\n for fileObj in self['input_files']:\n job['input_files'].append(fileObj.returnDataStructsFile())\n\n job['mask'] = WMMask()\n for key in self[\"mask\"]:\n job[\"mask\"][key] = self[\"mask\"][key]\n\n job.baggage = self.baggage\n\n return job",
"def from_json(cls, jason):\r\n\r\n # convert packet type name to packet Type\r\n packet_type_name = jason.get(\"PacketType\")\r\n packet_type = None\r\n if packet_type_name:\r\n packet_type = cls.NAMES_PACKETTYPE.get(packet_type_name)\r\n\r\n message_packet = cls(source_address=jason.get(\"SourceAddress\"),\r\n destination_address=jason.get(\"DestinationAddress\"),\r\n packet_type=packet_type,\r\n packet_payload=jason.get(\"PayloadBytes\"),\r\n packet_lqi=jason.get(\"PacketLqi\"),\r\n packet_doppler=jason.get(\"PacketDoppler\"),\r\n packet_timestamp_count=jason.get(\"PacketTimestampCount\")\r\n )\r\n\r\n return message_packet",
"def new_packet():\n return rtmp_packet.RtmpPacket()",
"def ghostManagerData(self, _packet, _data=[]):\n \tpkt = _packet\n \treturn pkt",
"def __init__(self, interface, packet):\n self.iface_name = interface\n self.packet = packet\n l2tester.Sender.__init__(self, interface, str(packet))",
"def get_item(self, job_id: int) -> DatabaseEntry:\n try:\n with Session(self.engine) as session:\n job_data = (\n session.query(\n Job.__table__, Project.location, JobStatus.status, JobType.type\n )\n .select_from(Job)\n .where(Job.id == job_id)\n .join(Project, Job.project_id == Project.id)\n .join(JobStatus, Job.status_id == JobStatus.id)\n .join(JobType, Job.jobtype_id == JobType.id)\n .one()\n )\n return self._row_to_entry(job_data)\n except NoResultFound:\n raise ValueError(f\"No job with id {job_id} found!\") from None",
"def getJob(self, name=None):\n if name == None: \n name = self.jobstable.get_selectedRecordNames()[0]\n if name == None:\n return None, name\n jobid = self.DB.meta.peatsa_jobs[name]\n try:\n job = PEATSA.WebApp.Data.Job(jobid, self.connection)\n except:\n #print 'job not in database'\n return None,name\n return job, name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
generates initial hidden states for each agent
|
def generate_initial_hidden_states(self, batch_size, test_mode=False, caller=None):
# Set up hidden states for all levels - and propagate through the runner!
hidden_dict = {}
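        # Zero-initialised states per hierarchy level:
        #   level1: a single state (one copy per agent when observation noise is on and the caller is not the learner)
        #   level2: one state per agent pair (two copies per pair under observation noise outside the learner)
        #   level3: one state per agent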
hidden_dict["level1"] = th.stack([Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size)) for _
in range(self.n_agents if self.is_obs_noise(test_mode) and caller != "learner" else 1)])
hidden_dict["level2"] = th.stack([Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size)) for _
in range(len(sorted(combinations(list(range(self.n_agents)), 2)))*2
if self.is_obs_noise(test_mode) and caller != "learner" else
len(sorted(combinations(list(range(self.n_agents)), 2))))])
hidden_dict["level3"] = th.stack([Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size)) for _
in range(self.n_agents)])
if self.args.use_cuda:
hidden_dict = {_k:_v.cuda() for _k, _v in hidden_dict.items()}
return hidden_dict, "?*bs*v*t"
|
[
"def initial_agent_states(self) -> Dict[str, \"AgentState\"]:\n return self._initial_agent_states",
"def init_hidden_state(self, images):\n mean_images = images.mean(dim=1)\n h = self.init_h(mean_images) # (batch_size, decoder_dim)\n c = self.init_c(mean_images)\n return h, c",
"def _get_init_states(self):\n init1 = self.ext_builder.addInput(self.state_dims[:1], iclass=NormalInputNode)\n init2 = self.ext_builder.addInput(self.state_dims[1:], iclass=NormalInputNode)\n \n return [init1, init2]",
"def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n rnnten = initializer(shape=(self.batch, self.units))\n return rnnten",
"def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n hidden = initializer(shape=(self.batch, self.units))\n return hidden",
"def initial_biases(self):\n input_to_hidden = np.random.uniform(-1, 1, self.size[1])\n hidden_to_output = np.random.uniform(-1, 1, self.size[2])\n return [input_to_hidden, hidden_to_output]",
"def reset_agent_states(self):\n self.world.clear_agent_locs()\n\n for agent in self.world.agents:\n # Clear everything to start with\n agent.state[\"inventory\"] = {k: 0 for k in agent.inventory.keys()}\n agent.state[\"escrow\"] = {k: 0 for k in agent.inventory.keys()}\n agent.state[\"endogenous\"] = {k: 0 for k in agent.endogenous.keys()}\n # Add starting coin\n agent.state[\"inventory\"][\"Coin\"] = float(self.starting_agent_coin)\n\n # Clear everything for the planner\n self.world.planner.state[\"inventory\"] = {\n k: 0 for k in self.world.planner.inventory.keys()\n }\n self.world.planner.state[\"escrow\"] = {\n k: 0 for k in self.world.planner.escrow.keys()\n }\n\n # Place the agents randomly in the world\n for agent in self.world.get_random_order_agents():\n r = np.random.randint(0, self.world_size[0])\n c = np.random.randint(0, self.world_size[1])\n n_tries = 0\n while not self.world.can_agent_occupy(r, c, agent):\n r = np.random.randint(0, self.world_size[0])\n c = np.random.randint(0, self.world_size[1])\n n_tries += 1\n if n_tries > 200:\n raise TimeoutError\n self.world.set_agent_loc(agent, r, c)",
"def setup_initial_state(self):\n # collect the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. 
This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)",
"def init_states(self):\n self.filtered_state_means = None\n self.filtered_state_covariances = None\n self.predicted_state_means = None\n self.predicted_state_covariances = None\n self.smoothed_state_means = None\n self.smoothed_state_covariances = None",
"def initialize_hidden_state(self):\n return tf.keras.initializers.Zeros()(shape=(self.batch, self.units))",
"def test_agent_infer_states(self):\n\n ''' VANILLA method (fixed point iteration) with one hidden state factor and one observation modality '''\n num_obs = [5]\n num_states = [3]\n num_controls = [1]\n A = utils.random_A_matrix(num_obs, num_states)\n B = utils.random_B_matrix(num_states, num_controls)\n\n agent = Agent(A=A, B=B, inference_algo = \"VANILLA\")\n\n o = tuple([np.random.randint(obs_dim) for obs_dim in num_obs])\n qs_out = agent.infer_states(o)\n\n qs_validation = inference.update_posterior_states(A, o, prior=agent.D)\n\n for f in range(len(num_states)):\n self.assertTrue(np.isclose(qs_validation[f], qs_out[f]).all())\n\n ''' VANILLA method (fixed point iteration) with multiple hidden state factors and multiple observation modalities '''\n num_obs = [2, 4]\n num_states = [2, 3]\n num_controls = [2, 3]\n A = utils.random_A_matrix(num_obs, num_states)\n B = utils.random_B_matrix(num_states, num_controls)\n\n agent = Agent(A=A, B=B, inference_algo = \"VANILLA\")\n\n o = tuple([np.random.randint(obs_dim) for obs_dim in num_obs])\n qs_out = agent.infer_states(o)\n\n qs_validation = inference.update_posterior_states(A, o, prior=agent.D)\n\n for f in range(len(num_states)):\n self.assertTrue(np.isclose(qs_validation[f], qs_out[f]).all())\n\n ''' Marginal message passing inference with multiple hidden state factors and multiple observation modalities '''\n num_obs = [5]\n num_states = [3]\n num_controls = [1]\n A = utils.random_A_matrix(num_obs, num_states)\n B = utils.random_B_matrix(num_states, num_controls)\n\n agent = Agent(A=A, B=B, inference_algo = \"MMP\")\n\n o = tuple([np.random.randint(obs_dim) for obs_dim in num_obs])\n qs_pi_out = agent.infer_states(o)\n\n policies = control.construct_policies(num_states, num_controls, policy_len = 1)\n\n qs_pi_validation, _ = inference.update_posterior_states_v2(A, B, [o], policies, prior = agent.D, policy_sep_prior = False)\n\n for p_idx in range(len(policies)):\n for f in range(len(num_states)):\n self.assertTrue(np.isclose(qs_pi_validation[p_idx][0][f], qs_pi_out[p_idx][0][f]).all())\n\n ''' Marginal message passing inference with multiple hidden state factors and multiple observation modalities '''\n num_obs = [2, 4]\n num_states = [2, 2]\n num_controls = [2, 2]\n A = utils.random_A_matrix(num_obs, num_states)\n B = utils.random_B_matrix(num_states, num_controls) \n\n planning_horizon = 3\n backwards_horizon = 1\n agent = Agent(A=A, B=B, inference_algo=\"MMP\", policy_len=planning_horizon, inference_horizon=backwards_horizon)\n o = [0, 2]\n qs_pi_out = agent.infer_states(o)\n\n policies = control.construct_policies(num_states, num_controls, policy_len = planning_horizon)\n\n qs_pi_validation, _ = inference.update_posterior_states_v2(A, B, [o], policies, prior = agent.D, policy_sep_prior = False)\n\n for p_idx in range(len(policies)):\n for t in range(planning_horizon+backwards_horizon):\n for f in range(len(num_states)):\n self.assertTrue(np.isclose(qs_pi_validation[p_idx][t][f], qs_pi_out[p_idx][t][f]).all())",
"def init_states(batch_size, num_lstm_layer, num_hidden):\n init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n return init_c + init_h",
"def initial_states(self):\n return list(self.iter_initial_states())",
"def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.start = gameState.getAgentPosition(self.index)\n\n '''\n Your initialization code goes here, if you need any.\n '''\n self.pelletCount = 0\n for opponent in self.getOpponents(gameState):\n DefaultAgent.enemyPositions[opponent] = [gameState.getInitialAgentPosition(opponent)]",
"def _get_default_initial_states(self):\n return tf.zeros([self.batch_size, 1, self.num_hidden_units])",
"def new_generate_agents(num_new_agent,agents):\n #num_new_agent = rnd.randrange(num_new_generate)\n new_agents = [Agent_man() for new_agent_id in range(num_new_agent)]\n Move.state_task_for_new_agents(new_agents)\n Decision.initial_strategy(new_agents)\n for id,agent in enumerate(new_agents):\n print(f'new_agents No.{id}, state:{agent.state}, next_state:{agent.next_state}, strategy:{agent.strategy}, next_strategy:{agent.next_strategy}, task:{agent.task}')\n agents.extend(new_agents)",
"def test_reset_agent_VANILLA(self):\n\n num_obs = [2, 4]\n num_states = [2, 3]\n num_controls = [2, 3]\n A = utils.random_A_matrix(num_obs, num_states)\n B = utils.random_B_matrix(num_states, num_controls)\n\n agent = Agent(A=A, B=B, inference_algo = \"VANILLA\")\n\n init_qs = utils.obj_array_uniform(agent.num_states)\n self.assertTrue(all( [ (agent.qs[f] == init_qs[f]).all() for f in range(agent.num_factors)]) )\n self.assertTrue(agent.curr_timestep == 0)",
"def __init__(self, mdp, discount = 0.9, iterations = 1000):\n # In the first iteration, only update the value of the first state in\n # the states list. In the second iteration, only update the value of\n # the second. Keep going until you have updated the value of each state\n # once, then start back at the first state for the subsequent iteration.\n # If the state picked for updating is terminal, nothing happens in that\n # iteration. You should be indexing into the states variable defined in\n # the code skeleton.\n # self.mdp = mdp\n # self.discount = discount\n # self.iterations = iterations\n\n\n ValueIterationAgent.__init__(self, mdp, discount, iterations)",
"def reset_hidden_state(self):\n self.hi_out_val = self.hi_out_val_default"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initializes and returns an LSL outlet
|
from pylsl import StreamInfo, StreamOutlet  # LSL bindings that provide StreamInfo/StreamOutlet

def initializeOutlet(interface):
    # Describe a 4-channel, 256 Hz, float32 EEG stream and open an LSL outlet for it.
    info = StreamInfo('OpenBCI_EEG', 'EEG', 4, 256, 'float32', 'openbci12345')
    outlet = StreamOutlet(info)
    return outlet
|
[
"def connect_ls_to_lr(ls, lr, rp, rp_ip, rp_mac, db):\n ovn_nbctl(\"-- --id=@lrp create Logical_Router_port name=%s network=%s \"\n \"mac=%s -- add Logical_Router %s ports @lrp -- lsp-add %s \"\n \"rp-%s\" % (rp, rp_ip, rp_mac, lr, ls, rp), db)\n ovn_nbctl(\"set Logical-Switch-Port rp-%s type=router \"\n \"options:router-port=%s addresses=%s\" % (rp, rp, rp_mac), db)",
"def __init__(self, hass, area):\r\n\r\n self.area = area\r\n self.hass = hass\r\n self._name = f\"Area Light Control ({self.area.name})\"\r\n self._state = STATE_OFF\r\n\r\n _LOGGER.debug(f\"{self.name} Switch initializing.\")\r\n\r\n # Set attributes\r\n self._attributes = {}\r\n\r\n _LOGGER.info(f\"{self.name} Switch initialized.\")",
"def __init__ (self, scHandle):\n Greenlet.__init__(self)\n\n self.scHandle = scHandle",
"def __init__(self, label=None):\n\n self.terminals = (1,0)\n self.label = label if label is not None else \"Component\"",
"def __init__(self, *args):\n this = _ida_hexrays.new_lvar_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, mpls_ttl=None):\n super().__init__()\n self.mpls_ttl = mpls_ttl",
"def __init__(self, ns=None):\n this = _libSALOME_LifeCycleCORBA.new_SALOME_LifeCycleCORBA(ns)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, window):\n self._ptr = lib.SDL_GL_CreateContext(window._ptr)",
"def __init__(self, hass, area):\r\n\r\n self.area = area\r\n self.hass = hass\r\n self._name = f\"Area Presence Hold ({self.area.name})\"\r\n self._state = STATE_OFF\r\n\r\n _LOGGER.debug(f\"{self.name} Switch initializing.\")\r\n\r\n self.timeout_callback = None\r\n\r\n # Set attributes\r\n self._attributes = {}\r\n\r\n _LOGGER.info(f\"{self.name} Switch initialized.\")",
"def add_lamp(self, x, y):\n \n lamp = bpy.data.objects[\"ramplamp\"].copy()\n lamp.name = \"C_Lamp.000\"\n lamp.location = (x, y,\n self.city.ground.altitude_f(x, y))\n self.city.scene.objects.link(lamp)\n lamp.parent = self.object",
"def __init__(self):\n self._resourceManager = visa.ResourceManager()",
"def __init__(self, L):\n self.__L = int(L)\n # self.__levels is a list of the circuit's levels, and is set when\n # the levels first need to be accessed.\n self.__levels = None",
"def __init__(self, root, wd, initial_color=\"red\", *args, **kwargs):\n\n # Check to see if the supplied color is valid\n if initial_color not in (\"red\", \"yellow\", \"green\"):\n raise ValueError(initial_color + \" is not a valid color\")\n\n # Create the widget frame for the light\n self.frame = ttk.Frame(root, width=wd, *args, **kwargs)\n\n self.frame.grid(row=0, column=0) # Only widget in a 1x1 grid\n # Set initial color\n self.color = initial_color\n\n # Make lamp objects and store them a dictionary keyed by\n # their color\n self.lamps = dict(zip(('red', 'yellow', 'green'), \n (CompLamp(self, wd, 0, 'red'), \n CompLamp(self, wd, 1, 'yellow'), \n CompLamp(self, wd, 2, 'green'))))\n # Turn on lamp for initial color\n self.lamps[self.color].turn_on()",
"def __init__(self, ip):\n super(SLAM, self).__init__(ip)",
"def __init__(self, test, lo_index):\n r = test.vapi.create_loopback()\n self._sw_if_index = r.sw_if_index\n super(VppLoInterface, self).__init__(test)\n self._lo_index = lo_index",
"def setupLL_Native(self):\n self.LLN_Selector = slicer.qMRMLNodeComboBox()\n self.LLN_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLN_Selector.noneEnabled = True\n self.LLN_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLN_Selector.addEnabled = 0\n self.LLN_SelectorLabel = qt.QLabel('Native Look Locker')\n self.LLN_Selector.setToolTip(\"Select the pre contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLN_SelectorLabel, self.LLN_Selector)",
"def create(self):\n uri = common.genuri('lswitch', self.lswitch_uuid, 'lport')\n return super(LSwitchPort, self)._action(\"POST\", uri)",
"def init_lvar(self, lvars):\r\n n = lvars.size()[0]\r\n id = Variable(torch.LongTensor([GNN_IDS[\"lvar\"]]))\r\n if self._cuda:\r\n id = id.cuda()\r\n emb = self.embedding(id).repeat(n, 1)\r\n return emb",
"def setupLL_Enhanced(self):\n self.LLE_Selector = slicer.qMRMLNodeComboBox()\n self.LLE_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLE_Selector.noneEnabled = True\n self.LLE_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLE_Selector.addEnabled = 0\n self.LLE_SelectorLabel = qt.QLabel('Enhanced Look Locker')\n self.LLE_Selector.setToolTip(\"Select the post contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLE_SelectorLabel, self.LLE_Selector)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function recursively builds a string of manager-to-employee relationships, starting from the managers that do not have managers.
|
def findHierarchy(self):
        def __recursiveHelper(key_name, indent):
            # Accumulate and return the text for everyone reporting (directly or
            # indirectly) to key_name, indenting one level per step down the chain.
            output = ""
            if key_name in self.relations:
                for employee in self.relations[key_name].employees:
                    output += " " * indent + str(employee) + "\n"
                    output += __recursiveHelper(employee, indent + 1)
            return output

        # self.relations is a dictionary of manager-name string keys.
        # The employees of None are the top-ranking managers, so the traversal
        # starts from None at indentation level 0.
        return __recursiveHelper(None, 0)
|
[
"def recursive_str(root, res, tab):\n for node in root.childs:\n if len(node.childs) == 0:\n res.append(tab + node.name)\n else:\n res.append(tab + '{')\n Tree.recursive_str(node, res, tab + '\\t')\n res.append(tab + '}')",
"def generate_full_chain(chain):\n list_of_subchains = [extract_amino_acids(subchain) for subchain in chain]\n # Join list into single string separated by spaces\n return ' '.join(list_of_subchains)",
"def dump_rels(person, relationships=[], visited=[]):\n\n if person in visited:\n return relationships, visited\n\n visited.append(person)\n\n d_rels = list(RelationshipAssertion.objects.filter(person=person))\n i_rels = list(RelationshipAssertion.objects.filter(related_person=person))\n\n if not d_rels and not i_rels:\n return relationships, visited\n\n if d_rels:\n relationships += d_rels\n if i_rels:\n relationships += i_rels\n\n for rel in d_rels:\n dump_rels(rel.related_person, relationships, visited)\n\n for rel in i_rels:\n dump_rels(rel.person, relationships, visited)\n\n return relationships, visited",
"def get_movie_people_relation():\r\n for item in title:\r\n for key in people_dict.keys():\r\n for movie_title in people_dict[key]:\r\n if item == movie_title:\r\n if item in movie_people_dict.keys():\r\n if key not in movie_people_dict[item]:\r\n movie_people_dict[item] += ',' + key\r\n else:\r\n movie_people_dict[item] = key\r\n else:\r\n if item not in movie_people_dict.keys():\r\n movie_people_dict[item] = ''\r\n return movie_people_dict",
"def __str__(self):\n output = []\n for parent in sorted(self._byparent):\n for rule in sorted(self._byparent[parent]):\n output.append(str(rule))\n return '\\n'.join(output)",
"def phone_dir_nav():\n\n emps = Employee.query.all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")",
"def get_movie_people_relation(title, people_dict, movie_people_dict):\n for item in title:\n for key in people_dict.keys():\n for movie_title in people_dict[key]:\n if item == movie_title:\n if item in movie_people_dict.keys():\n if key not in movie_people_dict[item]:\n movie_people_dict[item] += ',' + key\n else:\n movie_people_dict[item] = key\n else:\n if item not in movie_people_dict.keys():\n movie_people_dict[item] = ''\n return movie_people_dict",
"def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted",
"def gap_readable_relations(group):\n g = copy.deepcopy(group)\n for i in range(len(g[1])):\n for j in range(len(g[1][i])):\n g[1][i][j] = 'g.'+str(int(g[1][i][j][1])+1)+g[1][i][j][2:]\n rel_string_list = ['*'.join(rel) for rel in g[1]]\n rel_single_string = '['+', '.join(rel_string_list)+']'\n return rel_single_string",
"def _return_string_all_descendants_rec(self, node, string, level):\n if len(node.get_children()) == 0:\n return string\n else:\n level += 1\n for child in node.get_children():\n string += \"| \"*level\n string += \"|---\" + str(child) + \"\\n\"\n string = self._return_string_all_descendants_rec(child, string, level)\n return string",
"def _create_intermediate_nodes(self, name):\n hierarchy = self._split_node_name(name, self.root_name)\n node_tree = [\n self.root_name+\n self._node_separator+\n self._node_separator.join(hierarchy[:num+1])\n for num in range(len(hierarchy))\n ]\n iobj = [\n (child[:child.rfind(self._node_separator)], child)\n for child in node_tree if child not in self._db\n ]\n for parent, child in iobj:\n self._db[child] = {\n 'parent':parent, 'children':[], 'data':[]\n }\n self._db[parent]['children'] = sorted(\n self._db[parent]['children']+[child]\n )",
"def build_clique(nodes:tuple) -> str:\n def gen():\n for s, t in itertools.permutations(nodes, 2):\n yield 'rel({},{}).'.format(s,t)\n return ''.join(gen())",
"def get_common_manager(filename):\n # Get a dict of {emp: manager, }\n emp_manager_dict = {}\n\n # Read the file in\n with open(filename) as f:\n lines = f.read().splitlines()\n\n # get the fixed lines\n count = lines[0]\n emp_a = lines[1]\n emp_b = lines[2]\n\n # Get the hierachy of manager employee\n for line in lines[3:]:\n manager, emp = line.split(' ')\n emp_manager_dict[emp] = manager\n\n # Use function below to get the chain of command for each employee in question\n h1 = get_hierachy_for_emp(emp_a, emp_manager_dict)\n h2 = get_hierachy_for_emp(emp_b, emp_manager_dict)\n\n # Compare the arrays from top manager to employee\n # Keep a track of last match and stop when hierachies diverge\n match = None\n for i in range(0, min(len(h1), len(h2))):\n # If current employee in both chains is the same and not equal to either employee then record a match\n if h1[i] == h2[i] and h1[i] != emp_a and h2[i] != emp_b:\n match = h1[i]\n # else no match, break\n else:\n break\n\n return match",
"def homogenize(stuff):\n l = []\n ll = [l]\n stuff = list(stuff[:])\n d = None\n while len(stuff):\n element = stuff[0]\n delement = depth(element)\n if delement == d or not l:\n l.append(element)\n else:\n l = [element]\n ll.append(l)\n d = delement\n stuff = stuff[1:]\n return join(*ll)",
"def generateAlignmentStrings(self):\n\n # Step 1: assign node IDs to columns in the output\n # column_index[node.ID] is the position in the toposorted node list\n # of the node itself, or the earliest node it is aligned to.\n column_index = {}\n current_column = 0\n\n ni = self.nodeiterator()\n for node in ni():\n other_columns = [column_index[other] for other in node.alignedTo\n if other in column_index]\n if len(other_columns) > 0:\n found_idx = min(other_columns)\n else:\n found_idx = current_column\n current_column += 1\n\n column_index[node.ID] = found_idx\n\n ncolumns = current_column\n\n # Step 2: given the column indexes, populate the strings\n # corresponding to the sequences inserted in the graph\n seqnames = []\n alignstrings = []\n for label, start in zip(self.__labels, self.__starts):\n seqnames.append(label)\n curnode_id = start\n charlist = ['-']*ncolumns\n while curnode_id is not None:\n node = self.nodedict[curnode_id]\n charlist[column_index[curnode_id]] = node.base\n curnode_id = node.nextNode(label)\n alignstrings.append(\"\".join(charlist))\n\n # Step 3: Same as step 2, but with consensus sequences\n consenses = self.allConsenses()\n for i, consensus in enumerate(consenses):\n seqnames.append('Consensus'+str(i))\n charlist = ['-']*ncolumns\n for path, base in zip(consensus[0], consensus[1]):\n charlist[column_index[path]] = base\n alignstrings.append(\"\".join(charlist))\n\n return list(zip(seqnames, alignstrings))",
"def make_to_string(front, mid, back, empty_repr):\n \"*** YOUR CODE HERE ***\"\n def printer(lnk):\n if lnk == Link.empty:\n return empty_repr\n else:\n return front + str(lnk.first) + mid + printer(lnk.rest) + back\n return printer",
"def printSocialGraph(memberM):\n\tfriendsatdepth={0:[memberM]}\t\t\t\t\t\t\t\t#due to the way that I am printing these object out, memberM needed to be in a list\n\temailaddressesused=set()\t\t\t\t\t\t\t\t\t#a new, empty set to hold all of the email addresses that have already been used.\n\temailaddressesused.add(memberM.email)\t\t\t\t\t\t#adding the first email to that list so that we don't loop back on ourselves\n\t\n\temailsbefore=thisdepth=0\n \twhile (emailsbefore != len(emailaddressesused)):\t\t\t#This loop is where we actually start building our output\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#If there are no email addresses added at any level, then we have found the maximum depth\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\tof the users network\n \t\temailsbefore=len(emailaddressesused)\n\t\tnextdepth=thisdepth+1\n\t\tfriendsatdepth[nextdepth]=[]\n\t\tfor eachfriend in friendsatdepth[thisdepth]:\n\t\t\tfriendsatdepth[nextdepth]+=getFriendsof(eachfriend, emailaddressesused)\n\t\tthisdepth+=1 \n\n\tfor eachlevel in friendsatdepth.keys():\t\t\t\t\t\t#This loop is where we start printing the output\n\t\tfor efriend in friendsatdepth[eachlevel]:\n\t\t\tprint(str(eachlevel)+\"\\t\"+efriend.name+\"\\t\"+efriend.email)",
"def get_maze_string(self):\n res = '' # result string\n for y in reversed(range(-1, self.size)):\n # +---+---+---+---+\n res += '+' # first pillar\n for x in range(self.size):\n # horizontal wall\n if not self.known(x, y, Maze.North):\n res += ' . '\n elif self.wall(x, y, Maze.North):\n res += '---'\n else:\n res += ' '\n res += '+' # pillar\n res += '\\n'\n # | | | G | |\n if y == -1:\n break\n res += '|' # first wall\n for x in range(self.size):\n # cell space\n if [x, y] in self.start:\n res += ' S '\n elif [x, y] in self.goals:\n res += ' G '\n else:\n res += ' '\n # vertical wall\n if not self.known(x, y, Maze.East):\n res += '.'\n elif self.wall(x, y, Maze.East):\n res += '|'\n else:\n res += ' '\n res += '\\n'\n return res",
"def str_reverse_recur(node):\n\n if node == None:\n return \"\"\n else:\n return LinkedList.str_reverse_recur(node.next) + \" \" + str(node.item)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract zipfile to a directory if password is correct.
|
import os
import zipfile

def extractfile(file, passwd):
    try:
        zipf = zipfile.ZipFile(file)
        # Extract into a directory named after the archive (the '.zip' suffix stripped).
        zipf.extractall(path=os.path.join(file[:-4]), pwd=str.encode(passwd))
        print('Password: {}'.format(passwd))
    except Exception:
        # Wrong password or unreadable archive: stay silent so the caller can try another candidate.
        pass
|
[
"def extract_zipfile(filename, extract_dir):\n util.file.maybe_rmtree(extract_dir)\n shutil.unpack_archive(filename, extract_dir, 'zip')",
"def extract_zip(zipfile, output_dir):\n output_dir = Path(output_dir)\n if zipfile.testzip() is None:\n for m in zipfile.namelist():\n fldr, name = re.split(\"/\", m, maxsplit=1)\n if name:\n content = zipfile.open(m, \"r\").read()\n with open(str(output_dir / name), \"wb\") as out:\n out.write(content)",
"def extract_7z_to_directory(path, directory, password=None):\r\n with py7zr.SevenZipFile(path, mode='r', password=password) as z:\r\n # password_protected = z.password_protected\r\n # needs_password = z.needs_password()\r\n # names = z.getnames()\r\n z.extractall(directory)\r\n return None",
"def _unpack_zipfile(filename, extract_dir):\n try:\n import zipfile\n except ImportError:\n raise ReadError('zlib not supported, cannot unpack this archive.')\n\n if not zipfile.is_zipfile(filename):\n raise ReadError(\"%s is not a zip file\" % filename)\n\n zip = zipfile.ZipFile(filename)\n try:\n for info in zip.infolist():\n name = info.filename\n\n # don't extract absolute paths or ones with .. in them\n if name.startswith('/') or '..' in name:\n continue\n\n target = os.path.join(extract_dir, *name.split('/'))\n if not target:\n continue\n\n _ensure_directory(target)\n if not name.endswith('/'):\n # file\n data = zip.read(info.filename)\n f = open(target, 'wb')\n try:\n f.write(data)\n finally:\n f.close()\n del data\n finally:\n zip.close()",
"def extract_update(update_archive, destination, password=None):\r\n with contextlib.closing(zipfile.ZipFile(update_archive)) as archive:\r\n if password:\r\n archive.setpassword(password)\r\n archive.extractall(path=destination)\r\n logger.debug(\"Update extracted\")\r\n return destination",
"def unzipfile(filename, passcode):\n # Password is SHA-256 hash of the pass code received\n password = hashlib.sha256(passcode.encode('utf-8')).hexdigest()\n # Unzip with password\n with ZipFile(filename) as zf:\n zf.extractall(pwd=bytes(password, 'utf-8'))",
"def extract_zip_contents(zip_file, destination):\n logging.info(\"Extracting ZIP File\")\n if os.path.isfile(zip_file):\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(destination)\n else:\n logging.error(\"%s not found.\", zip_file)\n sys.exit(\"ZIP is not the filesystem.\")",
"def test_unpack_zip_success(self) -> None:\n files = [\n \"regular_file1.txt\",\n os.path.join(\"dir\", \"dir_file1.txt\"),\n os.path.join(\"dir\", \"..\", \"dir_file2.txt\"),\n ]\n test_zip = self.make_zip_file(\"test_zip.zip\", files)\n unzip_file(test_zip, self.tempdir)",
"def extract_zip(zip_path, target_folder):\n with zipfile.ZipFile(zip_path) as archive:\n archive.extractall(target_folder)",
"def _extract_zip(zipfile, dest=None):\n tempObj = None\n if bool(dest) is False:\n import tempfile\n tempObj = tempfile.TemporaryDirectory()\n dest = tempObj.name\n from zipfile import ZipFile\n with ZipFile(zipfile, 'r') as zf:\n zf.extractall(dest)\n return tempObj",
"def extract_and_clean(zipper, zip_path, filename):\n zipper.extract(zip_path)\n if \"/\" in zip_path :\n os.rename(zip_path, filename)\n shutil.rmtree(zip_path.split('/')[0])",
"def extract_zip(archive, outdir=None):\n zip = ZipFile(archive, \"r\")\n if outdir is None:\n outdir = os.getcwd()\n zip.extractall(outdir)\n zip.close()",
"def extract_zip_file(download_location, extract_location):\n zip_ref = zipfile.ZipFile(download_location, \"r\")\n zip_ref.extractall(extract_location)\n zip_ref.close()",
"def SshExtractZip(host, zipname, dst):\n command = ['ssh', host, 'unzip', '-o', '-d', dst, zipname]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh unzip -o -d \"%s\" \"%s\" on \"%s\" (%s)' %\n (dst, zipname, host, result))\n\n # unzip will create directories with access 700, which is not often what we\n # need. Fix the permissions for the whole archive.\n command = ['ssh', host, 'chmod', '-R', '755', dst]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh chmod -R 755 \"%s\" on \"%s\" (%s)' %\n (dst, host, result))",
"def test_extract_known_zip():\n setup_functions()\n zip_known = raw_dir_files.format(file_name='zip_known')\n expected = test_engine.extract_zip(achive_zip,\n archivedir_write_path=zip_known,\n file_name='sample_zip.csv')\n\n assert ['sample_zip.csv'] == expected\n assert os.path.exists(os.path.join(\n raw_dir_files.format(file_name='zip_known'), 'sample_zip.csv'))\n os.system(\"rm -r {}\".format(zip_known))",
"def __extract_zip(self):\n archive_binaries_dir = None\n zip_file = zipfile.ZipFile(self.archive)\n try:\n extract_dir = tempfile.mkdtemp()\n archive_binaries_dir = self.__create_extraction_dir(\n zip_file.namelist(), extract_dir, zip_file.extract)\n finally:\n zip_file.close()\n return archive_binaries_dir, extract_dir",
"def getzip(self, url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if my.file_exists(done_file):\n self.print_log('{} already extracted; skipping. To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n self.print_log('Downloading {} as .'.format(url, zipfile))\n with closing(get(url, stream=True)) as r:\n with open(zipfile, 'wb') as fd:\n for chunk in r.iter_content():\n fd.write(chunk)\n self.print_log('Extracting into {}.'.format(unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass",
"def zip_extraction_directory( file_path, file_name ):\n files = [ filename for filename in os.listdir( file_path ) if not filename.endswith( '.zip' ) ]\n if len( files ) > 1:\n return os.path.abspath( file_path )\n elif len( files ) == 1:\n # If there is only on file it should be a directory.\n if os.path.isdir( os.path.join( file_path, files[ 0 ] ) ):\n return os.path.abspath( os.path.join( file_path, files[ 0 ] ) )\n raise ValueError( 'Could not find directory for the extracted file %s' % os.path.abspath( os.path.join( file_path, file_name ) ) )",
"def fromZip(self, zip_location,extract_location):\n zip_file = zipfile.ZipFile(zip_location,'r')\n zip_file.extractall(extract_location)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the profit of an order
|
def calculate_profit(self):
|
[
"def profit(self) -> float:\n total_profit = 0.\n for s in self.book:\n total_profit += s.profit_\n return total_profit",
"def profit(self):\n retail_value = 0\n wholesale_value = 0\n for bike in self.sold:\n retail_value += bike.total_cost() + (\n self.retail_margin * bike.total_cost())\n wholesale_value += bike.total_cost()\n return retail_value - wholesale_value",
"def profit(self):\n\t\tprofits = margin*number_sold",
"def profit(self) -> Dec:\n return self.wealth() - self.initial_wealth",
"def profit_and_loss(self):\n return sum(\n [e.price * e.quantity for entries_lst in self.trace for e in entries_lst]\n )",
"def profit_per_item(self, pk=None):\n total_profit = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit = total_paid - total_cost\n return total_profit",
"def get_profit(self):\n # Profit from previous transactions\n values = [t['value'] for t in self.transactions]\n\n profits = []\n base = None\n for v in values:\n if not base:\n base = v\n profit = v - base\n profits.append(profit)\n base = v\n\n return np.array(profits).sum()\n\n # Get all values to get profit\n #return np.array([ s['value'] for s in self.states ]).mean()",
"def profit(self, prices, dividends):\n assert len(prices) == len(dividends) + 1\n capital_profit = prices[-1] - prices[0]\n dividend_profit = self._dividend_profit(dividends, prices)\n return capital_profit + dividend_profit",
"def calc_profit(self, assignment):\n return sum([self.profit(agent, task)\n for agent, tasks in assignment.items() \n for task in tasks])",
"def envisaged_profit(self):\n profit = round(\n self.calcul_buy_nb_action() * self.take_profit - self.investment_price(),\n 2,\n )\n percent_profit = round(profit * 100 / self.capital, 2)\n return profit, percent_profit",
"def Net_accounting_profit(Revenues, Other_revenues, Expenses, Net_accounting_profit):\n Net_accounting_profit = Revenues + Other_revenues - Expenses\n return Net_accounting_profit",
"def operating_profit(self):\n return self._operating_profit",
"def profit_before_tax(self):\n return self._profit_before_tax",
"def get_profit(self):\n\n return int(self.init_profit * self.amount)",
"def calculate_profit(route, orderbook, pairs):\n amount = Decimal(1)\n\n def price_for(pair, trade_type):\n book = orderbook['_'.join(pair)]\n price, available = book[trade_type][0]\n return price\n\n for primary, secondary in zip(route[:-1], route[1:]):\n\n pair = (primary, secondary)\n\n if pair in pairs: # BUY\n price = price_for(pair, 'asks')\n amount = amount / Decimal(price) * (1 - TAKER_FEE)\n\n else: # SELL\n pair = (secondary, primary)\n price = price_for(pair, 'bids')\n amount = amount * Decimal(price) * (1 - TAKER_FEE)\n\n return amount",
"def calculate_profit(price_ago, current_price):\n profit = (current_price - price_ago) / float(price_ago) * 100\n return profit",
"def Total_taxable_profit(Net_accounting_profit, Total_additions_to_GP, Total_taxable_profit):\n Total_taxable_profit = Net_accounting_profit + Total_additions_to_GP\n return Total_taxable_profit",
"def profit(self, token: Address) -> Wad:\n return sum(map(lambda s: s.target_amount, filter(lambda s: s.target_token == token, self.steps)), Wad(0)) \\\n - sum(map(lambda s: s.source_amount, filter(lambda s: s.source_token == token, self.steps)), Wad(0))",
"def profit(self, startDate: str, endDate: str, annualized = False):\n if not annualized: # Regular profit function\n profit = 0\n for stockTicker in self.stocks:\n buy = self.market.stocks[stockTicker].prices[startDate]\n sell = self.market.stocks[stockTicker].prices[endDate]\n profit += sell - buy # Basic profit function\n if profit <= 0:\n print('Yikes! Next time use Fintual :S') # Haha!\n return profit\n else: # Annualized profit function\n # Obtains the relative revenue\n initialCapital = 0\n finalCapital = 0\n for stockTicker in self.stocks:\n initialCapital += self.market.stocks[stockTicker].prices[startDate]\n finalCapital += self.market.stocks[stockTicker].prices[endDate]\n revenue = (finalCapital - \n initialCapital) / initialCapital # == Relative revenue\n # Obtains the float fraction of years between two dates\n startDate = datetime.strptime(startDate, '%Y-%m-%d')\n endDate = datetime.strptime(endDate, '%Y-%m-%d')\n years = endDate - startDate\n yearFraction = int(years.days) / 365 # == Relative time\n # Obtains the annualized revenue\n annualizedRevenue = (((1 + revenue)**(1 / yearFraction)) - 1) \n return round(annualizedRevenue, 2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Removes a service from a list of existing services.
|
def RemoveServiceFromEndpoints(service_name, services):
new_services = []
if not isinstance(services, list):
return new_services
# TODO(user): Consider throwing an exception if the service is not
# already configured in the list of endpoints.
for service in services:
if not isinstance(service, dict) or 'name' not in service:
raise exceptions.ToolException(ValueError(
'Services are expected to be service dicts!'))
if service['name'] != service_name:
new_services.append(service)
return new_services
|
[
"def DeleteServices(self):\n for service in self.services.values():\n service.Delete()",
"def remove_from_service(self):\n pass",
"def remove(self, service):\n os.remove(os.path.join(self.directory, service))",
"def _purge_deleted_services(self):\n base_url = self.bleemeo_base_url\n service_url = urllib_parse.urljoin(base_url, '/v1/service/')\n\n deleted_services_from_state = (\n set(self.services_uuid) - set(self.core.services)\n )\n for key in deleted_services_from_state:\n service_uuid = self.services_uuid[key]['uuid']\n response = requests.delete(\n service_url + '%s/' % service_uuid,\n auth=(self.agent_username, self.agent_password),\n headers={\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': self.core.http_user_agent,\n },\n timeout=REQUESTS_TIMEOUT,\n )\n if response.status_code not in (204, 404):\n logging.debug(\n 'Service deletion failed. Server response = %s',\n response.content\n )\n continue\n del self.services_uuid[key]\n self.core.state.set_complex_dict(\n 'services_uuid', self.services_uuid\n )\n\n services = api_iterator(\n service_url,\n params={'agent': self.agent_uuid},\n auth=(self.agent_username, self.agent_password),\n headers={'User-Agent': self.core.http_user_agent},\n )\n\n services_registred = set()\n for data in services:\n services_registred.add(data['id'])\n\n deleted_services = []\n for key in list(self.services_uuid.keys()):\n (service_name, instance) = key\n entry = self.services_uuid[key]\n if entry is None or entry['uuid'] in services_registred:\n continue\n\n del self.services_uuid[key]\n deleted_services.append(key)\n\n self.core.state.set_complex_dict(\n 'services_uuid', self.services_uuid\n )\n\n if deleted_services:\n logging.debug(\n 'API deleted the following services: %s',\n deleted_services\n )\n self.core.update_discovery(deleted_services=deleted_services)",
"def DeleteServices(api_client, services):\n errors = {}\n for service in services:\n try:\n with console_io.ProgressTracker('Deleting [{0}]'.format(service.id)):\n api_client.DeleteService(service.id)\n except (calliope_exceptions.HttpException, operations.OperationError,\n operations.OperationTimeoutError) as err:\n errors[service.id] = str(err)\n\n if errors:\n printable_errors = {}\n for service_id, error_msg in errors.items():\n printable_errors[service_id] = '[{0}]: {1}'.format(service_id,\n error_msg)\n raise ServicesDeleteError(\n 'Issue deleting {0}: [{1}]\\n\\n'.format(\n text.Pluralize(len(printable_errors), 'service'),\n ', '.join(printable_errors.keys())) +\n '\\n\\n'.join(printable_errors.values()))",
"def delete_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover",
"def removeService(self, name):\n\n # locate the service in the tuple\n for groups in self.pinGroups:\n if group[0] == str(name):\n # replace the service name with unused\n group[0] = \"unused\"\n break",
"def delete_service_type_with_services(\n client: SymphonyClient, service_type: ServiceType\n) -> None:\n service_type_with_services = ServiceTypeServicesQuery.execute(\n client, id=service_type.id\n )\n if not service_type_with_services:\n raise EntityNotFoundError(entity=Entity.ServiceType, entity_id=service_type.id)\n services = service_type_with_services.services\n for service in services:\n RemoveServiceMutation.execute(client, id=service.id)\n RemoveServiceTypeMutation.execute(client, id=service_type.id)\n del SERVICE_TYPES[service_type.name]",
"def delete_service(self, id):\n raise NotImplementedError()",
"def _unregister_services(self):\n for service in self._services:\n self._dxl_client.unregister_service_sync(service, self.DXL_SERVICE_REGISTRATION_TIMEOUT)",
"def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # prefix is explained in the service_create().\n service = 'tripleo_' + container\n\n sysd_unit_f = systemctl.format_name(service)\n sysd_health_f = systemctl.format_name(service + '_healthcheck')\n sysd_timer_f = service + '_healthcheck.timer'\n sysd_health_req_d = sysd_unit_f + '.requires'\n\n for sysd_f in sysd_unit_f, sysd_health_f, sysd_timer_f:\n if os.path.isfile(sysdir + sysd_f):\n log.debug('Stopping and disabling systemd service for %s' %\n service)\n try:\n systemctl.stop(sysd_f)\n systemctl.disable(sysd_f)\n except systemctl.SystemctlException:\n log.exception(\"systemctl failed\")\n raise\n log.debug('Removing systemd unit file %s' % sysd_f)\n os.remove(sysdir + sysd_f)\n else:\n log.info('No systemd unit file was found for %s' % sysd_f)\n\n # Now that the service is removed, we can remove its \".requires\"\n if os.path.exists(os.path.join(sysdir, sysd_health_req_d)):\n log.info('Removing healthcheck require for %s' % service)\n shutil.rmtree(os.path.join(sysdir, sysd_health_req_d))",
"def unproxy_service(self, *service_ids) -> None:\n\n for service_id in service_ids:\n router_key = self._router_key(self._router_id(service_id))\n middleware_key = self._middleware_key(self._middleware_id(service_id))\n tservice_key = self._tservice_key(self._tservice_id(service_id))\n\n self._zk.delete(router_key, recursive=True)\n self._zk.delete(middleware_key, recursive=True)\n self._zk.delete(tservice_key, recursive=True)\n\n # prevents \"KV connection error: middlewares cannot be a standalone element\"\n middlewares_key = f\"/{self._prefix}/http/middlewares\"\n if not self._zk.get_children(middlewares_key):\n self._zk.delete(middlewares_key)\n\n self._trigger_configuration_update()",
"def unregister(self, service_name, service_addr, addr_cls=None):\n addr_cls = addr_cls or PlainAddress\n etcd_delete = True\n if addr_cls != PlainAddress:\n etcd_delete = False\n\n for service_name in service_name:\n key = self._form_service_key(service_name, service_addr)\n if etcd_delete:\n self._client.delete(key)\n else:\n self._client.put(addr_cls(service_addr).delete_value())\n\n self._services.get(service_addr, {}).discard(service_name)",
"def service_remove(path, service_name):\n compose_result, err = __load_docker_compose(path)\n if err:\n return err\n services = compose_result[\"compose_content\"][\"services\"]\n if service_name not in services:\n return __standardize_result(\n False, \"Service {} did not exists\".format(service_name), None, None\n )\n del services[service_name]\n return __dump_compose_file(\n path,\n compose_result,\n \"Service {} is removed from {}\".format(service_name, path),\n already_existed=True,\n )",
"def removeServiceClasses(self, ids=None, REQUEST=None):\n if not ids: return self()\n if isinstance(ids, basestring): ids = (ids,)\n for id in ids:\n self.serviceclasses._delObject(id)\n if REQUEST: return self()",
"def remove_service_account(self, service_name, account_name):\n\n for service in self.services:\n if service_name == service.name:\n service.remove_account(account_name)\n return True\n return False",
"def remove(self, synchronous=False):\n # Assemble a list of dependent services to remove\n dependent_services = []\n # Workers do not have service_registry field implemented yet\n if hasattr(self.app.manager, 'service_registry'):\n for service in self.app.manager.service_registry.active():\n for dependency in service.dependencies:\n if dependency.is_satisfied_by(self):\n dependent_services.append(service)\n if dependent_services:\n log.debug(\"Removing all services depending on {0}: {1}\".format(\n self.name, dependent_services))\n for dependent_service in dependent_services:\n log.debug(\"Initiating removal of service {0} because it \"\n \"depends on {1}\".format(dependent_service.get_full_name(),\n self.name))\n dependent_service.remove()\n log.debug(\"Setting dependent service {0} as not `activated`\"\n .format(dependent_service.get_full_name()))\n dependent_service.activated = False\n log.debug(\"Setting service {0} as not `activated`\".format(\n self.get_full_name()))\n self.activated = False",
"def delService(self):\n self.__selected.delete()\n row = self.currentRow()\n if row >= 1:\n self.__service_list.setCurrentRow(row - 1, QtCore.QItemSelectionModel.Select)\n self.refresh()",
"def remove_pilot_compute_service(self, pjs):\n self.pilot_job_services.remove(pjs)\n CoordinationAdaptor.update_cds(self.url, self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return distance of two keys in qwerty keyboard based on manhattan or euclidean distance.
|
def key_distance(self, x, y, type="manhattan"):
if type == "manhattan":
return self.manhattan_dist_matrix[self.keys.index(x), self.keys.index(y)]
elif type == "euclidean":
return self.euclidean_dist_matrix[self.keys.index(x), self.keys.index(y)]
|
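The method above only looks up precomputed matrices; a minimal sketch of how such matrices might be built is shown below, assuming a hypothetical (row, column) layout for a handful of keys — the coordinates and variable names are illustrative, not taken from the original class.

import numpy as np

# Hypothetical (row, column) positions for a few QWERTY keys.
coords = {"q": (0, 0), "w": (0, 1), "a": (1, 0), "s": (1, 1)}
keys = list(coords)

n = len(keys)
manhattan = np.zeros((n, n))
euclidean = np.zeros((n, n))
for i, a in enumerate(keys):
    for j, b in enumerate(keys):
        dr = coords[a][0] - coords[b][0]
        dc = coords[a][1] - coords[b][1]
        manhattan[i, j] = abs(dr) + abs(dc)
        euclidean[i, j] = (dr ** 2 + dc ** 2) ** 0.5

# Equivalent lookups to key_distance("q", "s", type="manhattan") / type="euclidean".
print(manhattan[keys.index("q"), keys.index("s")])  # 2.0
print(euclidean[keys.index("q"), keys.index("s")])  # ~1.41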
[
"def get_distance(self, button1, button2):\n return int(math.sqrt(sum([(i - j)*(i - j) for i, j in zip(button1, button2)])))",
"def qwerty_distance():\n from collections import defaultdict\n import math\n R = defaultdict(dict)\n R['-']['-'] = 0\n zones = [\"dfghjk\", \"ertyuislcvbnm\", \"qwazxpo\"]\n keyboard = [\"qwertyuiop\", \"asdfghjkl\", \"zxcvbnm\"]\n for num, content in enumerate(zones):\n for char in content:\n R['-'][char] = num + 1\n R[char]['-'] = 3 - num\n for a in ascii_lowercase:\n rowA = None\n posA = None\n for num, content in enumerate(keyboard):\n if a in content:\n rowA = num\n posA = content.index(a)\n for b in ascii_lowercase:\n for rowB, contentB in enumerate(keyboard):\n if b in contentB:\n R[a][b] = int(math.fabs(rowB - rowA) + math.fabs(posA - contentB.index(b)))\n return R",
"def distance(word1: str, word2: str) -> int:\n len1, len2 = len(word1), len(word2)\n dist = 0.0\n if len1 == 0 and len2 == 0:\n return 0\n elif len1 == 0 and len2 != 0:\n return len2\n elif len2 == 0 and len1 != 0:\n return len1\n else: # two distinct non zero words\n for w1, w2 in zip(word1, word2):\n dist = abs(ord(w1) - ord(w2))\n return dist / max(ord(w1), ord(w2))",
"def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))",
"def _euclidean_distance(self, a, b):\n X = (self.keyboard_cartesian[a]['x'] - self.keyboard_cartesian[b]['x']) ** 2\n Y = (self.keyboard_cartesian[a]['y'] - self.keyboard_cartesian[b]['y']) ** 2\n return math.sqrt(X + Y)",
"def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))",
"def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))",
"def distance(w_1, w_2):\n return dis_array[w_1, w_2]",
"def dist(string1, string2):\n if string1 == string2:\n return 0\n count1 = Counter(string1)\n count2 = Counter(string2)\n\n keys = set(count1.keys())\n keys.update(count2.keys())\n dist = sum(abs(count1.get(letter, 0) - count2.get(letter, 0)) for letter in keys)\n return dist",
"def hamming_distance(seq1, seq2):\n dist = sum([char1 != char2 for char1, char2 in zip(seq1, seq2)])\n return dist",
"def werCalc(s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))",
"def manhattan(rating1, rating2):\r\n distance = 0\r\n commonRatings = False \r\n for key in rating1:\r\n if key in rating2:\r\n distance += abs(rating1[key] - rating2[key])\r\n commonRatings = True\r\n if commonRatings:\r\n return distance\r\n else:\r\n return -1 #Indicates no ratings in common\r",
"def word_distance(w1, w2):\n\n syl_pairs = itertools.izip_longest(\n w1.syllables,\n w2.syllables,\n fillvalue=EMPTY_SYLLABLE\n )\n score = sum(syllable_distance(s1, s2) for s1, s2 in syl_pairs)\n\n # finger on the scale here.\n # Penalize differences in the first sound more.\n score += (2 if w1.sounds[0] != w2.sounds[0] else 0)\n # Penalize differences in the last sound a bit more\n score += (1 if w1.sounds[-1] != w2.sounds[-1] else 0)\n # Penalize mismatches in syllable counts more.\n score += (2 if len(w1.syllables) != len(w2.syllables) else 0)\n return score",
"def hamming_distance(str1, str2):\n \n # TODO: Write your solution here\n\n # basic variables\n distance = 0\n\n #check length of two strings is equal or no\n if len(str1) != len(str2):\n return None\n \n for char in range(len(str1)):\n if str1[char] != str2[char]:\n distance += 1\n\n return distance",
"def distance(typed_letter, proposed_letter):\n\n # If the typed letter is the same as the proposed_letter the distance is 0\n if typed_letter == proposed_letter:\n return 0\n\n # Otherwise\n else:\n\n # Find the positions for the typed and the proposed letter\n typed_row, typed_column = position.get(typed_letter)\n proposed_row, proposed_column = position.get(proposed_letter)\n\n # Compute distance between letters\n distance = abs(proposed_row - typed_row) + abs(proposed_column - typed_column)\n\n return distance",
"def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)",
"def manhattan_distance(a, b):\n return ((a.x - b.x) + (a.y - b.y))",
"def wordDistance(word1, word2):\n assert len(word1) == len(word2)\n count = 0\n\n for c1, c2 in zip(word1, word2):\n if c1 != c2:\n count += 1\n return count",
"def bhattacharyya_distance(distribution1: \"dict\", distribution2: \"dict\",) -> int:\n sq = 0\n for i in range(len(distribution1)):\n sq += np.sqrt(distribution1[i]*distribution2[i])\n \n return -np.log(sq)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a dataframe of distance matrix of x and y. Indexes are letters of x and columns are letters of y.
|
def distance_dataframe(self, x, y, keyboard_weight=None):
dist_matrix = self.distance_matrix(x, y, keyboard_weight)
dist_df = pd.DataFrame(dist_matrix, index=["", *list(x)],
columns=["", *list(y)])
return dist_df
|
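A sketch of the labeling step above, under the assumption that distance_matrix returns an edit-distance style DP table with one extra leading row and column (which is what the "" labels suggest); the plain Levenshtein stand-in below is an assumption, not the original implementation.

import numpy as np
import pandas as pd

def levenshtein_matrix(x, y):
    # Plain (unweighted) Levenshtein DP table as a stand-in for self.distance_matrix.
    m = np.zeros((len(x) + 1, len(y) + 1), dtype=int)
    m[:, 0] = np.arange(len(x) + 1)
    m[0, :] = np.arange(len(y) + 1)
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            cost = 0 if x[i - 1] == y[j - 1] else 1
            m[i, j] = min(m[i - 1, j] + 1, m[i, j - 1] + 1, m[i - 1, j - 1] + cost)
    return m

x, y = "cat", "cap"
dist_df = pd.DataFrame(levenshtein_matrix(x, y),
                       index=["", *list(x)], columns=["", *list(y)])
print(dist_df)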
[
"def make_dist_matrix(x, y):\r\n N = len(x)\r\n xx = np.vstack( (x,)*N )\r\n yy = np.vstack( (y,)*N )\r\n return np.sqrt( (xx - xx.T)**2 + (yy - yy.T)**2 )",
"def distance_2d(df1, df2, x, y):\n\n d1_coordinates = {'x': df1[x], 'y': df1[y]}\n df1_loc = pd.DataFrame(data=d1_coordinates)\n df1_loc.index = df1['ID']\n\n\n d2_coordinates = {'x': df2[x], 'y': df2[y]}\n df2_loc = pd.DataFrame(data=d2_coordinates)\n df2_loc.index = df2['ID']\n\n value = distance_matrix(df1_loc, df2_loc)\n return value",
"def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)",
"def dist_matrix(self, stanames):\n lats = self.getatt(stanames, 'Lat')\n lons = self.getatt(stanames, 'Lon')\n coords = []\n coords = [(lat, lon) for lat,lon in zip(lats, lons)]\n dist = distance.cdist(coords, coords, 'euclidean')\n df_dist = pd.DataFrame(dist, index=stanames,columns=stanames)\n \n return df_dist",
"def compute_distance_matrix(self):\n return np.square(self.data[:, np.newaxis, :] - self.centers[['x1', 'x2']].as_matrix()).sum(axis=2)",
"def get_distance_matrix(self):\n names = self.get_named_leaves()\n num_names = len(names)\n dist_mat = np.zeros((num_names, num_names), dtype='float')\n for i, j in itertools.combinations(range(num_names), 2):\n node1, node2 = self.node_names[names[i]], self.node_names[names[j]]\n dist = self.node_distance(node1, node2)\n dist_mat[i,j] = dist\n dist_mat[j,i] = dist\n return names, dist_mat",
"def pairwise_distances(self):\n xyz = self['xyz']\n pairwise_distance_matrix = squareform(pdist(xyz, 'euclidean'))\n\n return pairwise_distance_matrix",
"def distance_matrix(self):\n num_atoms = len(self.coords)\n matrix = np.zeros((num_atoms, num_atoms))\n\n for i, atom_i in enumerate(self.coords):\n for j, atom_j in enumerate(self.coords):\n matrix[i, j] = atom_i.distance_to(atom_j)\n return matrix",
"def _dist_matrix(self, x, y):\n \n # Compute the distance matrix \n dm_count = 0\n \n # Compute condensed distance matrix (upper triangle) of pairwise dtw distances\n # when x and y are the same array\n if(np.array_equal(x, y)):\n x_s = shape(x)\n dm = np.zeros((x_s[0] * (x_s[0] - 1)) // 2, dtype=np.double)\n \n p = ProgressBar(shape(dm)[0])\n \n for i in xrange(0, x_s[0] - 1):\n for j in xrange(i + 1, x_s[0]):\n dm[dm_count] = self._dtw_distance(x[i, ::self.subsample_step],\n y[j, ::self.subsample_step])\n \n dm_count += 1\n p.animate(dm_count)\n \n # Convert to squareform\n dm = squareform(dm)\n return dm\n \n # Compute full distance matrix of dtw distnces between x and y\n else:\n x_s = np.shape(x)\n y_s = np.shape(y)\n dm = np.zeros((x_s[0], y_s[0])) \n dm_size = x_s[0]*y_s[0]\n \n p = ProgressBar(dm_size)\n \n for i in xrange(0, x_s[0]):\n for j in xrange(0, y_s[0]):\n dm[i, j] = self._dtw_distance(x[i, ::self.subsample_step],\n y[j, ::self.subsample_step])\n # Update progress bar\n dm_count += 1\n p.animate(dm_count)\n \n return dm",
"def get_distance_matrix():\n df_afstandn2 = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_poi_afstand\n WHERE afstand < 1000\n \"\"\")\n return df_afstandn2",
"def compute_dist_matrix(X1, X2, distance):\n N, M = X1.shape[0], X2.shape[0]\n dist_matrix = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n dist_matrix[i][j] = dist(X1[i], X2[j], distance=distance)\n return dist_matrix",
"def get_distance_matrix(df_vectorized):\n\n\n gram_matrix = np.dot(df_vectorized.values,df_vectorized.values.T)\n\n norms_matrix = np.sqrt(np.outer(np.diag(gram_matrix),np.diag(gram_matrix)))\n\n cosine_distance_matrix = gram_matrix/norms_matrix\n\n return cosine_distance_matrix",
"def calc_dist_matrix(self,verbose=False):\n\n print(\"Calculating distance matrix.\"); sys.stdout.flush()\n\n nrow = self.data_vector.shape[0]\n self.dist_matrix = np.zeros((nrow, nrow),dtype=float)\n for i in range(nrow):\n if verbose:\n if i % 1000 == 0:\n print(\"Row\",i,\"of\",nrow)\n sys.stdout.flush()\n\n for j in range(i + 1, nrow):\n self.dist_matrix[i,j] = self._pairwise_dist(self.data_vector[i],self.data_vector[j])\n self.dist_matrix[j,i] = self.dist_matrix[i,j]\n \n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)",
"def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix",
"def get_euclidean_matrix(df):\n df.reset_index(drop=True, inplace=True)\n\n # foods = df['food_names']\n # food_examples = []\n # indices = list(range(0, len(foods)))\n # for i in indices:\n # food_examples.append(str(foods[i]) + str(i))\n # food_examples = pd.Series(food_examples)\n food_examples = df['food_names']\n\n df = df.drop(['food_names', 'height', 'weight', 'above_range', 'BMI', 'age', 'gender',\n 'glucose_tolerance_category','90-percentile_of_2h-iAUC', 'average_carbs_ratio',\n 'average_daily_carbs','average_meals_per_day', 'average_sleep_hours',\n 'average_glucose', 'baseline', 'coefficient_of_variation', 'max_2-hours_iAUC',\n 'median_fasting_glucose_level','median_of_2h-iAUC', 'night_baseline'], axis='columns')\n\n df = df.replace([-np.inf], 0).dropna(axis=1)\n\n num_examples = df.shape[0]\n\n distances = pdist(df.values, metric='euclidean')\n print(distance)\n dis_array = squareform(distances)\n print(dis_array)\n dis_df = pd.DataFrame(data = dis_array, index=food_examples, columns=food_examples)\n print(dis_df)\n writer = pd.ExcelWriter('Euclidean_distance_icarbonx.xlsx', engine='xlsxwriter')\n dis_df.to_excel(writer, sheet_name='Sheet1')\n writer.save()",
"def _calculate_distances(self, MNI_df):\n df_loc = 0\n out_df = pd.DataFrame(columns=('ch1','ch2','distance'))\n for ch1 in MNI_df.iterrows():\n ch1_name = ch1[1].channel_name\n ch1_coordinates = np.array([ch1[1].X, ch1[1].Y, ch1[1].Z])\n for ch2 in MNI_df.iterrows():\n ch2_name = ch2[1].channel_name\n ch2_coordinates = np.array([ch2[1].X, ch2[1].Y, ch2[1].Z])\n \n # Calculate eucleidian distance\n dist = self._point_distance(ch1_coordinates, ch2_coordinates)\n out_df.loc[df_loc] = [ch1_name, ch2_name, dist]\n df_loc += 1\n \n # Get rid of zero distances (same contacts)\n out_df = out_df[out_df.distance != 0]\n out_df.reset_index(inplace=True, drop=True)\n \n return out_df",
"def build_distance_matrix(self):\n coords = self.atomcoords\n self.distancematrix = np.zeros((len(coords), len(coords)))\n for i in range(len(coords)):\n for j in [x for x in range(len(coords)) if x > i]:\n self.distancematrix[i][j] = norm(coords[i] - coords[j])\n self.distancematrix[j][i] = self.distancematrix[i][j]",
"def dist_matrix(X, Y):\n sx = np.sum(X ** 2, 1)\n sy = np.sum(Y ** 2, 1)\n D2 = sx[:, np.newaxis] - 2.0 * X.dot(Y.T) + sy[np.newaxis, :]\n # to prevent numerical errors from taking sqrt of negative numbers\n D2[D2 < 0] = 0\n D = np.sqrt(D2)\n return D",
"def distance_matrix(X, Y, metric):\n distance = np.zeros((len(X), len(Y)))\n for i in range(len(X)):\n for j in range(len(Y)):\n m = metric(X[i], Y[j])\n if np.isnan(m):\n pdb.set_trace()\n distance[i, j] = m\n return distance"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the token and dsn from a key. Generates a simple SHA1 hash of the key; the key is a 64-bit integer. The token is a 32-bit integer and the dsn is a 64-bit integer.
|
def key2tokenAndDSN(self, key):
    import struct
    import hashlib
    # Pack the 64-bit key in network byte order and take its SHA1 digest
    self.keystr = struct.pack("!Q", key)
    self.h = hashlib.sha1(self.keystr.rjust(8, b'\x00'))
    self.shastr = self.h.digest()  # 20-byte binary digest
    # Token = first 32 bits of the digest, DSN = last 64 bits
    self.token, self.dsn = self.shastr[0:4], self.shastr[-8:]
    self.d1, self.d2 = struct.unpack("!II", self.dsn)
    self.token, self.dsn = (struct.unpack("!I", self.token)[0],
                            (self.d2 << 32) + self.d1)
    return (self.token, self.dsn)
|
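The same derivation can be written as a standalone helper using only the standard library; it mirrors the byte slicing of the method above (the first 4 digest bytes become the 32-bit token, the last 8 bytes become the 64-bit dsn with its two 32-bit halves swapped), and the sample key is arbitrary.

import hashlib
import struct

def key_to_token_and_dsn(key):
    # SHA1 over the 64-bit key in network byte order, as in key2tokenAndDSN above.
    digest = hashlib.sha1(struct.pack("!Q", key)).digest()   # 20 bytes
    token = struct.unpack("!I", digest[:4])[0]               # first 32 bits
    d1, d2 = struct.unpack("!II", digest[-8:])               # last 64 bits, two halves
    dsn = (d2 << 32) + d1                                    # same half-swap as above
    return token, dsn

print(key_to_token_and_dsn(0x0123456789ABCDEF))  # arbitrary example key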
[
"def make_digest(key):\r\n return sha1(key.encode('utf-8')).hexdigest()",
"def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash",
"def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()",
"def SHA1(self) -> _n_0_t_3[_n_0_t_9]:",
"def _sha(key):\n\n return key.split('-')[-1].encode('utf-8')",
"def encode_token(key, pin, length=None):\n if length is None:\n length = len(pin)\n m = hmac.new(key.encode('utf8'), digestmod=hashlib.sha512)\n m.update(pin.encode('utf8'))\n digest = re.sub(r'[a-f]', '', m.hexdigest())[:length]\n if digest == pin:\n digest = ''\n if len(digest) < length:\n digest += encode_token(key + key, pin, length - len(digest))\n return digest",
"def digest(self, key):\n if len(key) == 0:\n return '', ''\n\n *heads, tail = key.split()\n prefix = ''.join(word[0] for word in heads)\n\n digested = []\n if prefix:\n digested.append(prefix + tail)\n return key, digested",
"def _dsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n content.write_string('ssh-dss')\n content.write_mpint(numbers.public_numbers.parameter_numbers.p)\n content.write_mpint(numbers.public_numbers.parameter_numbers.q)\n content.write_mpint(numbers.public_numbers.parameter_numbers.g)\n content.write_mpint(numbers.public_numbers.y)\n content.write_mpint(numbers.x)\n return content.data",
"def build_serverkeyhash(self):\n server_publickey = self.getfilehttps(self.epo_url + \"srpubkey.bin\")\n self.serverkeyhash = b64encode(mcafee_crypto.SHA1(server_publickey))\n return self.serverkeyhash",
"def keytag(dnskey):\n if dnskey.algorithm == 1:\n a = ord(dnskey.key[-3]) << 8\n b = ord(dnskey.key[-2])\n return a + b\n else:\n header = struct.pack(\"!HBB\", dnskey.flags, dnskey.protocol,\n dnskey.algorithm)\n key = header + dnskey.key\n ac = 0\n for i, value in enumerate(ord(x) for x in key):\n if i % 2:\n ac += value\n else:\n ac += (value << 8)\n ac += (ac >> 16) & 0xffff\n return ac & 0xffff",
"def getMD5(self, key1, key2, last8):\n n1=[]\n s1=0\n n2=[]\n s2=0\n for c in key1:\n if c.isdigit():\n n1.append(c)\n if c.isspace():\n s1+=1\n \n for c in key2:\n if c.isdigit():\n n2.append(c)\n if c.isspace():\n s2+=1\n \n d1 = int(''.join(n1))\n d2 = int(''.join(n2))\n z1=d1/s1\n z2=d2/s2\n \n print \"Key 1 has %d spaces:\" % s1, z1\n print \"Key 2 has %d spaces:\" % s2, z2\n \n mdThing = struct.pack(\">LL\", z1, z2) + last8\n return md5(mdThing).digest()",
"def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")",
"def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)",
"def hash_keys(key: bytes) -> Tuple[bytes, bytes]:\n\n salt = b'1234567890123456'\n keys = PBKDF2(key, salt, 64, count=1000000, hmac_hash_module=SHA512)\n key1 = keys[:32]\n key2 = keys[32:]\n\n return key1, key2",
"def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]",
"def concat_hash(self, x, symkey):\n msg = '%s%s' % (x, symkey)\n return int(hashlib.sha1(msg).hexdigest(), 16)",
"def __computeHash(self, cmd):\n fieldA = capture([\"sha1sum\", cmd]).split()\n return fieldA[0]",
"def get_signature(self, key, secret):\n m = hashlib.md5(key+secret+str(int(time.time())))\n return m.hexdigest()",
"def key_to_rdata(key: Key) -> bytes:\n header = struct.pack(\"!HBB\", key.flags, key.protocol, key.algorithm.value,)\n pubkey = base64.b64decode(key.public_key)\n return header + pubkey"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Identify distinct MPTCP connections that reached a successful handshake. Look for ACK packets with the MPTCP option header. For each MPTCP connection, report the receiver's token value, which acts as the connectionID.
|
def mptcp_connections(self, pkts):
    count = 0
    # MPTCP_Capable = 0x0
    # MPTCP_CapableACK ---> successful handshake
    print("======================================================================")
    print("Successful Handshake --- Look for Ack packets with MPTCP option Header")
    print("""Token = connectionID = SHA1(key)[0-32] of Other party's key. (Capture from
either step 2 or 3 in the first handshake)""")
    print("Total packets: %s" % len(pkts))
    print("======================================================================")
    print("Identifying MPTCP Connections....")
    for i in range(len(pkts)):
        if MPTCP_CapableACK in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 0:
            count += 1  # Count the number of distinct MPTCP connections
            # Compute the receiver's token
            self.key_rcv = pkts[i][TCPOption_MP].mptcp.rcv_key
            self.rcv_token, self.rcv_dsn = self.key2tokenAndDSN(self.key_rcv)
            # Compute the sender's token
            self.key_snd = pkts[i][TCPOption_MP].mptcp.snd_key
            self.snd_token, self.snd_dsn = self.key2tokenAndDSN(self.key_snd)
            print("%i. New MPTCP Connection (Successful Handshake) src: %s; dest: %s; Sender's key: %s; Receiver's key: %s; Receivers Token (connectionID): %s; Sender's Token: %s" % (count, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_key, pkts[i][TCPOption_MP].mptcp.rcv_key, self.rcv_token, self.snd_token))
    print("Total MPTCP Connections: %i" % count)
|
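A hedged usage sketch: MPTCP_CapableACK and TCPOption_MP come from an MPTCP-enabled build of scapy rather than stock scapy, and MPTCPAnalyzer below is a hypothetical class assumed to hold the two methods above; the capture filename is illustrative.

from scapy.all import rdpcap  # requires a scapy build with MPTCP support

analyzer = MPTCPAnalyzer()              # hypothetical holder of the methods above
pkts = rdpcap("mptcp_capture.pcap")     # illustrative pcap path
analyzer.mptcp_connections(pkts)        # prints one line per successful MPTCP handshake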
[
"def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n if conn_acks[conn_id][co.S2C] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission! (take into account the seq overflow)\n mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_C2S].add(dss)\n conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n conn_acks[conn_id][co.S2C] = dack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta",
"def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta",
"def supportedpids(self, recpdu: rdmpacket.RDMpacket) -> rdmpacket.RDMpacket:\n\n if recpdu.cc is not defines.CC_Get_command:\n return nackreturn(self, recpdu, nackcodes.nack_unsupported_cc)\n sendpdu = rdmpacket.RDMpacket()\n sendpdu.destuid = recpdu.srcuid\n sendpdu.srcuid = self.device_descriptor.uid\n sendpdu.tn = recpdu.tn\n sendpdu.port_resp = 0x00\n sendpdu.mess_cnt = 0x00\n sendpdu.sub_id = 0x0000\n sendpdu.cc = 0x21\n sendpdu.pid = 0x0050 \n rdmlist = list(self.device_descriptor.getswitcher.keys())\n llrplist = list(self.device_descriptor.llrpswitcher.keys())\n sendpdu.pdl = (len(rdmlist)+len(llrplist))*2\n keybytes = bytearray()\n for key in rdmlist:\n keybytes.extend(key.to_bytes(2, byteorder='big'))\n for key in llrplist:\n keybytes.extend(key.to_bytes(2, byteorder='big'))\n sendpdu.pd = keybytes\n sendpdu.length = 24+sendpdu.pdl\n sendpdu.calcchecksum()\n return sendpdu",
"def process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the first SYN is the client\n # Check if the connection is black listed or not\n conn_id = False\n conn_candidates = fast_conns.get((saddr, daddr, sport, dport), [])\n min_delta = ts_syn_timeout\n for start, duration, cid, fid in conn_candidates:\n if (co.START in mptcp_connections[cid].flows[fid].attr\n and abs((ts_delta - mptcp_connections[cid].flows[fid].attr[co.START]).total_seconds()) < min_delta):\n conn_id = cid\n flow_id = fid\n min_delta = abs((ts_delta - mptcp_connections[cid].flows[fid].attr[co.START]).total_seconds())\n\n if not conn_id:\n black_list.add((saddr, sport, daddr, dport))\n return\n elif conn_id and (saddr, sport, daddr, dport) in black_list:\n black_list.remove((saddr, sport, daddr, dport))\n\n if ((saddr, sport, daddr, dport) in acks and (ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]).total_seconds() <= ts_syn_timeout\n and acks[saddr, sport, daddr, dport][co.S2C] == -1) and conn_id in conn_acks:\n # SYN retransmission! But do nothing particular\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta\n else:\n acks[saddr, sport, daddr, dport] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, co.CONN_ID: conn_id,\n co.FLOW_ID: flow_id}\n conn_acks[conn_id] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, SEQ_C2S: set(), SEQ_S2C: set(), HSEQ_C2S: {},\n HSEQ_S2C: {}}",
"def compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing MPTCP DSS retransmissions for\", pcap_filepath)\n acks = {}\n conn_acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n pcap_file.close()",
"def process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the SYN/ACK is the server\n if (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and acks[daddr, dport, saddr, sport][co.C2S] == -1):\n # Better to check, if not seen, maybe uncomplete TCP connection\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta\n\n elif (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and tcp.ack == acks[daddr, dport, saddr, sport][co.C2S]):\n # SYN/ACK retransmission! But don't do anything special\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta",
"def copy_info_to_mptcp_connections(connections, mptcp_connections, failed_conns, acksize_all, acksize_all_mptcp, flow_name, fast_conns=None):\n connection = connections[flow_name]\n conn_id, flow_id = get_flow_name_connection_optimized(connection, mptcp_connections, fast_conns=fast_conns)\n if isinstance(conn_id, (int, long)):\n mptcp_connections[conn_id].flows[flow_id].subflow_id = flow_name\n mptcp_connections[conn_id].flows[flow_id].attr[co.TCP_COMPLETE] = connection.flow.attr[co.TCP_COMPLETE]\n mptcp_connections[conn_id].flows[flow_id].attr[co.START] = connection.flow.attr[co.START]\n mptcp_connections[conn_id].flows[flow_id].attr[co.DURATION] = connection.flow.attr[co.DURATION]\n if co.BACKUP in connection.attr:\n mptcp_connections[conn_id].flows[flow_id].attr[co.BACKUP] = connection.attr[co.BACKUP]\n if co.SOCKS_PORT in connection.attr:\n mptcp_connections[conn_id].flows[flow_id].attr[co.SOCKS_PORT] = connection.attr[co.SOCKS_PORT]\n mptcp_connections[conn_id].flows[flow_id].attr[co.SOCKS_DADDR] = connection.attr[co.SOCKS_DADDR]\n if co.SOCKS_PORT not in mptcp_connections[conn_id].attr:\n mptcp_connections[conn_id].attr[co.SOCKS_PORT] = connection.attr[co.SOCKS_PORT]\n mptcp_connections[conn_id].attr[co.SOCKS_DADDR] = connection.attr[co.SOCKS_DADDR]\n\n elif not mptcp_connections[conn_id].attr[co.SOCKS_PORT] == connection.attr[co.SOCKS_PORT] or not mptcp_connections[conn_id].attr[co.SOCKS_DADDR] == connection.attr[co.SOCKS_DADDR]:\n print(\"DIFFERENT SOCKS PORT...\", mptcp_connections[conn_id].attr[co.SOCKS_PORT], connection.attr[co.SOCKS_PORT], mptcp_connections[conn_id].attr[co.SOCKS_DADDR], connection.attr[co.SOCKS_DADDR], conn_id, flow_id)\n\n for direction in co.DIRECTIONS:\n for attr in connection.flow.attr[direction]:\n mptcp_connections[conn_id].flows[flow_id].attr[direction][attr] = connection.flow.attr[direction][attr]\n\n if flow_name in acksize_all[direction]:\n if conn_id not in acksize_all_mptcp[direction]:\n acksize_all_mptcp[direction][conn_id] = {}\n\n acksize_all_mptcp[direction][conn_id][flow_id] = acksize_all[direction][flow_name]\n\n else:\n # This is a TCPConnection that failed to be a MPTCP subflow: add it in failed_conns\n failed_conns[connection.conn_id] = connection\n\n return conn_id, flow_id",
"def _extractConnectionStatistics(self, ttoutput):\n assert isinstance(ttoutput, str), \"type ttoutput: %s\"%type(ttoutput)\n\n hostRE = r\"host \\w+:\\s*([0-9.]+):([0-9]+)\" # Matches a line containing one of the hosts that are communicating here\n startTimeRE = r'first packet:\\s*([\\w\\s:\\.]+)\\s*' # The time the first packet of this connection was seen\n lastPacketRE = r'last packet:\\s*([\\w\\s:\\.]+)\\s*' # The time the last packet was seen\n completedRE = r'complete conn: (\\w+)' # Whether the connection has been completed or not\n dateformat = \"%a %b %d %H:%M:%S.%f %Y\" # How the dates in the input are formatted (used to convert them to datetime)\n connectionSeparatorRE = \"=====+\" # The descriptions of the individual connections are separated by a line of =\n\n result = []\n current_connection = TcpConnection()\n for line in ttoutput.splitlines():\n if re.search(hostRE, line) and current_connection.host1 is None:\n # If the line contains the IP of one of the hosts\n match = re.search(hostRE, line)\n current_connection.host1 = (match.group(1), int(match.group(2)))\n elif re.search(hostRE, line):\n # If the line contains the IP of one of the hosts and the first host was already seen, it has to be the second host\n match = re.search(hostRE, line)\n current_connection.host2 = (match.group(1), int(match.group(2)))\n elif re.search(startTimeRE, line):\n match = re.search(startTimeRE, line)\n current_connection.startTime = datetime.strptime(match.group(1), dateformat)\n elif re.search(lastPacketRE, line):\n assert current_connection.startTime is not None\n match = re.search(lastPacketRE, line)\n current_connection.duration = datetime.strptime(match.group(1),\n dateformat) - current_connection.startTime\n assert isinstance(current_connection.duration, timedelta)\n elif re.search(completedRE, line):\n match = re.search(completedRE, line)\n current_connection.connection_completed = True if match.group(1).lower() == \"yes\" else False\n elif re.search(connectionSeparatorRE, line):\n assert current_connection.isComplete()\n result.append(current_connection)\n current_connection = TcpConnection()\n result.append(current_connection)\n\n result = sorted(result, key=lambda conn: datetimeToEpoch(conn.startTime))\n return result",
"def compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing TCP ack sizes for\", pcap_filepath)\n nb_acks = {co.C2S: {}, co.S2C: {}}\n acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n try:\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n # Check if linux cooked capture\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n eth = dpkt.sll.SLL(buf)\n else:\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, saddr, ip, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n finally:\n pcap_file.close()\n\n return nb_acks",
"def devidentify(self, recpdu: rdmpacket.RDMpacket) -> rdmpacket.RDMpacket:\n\n if recpdu.cc == defines.CC_Get_command:\n sendpdu = rdmpacket.RDMpacket()\n sendpdu.length = 25\n sendpdu.destuid = recpdu.srcuid\n sendpdu.srcuid = self.device_descriptor.uid\n sendpdu.tn = recpdu.tn\n sendpdu.port_resp = 0x00\n sendpdu.mess_cnt = 0x00\n sendpdu.sub_id = 0x0000\n sendpdu.cc = defines.CC_Get_command_resp\n sendpdu.pid = recpdu.pid\n sendpdu.pdl = 1\n sendpdu.pd = self.device_descriptor.identifystatus.to_bytes(1, 'big')\n sendpdu.calcchecksum()\n return sendpdu\n elif recpdu.cc == defines.CC_Set_command:\n if recpdu.pd[0] <= 1:\n print(\"Device {}: Identify set to {:02x}\".format(self.device_descriptor.uid.hex(), recpdu.pd[0]))\n self.device_descriptor.identifystatus = recpdu.pd[0]\n sendpdu = rdmpacket.RDMpacket()\n sendpdu.length = 24\n sendpdu.destuid = recpdu.srcuid\n sendpdu.srcuid = self.device_descriptor.uid\n sendpdu.tn = recpdu.tn\n sendpdu.port_resp = 0x00\n sendpdu.mess_cnt = 0x00\n sendpdu.sub_id = 0x0000\n sendpdu.cc = defines.CC_Set_command_resp\n sendpdu.pid = recpdu.pid\n sendpdu.pdl = 0\n sendpdu.calcchecksum()\n return sendpdu\n else:\n #Out of range NACK\n return nackreturn(self, recpdu, nackcodes.nack_data_range)\n else:\n return nackreturn(self, recpdu, nackcodes.nack_unsupported_cc)",
"def RETRANSMIT(self):\n\n ##############################################\n # retransmit all the unacknowledged packets #\n # (all the packets currently in self.buffer) #\n ##############################################\n \n if(self.timeout_hanjing):\n #If we are coming from the timeout state, retransmit all the buffer\n for k,v in self.buffer.items():\n if(self.SACK == 0):\n header_GBN = GBN(type = 'data', len = len(v), hlen = 6, num = k, win = self.win)\n else:\n header_GBN = GBN(type = 'data', options = 1, len = len(v), hlen = 6, num = k, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / v)\n log.debug(\"Sending packet number: %s\", k)\n \n if ((self.Q_3_2 == 1) and (self.dup_ack_hanjing == True) and (self.timeout_hanjing == False)):\n #just retransmit the packet that has been ack'ed 3 times consequtively\n header_GBN = GBN(type = 'data', len = len(self.buffer[self.unack]), hlen = 6, num = self.unack, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / self.buffer[self.unack])\n log.debug(\"Sending packet number: %s\", self.unack)\n \n #Question 3.3\n if(self.SACK == 1 and (self.timeout_hanjing == False) and (self.hlen > 6)):\n if(self.hlen == 9):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) \n if(self.hlen == 12):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) + list(range(self.ledge2, self.ledge2 + self.len2)) \n if(self.hlen == 15):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) + list(range(self.ledge2, self.ledge2 + self.len2)) + list(range(self.ledge3, self.ledge3 + self.len3)) \n \n for i in optionalHeader_list:\n optionalHeader_list[optionalHeader_list.index(i)] = i % 2**self.n_bits\n \n #We need to find the difference between the sender buffer, and the optionalHeader_list\n Sender_buffer_keys = list(self.buffer.keys()) \n log.debug(\"The sender buffer: %s\", Sender_buffer_keys)\n #Trimmed_sender_buffer includes the buffer list only up to the last packet number in the optional header list)\n trimmed_sender_buffer = Sender_buffer_keys[:Sender_buffer_keys.index(optionalHeader_list[-1])+1]\n #Retrans_list is the list of keys to be retransmitted\n log.debug(\"Trimmed Sender Buffer: %s\", trimmed_sender_buffer)\n log.debug(\"Optional Header List: %s\", optionalHeader_list)\n Retrans_list = [item for item in trimmed_sender_buffer if item not in optionalHeader_list]\n log.debug(\"SACK: packets should be retransmitted: %s\", Retrans_list)\n for i in Retrans_list:\n header_GBN = GBN(type = 'data', options = 1 , len = len(self.buffer[i]), hlen = 6, num = i, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / self.buffer[i])\n log.debug(\"SACK Retransmission: Sending packet number: %s\", i)\n # back to SEND state\n self.dup_ack_hanjing = False\n self.timeout_hanjing = False\n raise self.SEND()",
"def handshake(self, pkt):\n # pkt.summary()\n dst = pkt[IP].src\n src = pkt[IP].dst\n dport = pkt[TCP].sport\n ackno = pkt[TCP].seq + 1\n seqno = 0 # use random\n synack = IP(src=src, dst=dst)/TCP(sport=self.sport, dport=dport, flags='SA', seq=seqno, ack=ackno)\n reply = None\n while not reply:\n reply = sr1(synack, timeout=1, verbose=self.verbose)\n seqno += 1\n serv = FTPServerConnectiton(src, dst, self.sport, dport, seqno, ackno)\n serv_thread = Thread(target=serv.run)\n serv_thread.start()\n print 'New connection created'",
"def print_connection_being_established(pkt):\n print_headers(pkt, overwrite_min=0)\n print(green(\"!!!! New TCP/OpenFlow Connection being established!!\\n\"))",
"def transmitPollAck(): \r\n global data\r\n DW1000.newTransmit()\r\n data[0] = C.POLL_ACK\r\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\r\n DW1000.setData(data, LEN_DATA)\r\n DW1000.startTransmit()",
"def transmitPollAck(): \n global data\n DW1000.newTransmit()\n data[0] = C.POLL_ACK\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\n DW1000.setData(data, LEN_DATA)\n DW1000.startTransmit()",
"def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info",
"def Connect(self):\r\n #sleep(1)\r\n #self.src_ref = randint(1, 20)\r\n self.src_ref = 10\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.settimeout(self.timeout)\r\n self.s.connect((self.ip, self.port))\r\n self.s.send(TPKTPacket(COTPConnectionPacket(self.dst_ref,\r\n self.src_ref,\r\n self.dst_tsap,\r\n self.src_tsap,\r\n 0x0a)).pack())\r\n reply = self.s.recv(1024)\r\n _ = COTPConnectionPacket().unpack(TPKTPacket().unpack(reply).data)\r\n\r\n self.NegotiatePDU()",
"def connectBufferReady():\n bufferReady.next = bufferReady_int",
"def get_trace_maconly_pkts(self):\n c = self.conn.cursor()\n c.execute(\"select * from send_events where uniqid is null\")\n sent_macpkt = c\n \n c.execute(\"select * from recv_events where uniqid is null\")\n recv_macpkt = c\n \n c.execute(\"select * from drop_events where uniqid is null\") \n drop_macpkt = c\n\n return (sent_macpkt, recv_macpkt, drop_macpkt)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return the current schema_org schema version
|
def get_schema_org_version():
return _get_schemaorg_version()
|
[
"def get_schemaorg_version():\n try:\n version = get_latest_schemaorg_version()\n except ValueError:\n version = SCHEMAORG_DEFAULT_VERSION\n return version",
"def schema_version(self):\n # return self._parsed[\"schemaVersion\"]\n # does not exist in manifest reference\n pass",
"def schema_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"schema_version\")",
"def schema_version(self):\n return self._parsed[\"schemaVersion\"]",
"def get_latest_schema_version() -> Text:\n return _LATEST_SCHEMA_VERSION",
"def schema_version(self) -> int:\n return self.__meta['SchemaVersion']",
"def content_schema_version(self) -> Optional[str]:\n return pulumi.get(self, \"content_schema_version\")",
"def resolve_current_labbook_schema_version(self, info):\n return CURRENT_SCHEMA",
"def schema_version(self):\n return self._replay_schema.value",
"def content_schema_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"content_schema_version\")",
"def get_datasetSchemaVersion(self):\n\t\treturn self.dsDoc['about']['datasetSchemaVersion']",
"def get_problemSchemaVersion(self):\n\t\treturn self.prDoc['about']['problemSchemaVersion']",
"def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest",
"def get_database_schema(self):\n\n cursor = self.db.cursor()\n cursor.execute('SELECT version FROM schema')\n results = cursor.fetchone()\n cursor.close()\n return results[0]",
"def schema_version(conn):\n with Tx(conn) as c:\n try:\n c.execute('SELECT version FROM meta LIMIT 1', ['version'])\n except psycopg2.ProgrammingError:\n return 0\n if c.rowcount == 0:\n return 0\n return c.fetchone()['version']",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def db_version(self):\n return self._db_version",
"def version(self):\r\n print migration.db_version()",
"def get_version(self):\n return flask_djangofy.get_version()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all classes and label them if they are referenced. If include_ref is False, only "defined" classes are included.
|
def get_classes(self, include_ref=True):
defs = self._get_class_defs()
ans = {}
ans.update(defs)
if include_ref:
refs = self._get_class_refs()
ans.update(refs)
return list(ans.values())
|
[
"def _find_all_referenced_classes(self):\n\n referenced_classes = set()\n searched_classes = set()\n\n search_pattern = \"import\\s+.+;\"\n extract_pattern = \"import\\s+([^;]+)\"\n white_list = [\"^import\\s+com.izforge.izpack.*;$\", \"^import\\s+java.*;$\"]\n\n izclass_container = self.get_container(\"classes\")\n class_to_path_map = izclass_container.container\n classes_referenced_in_specs, spec_found_in = list(zip(*self.find_specification_references(\"classes\")))\n\n for reffed_class in classes_referenced_in_specs:\n queue = Queue()\n queue.put(reffed_class)\n while (not queue.empty()):\n search_class = queue.get()\n searched_classes.add(search_class)\n if not search_class in class_to_path_map:\n continue\n referenced_classes.add(class_to_path_map[search_class])\n found_imports = self.seeker.search_source_for_pattern(class_to_path_map[search_class], search_pattern, extract_pattern, white_list)\n for found_import in found_imports:\n if (found_import[0] in searched_classes):\n continue\n queue.put(found_import[0])\n\n return referenced_classes & set(class_to_path_map.values())",
"def classes(self):\n return self._classes",
"def process_class_list(self, module, classes):",
"def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes",
"def pre_class(self):\n return self.classes",
"def get_classes(self):\n classes = []\n for document in self.documents:\n if document.c not in classes:\n classes.append(document.c)\n return classes",
"def addClassRef(clazz):\n\n global h_classes\n header = \"class %s;\" % clazz\n if not header in h_classes:\n h_classes.append(header)",
"def get_classes(self):\n return self.classes",
"def getParentClassesRefs(self):\n\t\traise Exception(\"Abstract method IClass.getParentClassesRefs not implemented in: \" + str(self))",
"def iter_classdefs(self):\n return self._search_in_scope('classdef')",
"def _load_classes(self):\n\t\t# load class names (name -> label)\n\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\tself.classes \t\t\t\t= {}\n\t\tself.coco_labels \t\t\t= {}\n\t\tself.coco_labels_inverse \t= {}\n\t\tfor c in categories:\n\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\tself.classes[c['name']] = len(self.classes)\n\t\tself.labels = {}\n\t\tfor key, value in self.classes.items():\n\t\t\tself.labels[value] = key\n\n\t\tprint(self.coco_labels)\n\t\tprint(self.coco_labels_inverse)\n\t\tprint(self.classes)\n\t\tprint(self.labels)",
"def classes(self):\n return self._classes",
"def class_labels(self):\n return self._class_labels",
"def get_classes(self):\n if self.include_unseen_class and self.fill_unseen_labels:\n return np.append(self.classes_, [self.fill_label_value])\n\n return self.classes_",
"def getClasses(self):\n self._process()\n return self._sets",
"def get_classes(self, name):\n return list(cl for cl in self.classes if cl.name == name)",
"def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = {}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key",
"def getByReferenceClassifiers(inpClassifiers, startExecCount=0):\n\toutVals = [_ByReferenceClassifier(inpClassifier, execCount=startExecCount) for inpClassifier in inpClassifiers]\n\treturn outVals",
"def wordclasses(self):\n for corpus in self:\n corpus.wordclasses"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return validation errors as a list of dictionaries
|
def get_validation_errors(self):
return [err.to_dict() for err in self._schema.validator.validation_errors]
|
[
"def compact_form_errors(form):\n errors = {}\n\n for name, validationerror in form.errors.as_data().items():\n errors[name] = [item.code for item in validationerror]\n\n return errors",
"def errors_as_dict(self):\n errors = []\n for e in self.errors:\n errors.append({\n 'file': e.term.file_name,\n 'row': e.term.row,\n 'col': e.term.col,\n 'term': e.term.join,\n 'error': str(e)\n })\n\n return errors",
"def validation_errors(self):\n return self._validation_errors",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n error_messages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n error_messages.append(f\"{field}: {error}\")\n return error_messages",
"def _validate_errors(self):\n errors = (constraint.fails(self) for constraint in self._constraints)\n return [err for err in errors if err]",
"def getErrorsList(self):\n return self.__errors",
"def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors",
"def _validate_errors_object(self, data_list):\n\n if not isinstance(data_list, list):\n return [\"'Errors' object MUST be an array\"]\n ret = []\n for error_dict in data_list:\n ret.extend(self._validate_error_object(error_dict))\n return ret",
"def get_preflight_validation_fails(self):\n return self.get_as_dict_array(ShipyardDbAccess.SELECT_VALIDATIONS)",
"def getErrors(self) -> java.util.Collection:\n ...",
"def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]",
"def process_errors(errors):\n new_errors = {\n 'errors': [],\n }\n\n for key, value in errors.items():\n if key == 'non_field_errors':\n new_errors['error_code'] = get_api_code(value[0])\n new_errors['error_message'] = value[0]\n else:\n new_errors['errors'].append({\n 'error_code': get_api_code(value[0]),\n 'error_message': value[0],\n 'field': key,\n })\n\n if not new_errors.has_key('error_code'):\n message = 'Validation Failed'\n new_errors['error_code'] = get_api_code(message)\n new_errors['error_message'] = message\n\n return new_errors",
"def errors(self):\n errs = {}\n for results in self.results.itervalues():\n for result in results:\n if result.status == punc.model.Result.STATUS_ERROR:\n err_msg = result.error_message()\n if err_msg is not None:\n name = result.device_name()\n if name in errs:\n errs[name].add(err_msg)\n else:\n errs[name] = set([err_msg])\n return errs",
"def errors(self):\n return tuple(self._errors)",
"def errors(self) -> List[ValidationResult]:\n # TODO: use severity\n return [r for r in self.results_excluding_normalized() if self._is_error(r)]",
"def validations(self):\n return self.container['validations']",
"def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors",
"def errors(self) -> List[str]:\n return [e.get('message')\n for e in self._error.response.json().get('errors', [])]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Faster wavelength selector. If passed lists it will return lists. If passed np arrays it will return arrays. Fastest is using np.ndarrays; fast_wav_selector is ~1000-2000x quicker than wav_selector
|
def fast_wav_selector(wav, flux, wav_min, wav_max):
if isinstance(wav, list): # if passed lists
wav_sel = [value for value in wav if(wav_min < value < wav_max)]
flux_sel = [value[1] for value in zip(wav,flux) if(wav_min < value[0] < wav_max)]
elif isinstance(wav, np.ndarray):
# Super Fast masking with numpy
mask = (wav > wav_min) & (wav < wav_max)
wav_sel = wav[mask]
flux_sel = flux[mask]
else:
raise TypeError("Unsupported input wav type")
return [wav_sel, flux_sel]
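
A minimal usage sketch for fast_wav_selector (not part of the original record); the wavelength grid and flux values below are purely illustrative, and NumPy is assumed to be imported as np:

import numpy as np

wav = np.linspace(2000.0, 2500.0, 100000)   # illustrative wavelength grid
flux = np.random.rand(100000)               # matching flux values
wav_sel, flux_sel = fast_wav_selector(wav, flux, 2100.0, 2200.0)
assert wav_sel.shape == flux_sel.shape      # the boolean-mask branch keeps both arrays aligned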
|
[
"def wav_selector(wav, flux, wav_min, wav_max, verbose=False):\n if isinstance(wav, list): # if passed lists\n wav_sel = [wav_val for wav_val in wav if (wav_min < wav_val < wav_max)]\n flux_sel = [flux_val for wav_val, flux_val in zip(wav,flux) if (wav_min < wav_val < wav_max)]\n elif isinstance(wav, np.ndarray):\n # Super Fast masking with numpy\n mask = (wav > wav_min) & (wav < wav_max)\n if verbose:\n print(\"mask=\", mask)\n print(\"len(mask)\", len(mask))\n print(\"wav\", wav)\n print(\"flux\", flux)\n wav_sel = wav[mask]\n flux_sel = flux[mask]\n else:\n raise TypeError(\"Unsupported input wav type\")\n return [wav_sel, flux_sel]",
"def _ComboWave(WList): #accept list or csv of 140 pts\n PreNormL=[]\n for DesiredOutputPulseShapeQ in WList:\n if len(DesiredOutputPulseShapeQ) == 140*4:#will accept pre-formatted Hex2Byte text\n PreNormL.append(np.array([int(DesiredOutputPulseShapeQ[4*ii:4*ii+4],16) for ii in range(len(DesiredOutputPulseShapeQ)//4)]))\n elif len(DesiredOutputPulseShapeQ)==140:#will accept a straight list\n PreNormL.append(np.array(DesiredOutputPulseShapeQ))\n elif DesiredOutputPulseShapeQ.endswith(('.txt','.csv','.dat')):#will accept file\n with open(DesiredOutputPulseShapeQ,'r') as filehead:\n RawListQ=filehead.read()\n if '\\r\\n' in RawListQ:\n ListedValues=RawListQ.split('\\r\\n')\n elif '\\n' in RawListQ:\n ListedValues=RawListQ.split('\\n')\n elif ',' in RawListQ:\n ListedValues=RawListQ.split(',')\n else:\n print('Unrecognized format on input file.')\n return\n if len(ListedValues) != 140:\n print('File must have 140 entries; entry count: '+str(len(ListedValues)))\n return\n PreNormL.append(np.array(ListedValues))\n CPreNormL=np.sum(PreNormL,0)\n return [1.*entry/float(max(CPreNormL)) for entry in CPreNormL]",
"def do_sinad_cw_sweep( self, chans=[0,1], save=False, fname='sinad.npz',\n freqarray=[]):\n \tif not freqarray: #if freqarray is empty then...\n \tfreqs = self.freqs_bw\n\telse: \n \tfreqs = freqarray\n \n\tmulti_sinad=[]\n \n\tfor chan in chans:\n \tsinad_chan= {}\n \n \tfor i in range(len(freqs)): # MHz\n \t\tfreq = freqs[i]\n \t\traw, f = self.get_snap(chan, freq) \n \t\tsinad_chan[freq] = self.calc_sinad(raw,freq)\n \n \t\tmulti_sinad.append(sinad_chan)\n \n\tif save:\t\t\n \tnp.savez(fname, sinad=multi_sinad)\n \n\treturn multi_sinad",
"def GetSpectraFromIndexList(all_wl,all_spectra,idx_list):\n NBSPEC=len(all_spectra)\n \n \n all_wl_sel=[]\n all_spectra_sel=[]\n \n for idx in np.arange(0,NBSPEC):\n if idx in idx_list:\n all_wl_sel.append(all_wl[idx])\n all_spectra_sel.append(all_spectra[idx])\n return all_wl_sel,all_spectra_sel",
"def synthesize1(sampling_speed, n_samples, cAudioFilePath):\n t = np.linspace(0, 1, num=100)\n wavetable1 = t * (t < 0.5) + (-(t - 1)) * (t>= 0.5) #triangle wave\n samples = []\n current_sample = 0\n while len(samples) < n_samples:\n current_sample += sampling_speed\n current_sample = current_sample % wavetable1.size\n samples.append(wavetable1[current_sample])\n current_sample += 1\n return np.array(samples)",
"def get_samples(snd):\n\n return [samp for samp in snd]",
"def slow_wave_features(dataset, features, where = None):\r\n slow_wave_ids = list(dataset[\"sws\"].keys())\r\n\r\n features_out = []\r\n for name in features:\r\n feature = np.array([dataset[\"sws\"][k][name] for k in slow_wave_ids])\r\n if type(feature[0]) == type(NestedDict()):\r\n raise KeyError(name)\r\n if type(where) != type(None):\r\n feature = feature[where]\r\n features_out.append(feature)\r\n return features_out",
"def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr",
"def wav_reader(directory):\n wav_list = find_wavs(directory)\n res_list = []\n\n for wav in wav_list:\n temp_list = [wav]\n\n if re.match(r'.*target1.*\\.wav$', wav):\n temp_list.append(True)\n else:\n temp_list.append(False)\n\n res_list.append(tuple(temp_list))\n\n return res_list",
"def torch_calc_spectrograms(waves, window_lengths, spectral_diffs=(0, 1),\r\n window_name='hann', use_mel_scale=True,\r\n proj_method='matmul', num_spec_bins=256,\r\n random_crop=True):\r\n # waves = [tf.squeeze(w, axis=-1) for w in waves]\r\n waves = [torch.squeeze(w, dim=-1) for w in waves]\r\n\r\n if window_name == 'hann':\r\n # windows = [tf.reshape(tf.signal.hann_window(wl, periodic=False), [1, 1, -1])\r\n # for wl in window_lengths]\r\n windows = [torch.reshape(torch.from_numpy(W.hann(wl)), [1, 1, -1])\r\n for wl in window_lengths]\r\n elif window_name is None:\r\n windows = [None] * len(window_lengths)\r\n else:\r\n raise ValueError('Unknown window function (%s).' % window_name)\r\n\r\n spec_len_wave = []\r\n for d in spectral_diffs:\r\n for length, window in zip(window_lengths, windows):\r\n\r\n wave_crops = waves\r\n for _ in range(d):\r\n wave_crops = [w[:, 1:] - w[:, :-1] for w in wave_crops]\r\n\r\n if random_crop:\r\n # wave_crops = aligned_random_crop(wave_crops, length)\r\n wave_crops = torch_aligned_random_crop(wave_crops, length)\r\n\r\n # frames = [tf.signal.frame(wc, length, length // 2) for wc in wave_crops]\r\n frames = [torch.tensor(librosa.util.frame(wc.numpy(),length,length//2)) for wc in wave_crops]\r\n # TODO: Whether this method is feasible (in the gradient part) remains to be verified\r\n if window is not None:\r\n frames = [f * window for f in frames]\r\n\r\n if proj_method == 'fft':\r\n # ffts = [tf.signal.rfft(f)[:, :, 1:] for f in frames]\r\n ffts = [torch.rfft(f,signal_ndim=1)[:, :, 1:] for f in frames]\r\n elif proj_method == 'matmul':\r\n # mat = get_spectral_matrix(length, num_spec_bins=num_spec_bins,\r\n # use_mel_scale=use_mel_scale)\r\n # ffts = [matmul_real_with_complex(f, mat) for f in frames]\r\n mat = torch_get_spectral_matrix(length, num_spec_bins=num_spec_bins,\r\n use_mel_scale=use_mel_scale)\r\n ffts = [torch_matmul_real_with_complex(f, mat) for f in frames]\r\n\r\n #sq_mag = lambda x: tf.square(tf.math.real(x)) + tf.square(tf.math.imag(x))\r\n sq_mag = lambda x: (torch.view_as_real(x)[:,0])**2 + (torch.view_as_real(x)[:,1])**2\r\n # torch.view_as_real() opreation need the last release edition of Pytorch 1.6.0\r\n specs_sq = [sq_mag(f) for f in ffts]\r\n\r\n if use_mel_scale and proj_method == 'fft':\r\n sample_rate = 24000\r\n upper_edge_hertz = sample_rate / 2.\r\n lower_edge_hertz = sample_rate / length\r\n # lin_to_mel = tf.signal.linear_to_mel_weight_matrix(\r\n # num_mel_bins=num_spec_bins,\r\n # num_spectrogram_bins=length // 2 + 1,\r\n # sample_rate=sample_rate,\r\n # lower_edge_hertz=lower_edge_hertz,\r\n # upper_edge_hertz=upper_edge_hertz,\r\n # dtype=tf.dtypes.float32)[1:]\r\n # specs_sq = [tf.matmul(s, lin_to_mel) for s in specs_sq]\r\n lin_to_mel = torch_build_mel_basis(\r\n num_mel_bins=num_spec_bins,\r\n num_spectrogram_bins=length,\r\n sample_rate=sample_rate,\r\n lower_edge_hertz=lower_edge_hertz,\r\n upper_edge_hertz=upper_edge_hertz,\r\n dtype=torch.float32)\r\n # TODO: I use librosa to build the mel filters here to instead, and i'm not sure whether this method works or not\r\n specs_sq = [torch.matmul(s, lin_to_mel) for s in specs_sq]\r\n\r\n # specs = [tf.sqrt(s+EPSILON) for s in specs_sq]\r\n specs = [torch.sqrt(s+EPSILON) for s in specs_sq]\r\n\r\n spec_len_wave.append(specs)\r\n\r\n spec_wave_len = zip(*spec_len_wave)\r\n return spec_wave_len",
"def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna 
be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates",
"def getWAVs():\n return getFilesFromPath(\"music/wav/\")",
"def translate(self, sequence: List[Pulse], nshots=None):\n\n # First create np arrays for each channel\n start = min(pulse.start for pulse in sequence)\n end = max(pulse.start + pulse.duration for pulse in sequence)\n time_array = np.arange(\n start * 1e-9 - self.pulse_buffer,\n end * 1e-9 + self.pulse_buffer,\n 1 / self.sample_rate,\n )\n waveform_arrays = np.zeros((self.device.num_channels, len(time_array)))\n\n for pulse in sequence:\n start_index = bisect(time_array, pulse.start * 1e-9)\n end_index = bisect(time_array, (pulse.start + pulse.duration) * 1e-9)\n i_ch, q_ch = pulse.channel\n i, q = self.generate_waveforms_from_pulse(pulse, time_array[start_index:end_index])\n waveform_arrays[i_ch, start_index:end_index] += i\n waveform_arrays[q_ch, start_index:end_index] += q\n\n return waveform_arrays",
"def match(files, fields, wavls, alt_wavls=None):\r\n\tmatching = np.zeros((len(files), 2)) # matching 2d array\r\n\r\n\tfor i in range(len(files)): # iterates over all backsub filenames\r\n\t\tfor j in range(len(fields)): # loops over all fields\r\n\t\t\tif (fields[j] or fields[j].swapcase()) in files[i]: # identifies the field\r\n\t\t\t\tmatching[i,0] = j\r\n\t\t\t\tbreak\r\n\t\tfor k in range(len(wavls)): # loops over all wavelengths\r\n\t\t\tif wavls[k] in files[i]: # identifies the wavelength\r\n\t\t\t\tmatching[i,1] = k\r\n\t\t\t\tbreak\r\n\t\t\tif alt_wavls is not None: # checks if alt wavelengths are given\r\n\t\t\t\tif alt_wavls[k] in files[i]: # identifies alt wavelength\r\n\t\t\t\t\tmatching[i,1] = k\r\n\t\t\t\t\tbreak\r\n\r\n\treturn matching",
"def _weichall():\n try:\n LAchall=LOSC('a').rchall();LBchall=LOSC('b').rchall();L2chall=LOSC('2').rchall();\n allwvfm=[*LAchall[:2],*LBchall,*L2chall];\n allenergy=[*EMeters.EG1wYFE1in(),*EMeters.EG1w2in()[0],*EMeters.EG()[0][0]]\n allweich=[]\n for ii in range(10):\n templistQ=allwvfm[ii]\n bkgrdbuffer=int(0.038*len(templistQ))\n bkgrdQ=np.mean(templistQ[:bkgrdbuffer])\n ensampQ=allenergy[ii]\n weightQ=ensampQ/np.sum(np.array(templistQ)-bkgrdQ)\n allweich.append(np.array(weightQ*(np.array(templistQ)-bkgrdQ)))\n return allweich\n except:\n print('Weighted waveform generation failed!')\n return False",
"def make_wavetables(kernel: GPy.kern.Kern, n: int = 17, waveshaping: bool = False) -> List[np.ndarray]:\n wavetables = []\n\n if not waveshaping:\n cholesky = make_cov_cholesky(kernel)\n else:\n cholesky = make_cov_cholesky_waveshaping(kernel)\n for _ in range(n):\n wavetable = fast_normal_from_cholesky(cholesky)[0]\n wavetables.append(wavetable[:-1])\n\n return wavetables",
"def select(arrays, index):\n if arrays is None or any(i is None for i in arrays):\n return arrays\n return tuple(i.ravel()[index] for i in arrays)",
"def subset(self, words): \n sub = []\n for w in words:\n try:\n sub.append(self.E.v(w))\n except:\n continue \n return np.array(sub).T",
"def dtw_list_store(source, target, source_list, target_list):\n\n dtw_source = []\n dtw_target = []\n\n fs, source = scipy.io.wavfile.read(source)\n fs, target = scipy.io.wavfile.read(target)\n\n\n #source = psf.mfcc(source, 16000)\n #target = psf.mfcc(target, 16000)\n\n source, energy = psf.fbank(source, 16000)\n target, energy = psf.fbank(target, 16000)\n\n distance, path = fastdtw(source, target, dist=euclidean)\n\n for vertex in path:\n dtw_source.append(source[vertex[0],:])\n dtw_target.append(target[vertex[1],:])\n\n dtw_source = np.array(dtw_source)\n dtw_target = np.array(dtw_target)\n\n\n source_list.append(dtw_source)\n target_list.append(dtw_target)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Downloads a FASTA file for the proteome by organism ID
|
def get_fasta_by_id(proteome_id, output_file):
    taxid_pattern = re.compile(r'^\d{1,7}$')
# if not taxid_pattern.match(proteome_id): # fetch file from Uniprot
# raise ValueError(str(proteome_id) + ' is not a valid proteome identifier')
url = UNIPROT_BASE_URL + proteome_id
attempts = 0
while attempts < 3:
try:
response = requests.get(url)
if response.status_code > 399 or response.status_code < 200:
                raise requests.HTTPError(str(response.status_code) + ': ' + str(response.content))
content = response.content
if len(content) < 10:
raise FastaNotFoundError()
            with open(output_file, 'wb') as f:  # response.content is bytes, so write in binary mode
                f.write(content)
break
except requests.HTTPError as e:
attempts += 1
if attempts >= 3:
                raise FastaNotFoundError('Failed to download fasta: ' + str(response.status_code) + ': ' + str(response.content))
return output_file
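
A hedged usage sketch for get_fasta_by_id; UNIPROT_BASE_URL and FastaNotFoundError live in the original module and are stubbed here with placeholder values only so the snippet is self-contained:

import re
import requests

UNIPROT_BASE_URL = 'https://example.org/proteomes/'  # placeholder; the real constant is defined in the original module

class FastaNotFoundError(Exception):
    pass

try:
    get_fasta_by_id('9606', 'human_proteome.fasta')  # 9606 is the NCBI taxonomy ID for Homo sapiens (illustrative input)
except FastaNotFoundError as err:
    print('Download failed:', err)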
|
[
"def download_refseq_reference(reference_id, download_path):\n\n def mash_reference_id_to_ncbi_ftp_path(reference_id):\n \"\"\"\n Args:\n query_id (str): Mash reference ID (column 1 of mash dist report)\n Returns:\n list: Directory names used to locate reference genome\n on ftp://ftp.ncbi.nlm.nih.gov/genomes/all/\n For example:\n \"GCF/001/022/155\"\n \"\"\"\n prefix = reference_id.split('_')[0]\n digits = reference_id.split('_')[1].split('.')[0]\n path_list = [prefix] + [digits[i:i+3] for i in range(0, len(digits), 3)]\n\n return \"/\".join(path_list)\n\n ncbi_ftp_path = mash_reference_id_to_ncbi_ftp_path(reference_id)\n assembly = reference_id[:reference_id.find(\"_genomic.fna.gz\")]\n\n ncbi_ftp_server_base = \"ftp://ftp.ncbi.nlm.nih.gov\"\n fasta_url = \"/\".join([\n ncbi_ftp_server_base, \"genomes\", \"all\",\n ncbi_ftp_path,\n assembly,\n reference_id\n ])\n assembly_stat_url = \"/\".join([\n ncbi_ftp_server_base, \"genomes\", \"all\",\n ncbi_ftp_path,\n assembly,\n assembly + \"_assembly_stats.txt\"\n ])\n\n #fetch the files\n try:\n urllib.request.urlretrieve(fasta_url, \"/\".join([download_path, reference_id]))\n logger.info(\n \"file_downloaded\",\n timestamp=str(now()),\n url=fasta_url,\n )\n except Exception as e:\n logging.error(\n \"download_failed\",\n timestamp=str(now()),\n url=fasta_url,\n )\n try:\n urllib.request.urlretrieve(assembly_stat_url,\n \"/\".join([download_path, assembly + \"_assembly_stats.txt\"]))\n logger.info(\n \"file_downloaded\",\n timestamp=str(now()),\n url=assembly_stat_url,\n )\n except Exception as e:\n logging.error(\n \"download_failed\",\n timestamp=str(now()),\n url=assembly_stat_url,\n )",
"def download_proteome(proteome_id, data_dir, domain=\"Eukaryota\"):\n base = (\"ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/\"\n \"knowledgebase/reference_proteomes\")\n\n url = [base, domain, proteome_id + \".fasta.gz\"]\n outfile = os.path.join(data_dir, proteome_id + \".fasta\")\n\n with closing(request.urlopen(url)) as remote_handle:\n with open(remote_handle, \"rb\") as remote_file:\n mem_file = io.BytesIO(remote_file.read())\n\n with open(outfile, \"w\") as out, gzip.open(mem_file) as gz:\n outfile.write(gz.read())\n\n return outfile",
"def download_genomes(DF):\n\n RNA_dir = GENOME_DIR + '/rna/'\n GENOMIC_dir = GENOME_DIR + '/genomic/'\n\n if not os.path.exists(RNA_dir):\n os.makedirs(RNA_dir)\n if not os.path.exists(GENOMIC_dir):\n os.makedirs(GENOMIC_dir)\n\n for genome_link in DF['ftp']:\n prefix = genome_link.split('/')[-1]\n rna_fname = prefix + '_rna.fna.gz'\n genomic_fname = prefix + '_genomic.fna.gz' \n\n cmd_rna = 'wget' + ' ' + '-O' + ' ' + RNA_dir+rna_fname + ' ' + genome_link + '/' + rna_fname\n cmd_genomic = 'wget' + ' ' + '-O' + ' ' + GENOMIC_dir + genomic_fname + ' ' + genome_link + '/' + genomic_fname\n\n if not os.path.exists(RNA_dir + rna_fname):\n subprocess.call(cmd_rna, shell=True)\n\n if not os.path.exists(GENOMIC_dir + genomic_fname):\n subprocess.call(cmd_genomic, shell=True)",
"def download_ncbi_mapping(build):\n\n if build=='GRCh37':\n ftpsite = 'ftp://ftp.ncbi.nlm.nih.gov/genomes/Homo_sapiens/GRCh37.p13_interim_annotation/'\n files = [\n 'interim_GRCh37.p13_knownrefseq_alignments_2017-01-13.bam',\n 'interim_GRCh37.p13_top_level_2017-01-13.gff3.gz'\n ]\n else:\n ftpsite = 'ftp://ftp.ncbi.nlm.nih.gov/genomes/Homo_sapiens/ARCHIVE/ANNOTATION_RELEASE.108/GRCh38.p10_interim_annotation/'\n files = [\n 'interim_GRCh38.p10_knownrefseq_alignments_2017-01-13.bam',\n 'interim_GRCh38.p10_top_level_2017-01-13.gff3.gz'\n ]\n\n for fn in files:\n f = urlopen(ftpsite + fn)\n with open(fn, \"wb\") as datafile:\n datafile.write(f.read())",
"def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))",
"def fetch_uniprot_fasta(accession_id):\n base_url = \"http://www.uniprot.org/uniprot/\"\n \n fasta_url = base_url + accession_id + \".fasta\"\n \n sequence = \"\"\n \n for line in urllib.request.urlopen(fasta_url):\n text = line.decode(\"utf-8\").strip()\n \n if text.startswith(\">\"):\n fasta_id = text\n else:\n sequence += text\n \n return(fasta_id, sequence)",
"def download(dataset_csv_path='ncbi_ids.csv', save_path='../data/RefSeq'):\n\n Entrez.email = \"your_email@gmail.com\"\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n with open(dataset_csv_path, 'r') as f:\n data = csv.reader(f)\n for row in data:\n microbe_id = row[0].split('.')[0]\n if os.path.exists(os.path.join(save_path, microbe_id + '.fasta')):\n continue\n\n handle = Entrez.efetch(db=\"nucleotide\", id=microbe_id,\n rettype=\"fasta\", retmode=\"text\")\n record = SeqIO.read(handle, \"fasta\")\n handle.close()\n SeqIO.write(record, os.path.join(save_path, microbe_id + \".fasta\"),\n \"fasta\")",
"def download_genome(genome_dir, ref='hg19'):\n # TODO: find the hg19 genome???",
"def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"ks2074@gmail.com\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()",
"def ncbi_download(tool, genome_download_dir, parallel, host_taxid):\n assert (tool == \"kraken\") | (tool == \"kaiju\"),\\\n \"Argument 'tool' must be either 'kraken' or 'kaiju'.\"\n if tool == \"kraken\":\n file_format = \"fasta\"\n else:\n file_format = \"protein-fasta\"\n\n # Check directory exists\n if not os.path.exists(genome_download_dir):\n os.makedirs(genome_download_dir)\n\n ngd_command = \"ncbi-genome-download -F \" + file_format + \" -o \" + genome_download_dir\n\n if host_taxid:\n # Single host ID, so no need to set the parallel option\n taxid_ngd_command = ngd_command + \" --species-taxid \" + str(host_taxid) + \" plant\"\n subprocess.check_call(taxid_ngd_command, shell=True)\n\n ngd_command += \" --parallel \" + str(parallel) + \" viral\"\n subprocess.check_call(ngd_command, shell=True)",
"def download(org_file):\n commands.download(org_file)",
"def download_file(id_map):\n counter = 0\n for key,value in id_map.iteritems():\n path = get_path(key)\n if not os.path.exists(path):\n os.makedirs(path)\n\n for pid in value:\n counter = counter + 1\n if(counter%50==0) :\n print \"downloaded \" + str(counter) + \" files\"\n proteinFile = pid + '.pdb.gz'\n download_protein(path, proteinFile)",
"def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None",
"def fetch_sequence(sequence_id, database='uniprot'):\n if sequence_id.startswith('UPI'):\n database = 'uniparc'\n url_template = 'http://www.uniprot.org/uniparc/{}.fasta'\n elif sequence_id.startswith('UniRef'):\n database = 'uniref'\n url_template = 'http://www.uniprot.org/uniref/{}.fasta'\n else:\n database = 'uniprot'\n url_template = 'http://www.uniprot.org/uniprot/{}.fasta'\n\n url = url_template.format(sequence_id)\n logger.debug('Downloading sequence {} from {}...'.format(sequence_id, url))\n\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Failed to fetch sequence with return code: {}\".format(r.status_code))\n\n seq = Bio.SeqIO.read(io.StringIO(r.text), 'fasta')\n if database == 'uniprot':\n seq.annotations['db'], seq.id, seq.name = re.split('[\\| ]', seq.id)\n return seq",
"def download_genome(self):\n downloader = GenomeDownloader(self.config.download_dir, GENE_LIST_HOST,\n self.genome_data.genome_file, self.genome_data.genomename,\n update_progress=self.update_progress)\n downloader.download()",
"def download_assembly(assembly_name, assembly_dirpath):\r\n\ttries = 1\r\n\r\n\tplant_name = plant_assembly_name(assembly_name)\r\n\trefseq_path = get_download_url_from_refseq_genomes(assembly_name)\r\n\r\n\tif refseq_path is not None:\r\n\t\turl_for_assembly_file = refseq_path\r\n\telif plant_name is None:\r\n\t\turl_for_assembly_file = get_download_url_from_ucsc(assembly_name)\r\n\telse:\r\n\t\turl_for_assembly_file = get_download_url_from_ensemble_plants(plant_name, assembly_dirpath)\r\n\r\n\tfile_suffix = re.search(\"\\.((fa.*)|(2bit)|(fna.*))$\", url_for_assembly_file.split(SEP)[-1]).group()\r\n\tfile_suffix = re.sub(\"fna\", \"fa\", file_suffix)\r\n\tfa_file = assembly_dirpath + assembly_name + \".fa\"\r\n\tdownloaded_file = assembly_dirpath + assembly_name\r\n\tmax_tries = 10\r\n\twhile not os.path.exists(downloaded_file + file_suffix) and tries <= max_tries:\r\n\t\ttry:\r\n\t\t\turllib.request.urlretrieve(url_for_assembly_file, downloaded_file + file_suffix)\r\n\t\texcept Exception as err:\r\n\t\t\tif url_for_assembly_file.endswith(\"fa.gz\"):\r\n\t\t\t\tfile_suffix = \".2bit\"\r\n\t\t\t\ttry:\r\n\t\t\t\t\turllib.request.urlretrieve(url_for_assembly_file[:-6] + file_suffix, downloaded_file + file_suffix)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tfile_suffix = \".fa.gz\"\r\n\t\t\t\t\ttime.sleep(60)\r\n\t\t\t\t\ttries += 1\r\n\r\n\tif not os.path.exists(downloaded_file + file_suffix): # loop has ended with no results\r\n\t\tdownload_error(assembly_dirpath)\r\n\r\n\ttry:\r\n\t\tif file_suffix == \".2bit\":\r\n\t\t\t# transform twobit to fa.gz\r\n\t\t\tos.system(TWOBITTOFA_EXE + \" \" + downloaded_file + file_suffix + \" \" + fa_file)\r\n\t\t\tos.remove(downloaded_file + \".2bit\")\r\n\t\telif file_suffix == \".fa.gz\" or file_suffix == \".fna.gz\":\r\n\t\t\t# unzip gzip (to rezip with bgzip)\r\n\t\t\tos.system(\"gunzip \" + downloaded_file + file_suffix)\r\n\r\n\texcept:\r\n\t\tset_error_message(\"Failed to extract assembly fasta file.\")\r\n\t\traise RuntimeError(\"Failed to extract assembly fasta file.\")\r\n\r\n\treturn fa_file #returns the path of the downloaded fa file\r",
"def get_protein_fasta(uniprot_id):\r\n url = \"http://www.uniprot.org/uniprot/{}.fasta\".format(uniprot_id)\r\n string = re.split(\"\\n\",ur.urlopen(url).read().decode(),1)[1]\r\n return re.sub(\"\\n\",\"\",string)",
"def fetch_genome(reference_name):\n from utils import script_dir\n genome_list = yaml.load(open(script_dir + \"/utils/genomes.yaml\",\"r\"))\n makedir(\"genomes\")\n if reference_name not in genome_list:\n msg(\"Reference Genome not available\", \"error\")\n ftp_loc = genome_list[reference_name]\n filename = os.path.split(ftp_loc)[1]\n makedir(\"{script_dir}/genomes/{reference_name}\".format(**locals()))\n reference_loc = \"{script_dir}/genomes/{reference_name}/{filename}\".format(**locals())\n if not file_exists( reference_loc + \".sa\"):\n print(\"Downloading {filename}\".format(**locals()))\n os.system(\"curl {ftp_loc} > {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n # Unzip and rezip with bgzip\n if filename.endswith(\".gz\"):\n os.system(\"gunzip {reference_loc} && bgzip {reference_loc2}\".format(reference_loc=reference_loc, reference_loc2=reference_loc.replace(\".gz\",\"\")))\n print(\"Indexing {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n os.system(\"bwa index {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n else:\n msg(\"Reference Already downloaded and indexed.\", \"error\")",
"def get_assemblies(term, download=True, path='assemblies'):\n\n from Bio import Entrez\n #provide your own mail here\n Entrez.email = \"A.N.Other@example.com\"\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax='200')\n record = Entrez.read(handle)\n ids = record['IdList']\n print (f'found {len(ids)} ids')\n links = []\n for id in ids:\n #get summary\n summary = get_assembly_summary(id)\n #get ftp link\n url = summary['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']\n if url == '':\n continue\n label = os.path.basename(url)\n #get the fasta link - change this to get other formats\n link = os.path.join(url,label+'_genomic.fna.gz')\n print (link)\n links.append(link)\n if download == True:\n #download link\n urllib.request.urlretrieve(link, f'{label}.fna.gz')\n return links"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gapfill a model using probabilistic weights
|
def probabilistic_gapfill(model, universal_model, reaction_probabilities, clean_exchange_rxns=True, default_penalties=None, dm_rxns=False, ex_rxns=False, **solver_parameters):
universal_model = universal_model.copy()
model = clean_exchange_reactions(model) if clean_exchange_rxns else model.copy()
if default_penalties is None:
default_penalties = {'Universal': 1, 'Exchange': 100, 'Demand': 1, 'Reverse': 75}
penalties = default_penalties
reactions_to_remove = []
for r in universal_model.reactions:
if model.reactions.has_id(r.id):
reactions_to_remove.append(r)
penalties[r.id] = 0 # In the model
elif r.id in reaction_probabilities:
penalties[r.id] = max(0, 1 - reaction_probabilities[r.id]) * (penalties[r.id] if r.id in penalties else 1)
universal_model.remove_reactions(reactions_to_remove)
return cobra.flux_analysis.gapfill(model, universal_model, penalties=penalties, demand_reactions=dm_rxns, exchange_reactions=ex_rxns, **solver_parameters)
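
A hedged usage sketch for probabilistic_gapfill, assuming cobrapy is installed and the SBML file names are placeholders; clean_exchange_rxns is set to False here because the clean_exchange_reactions helper belongs to the original module and is not shown in this record:

import cobra

model = cobra.io.read_sbml_model('draft_model.xml')          # placeholder draft reconstruction
universal = cobra.io.read_sbml_model('universal_model.xml')  # placeholder universal reaction database
rxn_probs = {'rxn00001': 0.92, 'rxn00002': 0.15}             # placeholder reaction probabilities in [0, 1]

solutions = probabilistic_gapfill(model, universal, rxn_probs, clean_exchange_rxns=False)
print([r.id for r in solutions[0]])  # reactions proposed by the first gapfill solution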
|
[
"def gap2d(_w_in):\n return nn.AdaptiveAvgPool2d((1, 1))",
"def gap2d(w_in):\n return nn.AdaptiveAvgPool2d((1,1))",
"def xgboost_weight(x, y, subprob_num, iter_num):\n data = x\n d_size = data.shape\n vim = np.zeros((d_size[1], subprob_num)).tolist() # vim: weights of Regulatory network\n\n for i in range(0, d_size[1]):\n print(\"----------------------------------------------------------------\", i,\n \"----------------------------------------------------------------\")\n y1 = y[:, i].reshape(d_size[0], 1)\n if i == 0:\n x = data[:, 1:subprob_num]\n elif i < subprob_num:\n x = np.hstack((data[:, 0:i], data[:, i + 1:subprob_num]))\n else:\n x = data[:, 0:subprob_num]\n\n # Build model\n params = {\n 'booster': 'gbtree',\n 'gamma': 0.2,\n 'max_depth': 4,\n 'min_child_weight': 4,\n 'alpha': 0,\n 'lambda': 0,\n 'subsample': 0.7,\n 'colsample_bytree': 0.9,\n 'silent': 1,\n 'eta': 0.0008\n }\n\n dtrain = xgb.DMatrix(x, y1)\n plst = params.items()\n model = xgb.train(plst, dtrain, iter_num)\n\n # Compute and sort feature importance\n importance = model.get_fscore()\n importance = sorted(importance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Convert the importance list to matrix weights\n for j in range(0, len(importance)):\n num = re.findall(r'\\d+', importance[j][0])\n num = np.array(num)\n num = np.core.defchararray.strip(num, '()')\n num = int(num)\n if i >= subprob_num - 1:\n fea_num = num\n else:\n if num < i:\n fea_num = num\n else:\n fea_num = num + 1\n vim[i][fea_num] = importance[j][1]\n\n return vim",
"def fill_gaps(model):\r\n #first dilate then erode\r\n layer = res.resample_layer(model[-1])\r\n layer = res.dilate(model[-1])\r\n layer = res.erode(layer)\r\n #layers = res.rebrick(layer)\r\n \r\n #model[-1] = copy.deepcopy(layers[choice])\r\n return layer\r\n #return layers\r",
"def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)",
"def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)",
"def rebalance_weightings(context):\r\n total_ratio = 0\r\n log.info(\"*******Rebalancing weightings********\")\r\n print(context.up_ratios)\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n total_ratio += ratio\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n context.max_weights[asset] = ratio/total_ratio\r\n \r\n log.info(context.max_weights)",
"def balance_training_weight(w, y):\n sample_weight = w.copy()\n neg_mask = (y == 0)\n pos_mask = (y == 1)\n \n bkg_sum_weight = np.sum(sample_weight[neg_mask])\n sig_sum_weight = np.sum(sample_weight[pos_mask])\n\n sample_weight[pos_mask] = sample_weight[pos_mask] / sig_sum_weight\n sample_weight[neg_mask] = sample_weight[neg_mask] / bkg_sum_weight\n return sample_weight",
"def fillGap(self, X, y, T, knn):\n knnobj = neighbors.KNeighborsRegressor(knn)\n return knnobj.fit(X, y).predict(T)",
"def bias_prior(self):",
"def _update_model(self, w_grads, b_grads):\n for w, b, w_grad, b_grad in zip(self.weights, self.biases, w_grads, b_grads):\n w -= self.learning_rate * w_grad\n b -= self.learning_rate * b_grad",
"def update_weights(self, weights, g):\n for i in range(len(weights)):\n for param in weights[i].keys():\n \n ### update weights\n # weights[i][param] = None\n \n # ----------------\n # your code here\n weights[i][param] += self.step_size * g[i][param]\n \n # ----------------\n \n return weights",
"def test_damping():\n kernel = ks.kernel.Constant(1.0)\n model = ks.BinaryModel()\n for x in [\"A\", \"B\", \"C\", \"D\"]:\n model.add_item(x, kernel=kernel)\n model.observe(winners=[\"C\", \"D\"], losers=[\"A\", \"B\"], t=0.0)\n model.observe(winners=[\"A\", \"B\"], losers=[\"C\", \"D\"], t=0.0)\n model.observe(winners=[\"A\", \"B\"], losers=[\"C\", \"D\"], t=0.0)\n # Without damping, this simple example diverges.\n assert not model.fit(max_iter=20)\n # However, a little bit of damping is enough to make it converge.\n assert model.fit(max_iter=20, lr=0.8)",
"def train_gradient_boost(self, params, num_boost_round = 50):\n print \"training GB......\"\n dtrain = xgb.DMatrix(self.X, self.y)\n model = xgb.train(params, dtrain, num_boost_round = num_boost_round)\n self.models += [model]",
"def test_weightsDontChangeWithPredict(self):\r\n # AND the it's weights\r\n before_weights = self.per.weights[:]\r\n\r\n # AND a list of attributes\r\n attr_vec = [0.5] * input_length\r\n\r\n #\r\n # WHEN predicting the class for the list of attributes\r\n self.per.predict(attr_vec)\r\n\r\n #\r\n # THEN the weights do not change\r\n after_weights = self.per.weights[:]\r\n self.assertEqual(before_weights, after_weights, 'The weights should not change after predicting a class')",
"def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)",
"def adjustweight(lr_weight=1.0):\n\n return 1.0 / lr_weight",
"def get_class_imbalance_weights_classification(training_df, params):\n predictions_array = (\n training_df[training_df.columns[params[\"headers\"][\"predictionHeaders\"]]]\n .to_numpy()\n .ravel()\n )\n class_count = np.bincount(predictions_array)\n classes_to_predict = np.unique(predictions_array)\n total_count = len(training_df)\n penalty_dict, weight_dict = {}, {}\n\n # for the classes that are present in the training set, construct the weights as needed\n for i in classes_to_predict:\n weight_dict[i] = (class_count[i] + sys.float_info.epsilon) / total_count\n penalty_dict[i] = (1 + sys.float_info.epsilon) / weight_dict[i]\n\n # this is a corner case\n # for the classes that are requested for training but aren't present in the training set, assign largest possible penalty\n for i in params[\"model\"][\"class_list\"]:\n i = int(i)\n if i not in weight_dict:\n print(\n \"WARNING: A class was found in 'class_list' that was not present in the training data, please re-check training data labels\"\n )\n weight_dict[i] = sys.float_info.epsilon\n penalty_dict[i] = (1 + sys.float_info.epsilon) / weight_dict[i]\n\n # ensure sum of penalties is always 1\n penalty_sum = (\n np.fromiter(penalty_dict.values(), dtype=np.float64).sum()\n + sys.float_info.epsilon\n )\n for i in range(params[\"model\"][\"num_classes\"]):\n penalty_dict[i] /= penalty_sum\n\n return penalty_dict, weight_dict",
"def _construct_gpy_model(self) -> GPy.models.GPRegression:\n kernel_func = self.model_settings.get_kernel()\n # This line used to be tried twice, with all exceptions being caught from the first try.\n # Removed, to test the hypothesis that it's no longer necessary.\n kern = kernel_func(\n self.train.ndims, ARD=self.model_settings.anisotropic_lengthscale\n ) # type: ignore # not callable\n\n if self.model_settings.add_bias: # pragma: no cover\n bias_kern = GPy.kern.Bias(self.train.ndims)\n kern = kern + bias_kern\n\n # The model produces -1 * observations, so that Bayes-Opt can minimize it\n gpr = GPy.models.GPRegression(self._x_train, -self._y_train[:, np.newaxis], kern)\n\n # Set the priors for hyperparameters (if specified in the config):\n for param_name, prior_config in self.model_settings.priors.items(): # pragma: no cover\n if param_name not in gpr.parameter_names():\n raise ValueError( # pragma: no cover\n f\"No such hyperparameter {param_name} to set a prior for. Hyperparameters present in the model \"\n f\"are:\\n{gpr.parameter_names()}\"\n )\n prior = prior_config.get_prior()\n gpr[param_name].set_prior(prior) # type: ignore # set_prior missing for ParamConcatenation\n logging.info(f\"- Prior on {param_name}: {prior}\")\n\n # Fix the hyperparameters as specified in config\n if self.model_settings.fixed_hyperparameters:\n for hyperparam_name, value in self.model_settings.fixed_hyperparameters.items():\n if hyperparam_name not in gpr.parameter_names():\n raise ValueError( # pragma: no cover\n f\"No such hyperparameter {hyperparam_name} to fix in the model. Hyperparameters present are:\"\n f\"\\n{gpr.parameter_names()}\"\n )\n logging.info(f\"- Fixing {hyperparam_name} as {value}\")\n gpr[hyperparam_name] = value\n gpr[hyperparam_name].fix()\n return gpr"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Exports the given reaction probabilities into a JSON formatted file, saved at filename
|
def export_json(rxn_probs, filename):
with open(filename, 'w') as f:
f.write(json.dumps(rxn_probs))
return filename
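
A minimal round-trip sketch for export_json using only the standard library; the file name and probability values are arbitrary:

import json

rxn_probs = {'rxn00001': 0.87, 'rxn00002': 0.42}  # illustrative probabilities
path = export_json(rxn_probs, 'rxn_probs.json')
with open(path) as f:
    assert json.load(f) == rxn_probs  # the written file holds the same mapping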
|
[
"def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)",
"def save_outcomes():\n all_data = gen_outcomes()\n with open(\"result.json\", \"w\", encoding='utf-8') as jsonfile:\n json.dump(all_data, jsonfile, ensure_ascii=False)",
"def to_json(self, filename):\n with open(filename, 'w') as file:\n file.write(json.dumps({\"scores\": self.serialize()}))",
"def class2json(classifier, filename = \"classifier\"):\n model_json = classifier.to_json()\n with open(filename + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n # Serialize weights to HDF5\n classifier.save_weights(filename + \".h5\")\n print(\"Successfully saved the classifier to file \" + filename + \".\")",
"def output(self, filename):\n with open(filename, 'w') as f:\n op = {}\n layer_res = []\n alphas_res = []\n for layer in self._layers:\n weights = []\n alphas = []\n for neuron in layer._neurons:\n weights.append(neuron._weights)\n alphas.append(neuron._alpha)\n layer_res.append(weights)\n alphas_res.append(alphas)\n op['layers'] = layer_res\n op['alphas'] = alphas_res\n json.dump(op, f, indent='\\t')",
"def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))",
"def writeJSON(filename):\n if not filename.endswith('.json'):\n filename += '.json'\n with open(filename, 'w') as f:\n for x in range(numRows):\n scores = quizScores()\n types = getTypes(scores)\n row = { 'id': x,\n 'challenger': types[0], 'collaborator': types[1],\n 'communicator': types[2], 'contributor': types[3],\n 'q1': scores[0], 'q2': scores[1], 'q3': scores[2],\n 'q4': scores[3], 'q5': scores[4], 'q6': scores[5],\n 'q7': scores[6], 'q8': scores[7], 'q9': scores[8],\n 'q10': scores[9], 'q11': scores[10], 'q12': scores[11],\n 'q13': scores[12], 'q14': scores[13], 'q15': scores[14],\n 'q16': scores[15], 'q17': scores[16], 'q18': scores[17]\n }\n json.dump(row, f, sort_keys=True)",
"def export_json(filename, pokemon_data):\n with open(filename, \"w\") as file:\n file.write(json.dumps(pokemon_data))",
"def save(self, filename):\n import json\n\n json = json.dumps(self.joint_limits)\n with open(filename, 'w') as f:\n f.write(json)",
"def writeToFile(hits, fname):\n with open(fname, \"w\") as f:\n f.write(json.dumps(hits))",
"def outputEstimatorToFile(self, file_name):\n json_dict = {\n 'click_model': self.click_model.getModelJson(),\n 'IPW_list': self.IPW_list\n }\n with open(file_name, 'w') as fout:\n fout.write(json.dumps(json_dict, indent=4, sort_keys=True))\n return",
"def write_predictions(all_predictions, output_prediction_file):\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")",
"def outputEstimatorToFile(self, file_name):\n json_dict = {\n 'click_model': self.click_model.getModelJson(),\n }\n with open(file_name, 'w') as fout:\n fout.write(json.dumps(json_dict, indent=4, sort_keys=True))\n return",
"def write_reaction_output_file(self, path, delimiter='\\t'):\n path = utils.prevent_overwrite(path)\n with open(path, 'w') as outfile:\n outfile.write('ID\\tName\\tID equation\\tSMILES equation\\tRxn hash\\t'\n 'Reaction rules\\n')\n for rxn in sorted(self.reactions.values(), key=lambda x: x['ID']):\n outfile.write(delimiter.join([rxn['ID'], '', rxn['ID_rxn'],\n rxn[\"SMILES_rxn\"], rxn['_id'],\n ';'.join(rxn['Reaction_rules'])])\n + '\\n')",
"def __write_csv(self, prediction_probs, n, filename):\n d = {'Id': pd.Series([i for i in xrange(1, n + 1)]),\n 'Action': pd.Series(prediction_probs)}\n df = pd.DataFrame(d)\n df = df[['Id', 'Action']]\n df.to_csv(filename, sep=',', encoding='utf-8',\n index=False)",
"def save_highscores(self, contents):\n\t\ttry:\n\t\t\twith open(self.filename, 'w') as f_obj:\n\t\t\t\tf_obj.write(json.dumps(contents)) #save as json\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 741 6243 for assistance.')",
"def save_all_rates(filename):\n\tdata=output_rates()\n\twith open(filename, 'w') as outfile:\n\t\tjson.dump(data, outfile)",
"def save_priors(name, prior_dict):\n with open(name + \"_priors.json\", \"w\") as fp:\n json.dump(prior_dict, fp)",
"def json_output(result, json_file=os.path.join(os.path.abspath(os.path.dirname(__file__)),'OMDb_Ratings.txt')):\n with open(json_file, 'w') as f:\n json.dump(result, f, indent=4)\n print('Query output created at {}'.format(json_file))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return the probability of a given reaction
|
def get_probability(self, reaction):
return self.__getitem__(reaction)
|
[
"def probability(self):\n\t\ta0 = 0\n\t\ta = []\n\t\tfor reaction in self.reactions:\n\t\t\tfor colony in reaction.colonies:\n\t\t\t\tname = colony.name\n\t\t\t\ttry:\n\t\t\t\t\tnum = reaction.reactants[name]\n\t\t\t\t\tprob = choose(colony.size, num)*reaction.rate\n\t\t\t\t\ta.append(prob)\n\t\t\t\t\ta0 += prob\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\treturn a0, a",
"def get_action_probability(self, read_probability=1.0):\n\n action_probability = [0.0, 0.0]\n total_positive_accumulative_regret = 0.0\n\n # Compute the sum of all positive regret\n for i in range(self.ACTION_NUMBER):\n if self.accumulative_regret_array[i] > 0:\n action_probability[i] = self.accumulative_regret_array[i]\n else:\n action_probability[i] = 0.0\n\n total_positive_accumulative_regret += action_probability[i]\n\n # Divide by the sum of all positive regret\n for i in range(self.ACTION_NUMBER):\n if total_positive_accumulative_regret > 0:\n action_probability[i] = action_probability[\n i] / total_positive_accumulative_regret\n else:\n action_probability[i] = 1.0 / self.ACTION_NUMBER\n\n # Accumulate the action probability with read probability as weight, which is used for getting final action probability\n for i in range(self.ACTION_NUMBER):\n self.accumulative_action_probability_array[i] += action_probability[\n i] * read_probability\n\n return action_probability",
"def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total",
"def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))",
"def calculate_probability(self):\n return 0",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs",
"def get_response_probability(self, ind):\n return self.rp_t[ind]",
"def get_response_probability(self, ind):\n pass",
"def get_probability_score(self):\r\n return self.probability_score",
"def probability_of(self, conditions):\n return self.distribution.probability_of(conditions)",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)",
"def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result",
"def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]",
"def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm",
"def _pick_next_reaction(net, r0):\n\n propensities = []\n for reaction in net.reactions:\n try:\n div_result = reaction.rate(net.species) / r0\n except ZeroDivisionError:\n div_result = reaction.rate(net.species) / 1\n propensities.append(div_result)\n\n random_reaction = GillespieSimulator._pick_weighted_random(net.reactions, propensities)\n return random_reaction.change_vector(net.species)",
"def calcProbability(self, look, hit, nohit, hit_count, denominator):\n\n try:\n combination = calcCombination(look, hit_count)\n hit_permutation = calcPermutation(hit, hit_count)\n nohit_permutation = calcPermutation(nohit, look - hit_count)\n probability = combination * hit_permutation * nohit_permutation / denominator\n except PermutationError:\n raise ProbabilityError(\"Permutation\")\n except CombinationError:\n raise ProbabilityError(\"Combination\")\n except ZeroDivisionError:\n raise ProbabilityError(\"denominator\")\n return probability",
"def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n assert step_rewards is not None\n probabilities = torch.exp(torch.tensor(step_rewards, dtype=self.dtype))\n probabilities = probabilities / torch.sum(probabilities)\n\n if action is not None:\n return probabilities[action]\n else:\n return probabilities",
"def simulation_probability(probability,\n initial_money,\n fortune,\n number_of_simulations=1000):\n number_of_successes = 0\n number_of_ruins = 0\n for t in range(number_of_simulations):\n outcome = gamblers_ruin_outcome(probability,\n initial_money=initial_money,\n fortune=fortune)\n if outcome == 1:\n number_of_successes += 1\n elif outcome == 0:\n number_of_ruins += 1\n return number_of_successes/number_of_simulations",
"def ProbCorrect(efficacy, difficulty, a=1):\n return 1 / (1 + math.exp(-a * (efficacy - difficulty)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deserialize a ReactionProbabilities from a JSON file
|
def from_json_file(path):
with open(path, 'r') as f:
return ReactionProbabilities.from_json(f.read())
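
A hypothetical usage sketch tying this record to the earlier ones: read back a file written by export_json and query it through get_probability; it assumes the ReactionProbabilities.from_json classmethod (not shown in this record) accepts the JSON text produced above:

rxn_probs = ReactionProbabilities.from_json_file('rxn_probs.json')
print(rxn_probs.get_probability('rxn00001'))  # uses the __getitem__-backed accessor from the earlier record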
|
[
"def load_priors(file_name):\n with open(file_name, \"r\") as fp:\n priors = json.load(fp)\n return priors",
"def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data",
"def load_priors(self, json_file):\n\n with open(json_file, 'r') as jf:\n self.priors_dict = json.load(jf)",
"def load_reseq_conditions_from(json_file_or_dict):\n\n # refactor that common useage from TC io\n if isinstance(json_file_or_dict, dict):\n d = json_file_or_dict\n else:\n with open(json_file_or_dict, 'r') as f:\n d = json.loads(f.read())\n\n return ReseqConditions.from_dict(d)",
"def MDPfromJson (filename) :\n with open (filename, 'r') as fd :\n dct = json.loads(fd.read())\n\n s0 = int(dct['s0'])\n S = int(dct['S'])\n A = int(dct['A'])\n T = np.array(dct['T'])\n gamma = float(dct['gamma'])\n R = np.array(dct['R'])\n return MDP(s0, S, A, T, gamma, R, 20)",
"def from_json(cls, filepath):\n\n with open(filepath) as file:\n modifier_data = json.load(file)\n item_data = []\n for data in modifier_data[\"item_data\"]:\n item_data.append(ConTextItem.from_dict(data))\n return item_data",
"def read_proto(filename):\n results = results_pb2.ImprovementResults()\n with open(filename, 'rb') as f:\n results.ParseFromString(f.read())\n return results",
"def from_json(cls, fname):\n d = read_json(fname)\n return cls.from_dict(d)",
"def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))",
"def from_json(self, filename):\n if filename.split('.')[1] != 'json':\n raise TypeError(\"Only json files accepted\")\n with open(filename, 'r') as file:\n data = json.load(file)\n for each in data[\"scores\"]:\n self.add_score(Score(each[\"name\"], each[\"score\"]))",
"def json2class(filename = \"classifier\"):\n json_file = open(filename + \".json\", 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # Load weights into new model\n loaded_model.load_weights(filename + \".h5\")\n print(\"Loaded model from disk.\")\n return loaded_model",
"def prob1(filename=\"nyc_traffic.json\"):\n with open(filename, 'r') as f:\n traffic_data = json.load(f)\n reasons = dict()\n for incident in traffic_data:\n for key in incident.keys():\n if 'contributing_factor' in key:\n if key in reasons.keys():\n reasons[key] += 1\n else:\n reasons[key] = 0\n '''\n for i in range(1,6):\n factor = 'contributing_factor_vehicle_' + str(i)\n try:\n reason = incident[factor]\n except KeyError as e:\n pass\n else:\n if reason in reasons.keys():\n reasons[reason] += 1\n else:\n reasons[reason] = 0\n '''\n reasons = sorted(list(reasons.items()), key = lambda x: x[1], reverse = True)\n graphed_reasons = reasons[:7]\n labels = []\n number = []\n for r in graphed_reasons:\n labels.append(r[0].replace(' ', '\\n').replace('/','/\\n'))\n number.append(r[1])\n x = np.arange(len(labels))\n plt.figure(figsize = (10,7))\n plt.barh(x, number)\n plt.yticks(x, labels, fontsize = 6)\n plt.show()",
"def load_rentals_file(filename):\n with open(filename) as file:\n try:\n data = json.load(file)\n except FileNotFoundError:\n LOGGER.error(\"Missing file %s\", filename)\n exit(1)\n return data",
"def parsing(cls, file_path, decoder):\n data = []\n with open(file_path) as f:\n for line in f:\n try:\n obj = json.loads(line, cls=decoder)\n except Exception as e:\n raise Exception('error in line %s' % line + e.message)\n data.append(obj)\n return data",
"def file_to_dicts(self, file: str) -> [dict]:\n dicts = read_dpr_json(file, max_samples=self.max_samples, num_hard_negatives=self.num_hard_negatives, num_positives=self.num_positives, shuffle_negatives=self.shuffle_negatives, shuffle_positives=self.shuffle_positives)\n\n # shuffle dicts to make sure that similar positive passages do not end up in one batch\n dicts = random.sample(dicts, len(dicts))\n return dicts",
"def read_film_file(filename):\n with open(filename, 'r') as fp:\n return json_load_byteified(fp)",
"def load_personality_adj():\n return json.load(open(personality_adj()))",
"def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))",
"def load (cls, file):\n with open(file) as f:\n raw = json.load(f)\n obj = PasswordSetCharacteristics()\n obj.lengths = cls.to_num_dict(raw['lengths'])\n obj.lower_counts = cls.to_num_dict(raw['lowerCounts'])\n obj.upper_counts = cls.to_num_dict(raw['upperCounts'])\n obj.digit_counts = cls.to_num_dict(raw['digitCounts'])\n obj.symbol_counts = cls.to_num_dict(raw['symbolCounts'])\n obj.class_counts = cls.to_num_dict(raw['classCounts'])\n obj.word_counts = cls.to_num_dict(raw['wordCounts'])\n return obj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the Reaction Probabilities
|
def update(self, rxn_probs):
pass
|
[
"def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV",
"def update(probabilities, one_gene, two_genes, have_trait, p):\n for person in probabilities:\n person_gene = get_gene(person, one_gene, two_genes)\n person_trait = get_trait(person, have_trait)\n old_p = probabilities[person][\"gene\"][person_gene]\n probabilities[person][\"gene\"][person_gene] = p + old_p\n old_p = probabilities[person][\"trait\"][person_trait]\n probabilities[person][\"trait\"][person_trait] = p + old_p",
"def update_policy(elite_states, elite_actions):\n\n new_policy = np.zeros([n_states, n_actions])\n \n i_size_list = len(elite_states)\n\n # <Your code here: update probabilities for actions given elite states & actions >\n \n # for each element of states and actions vector\n for i_temp_counter in range(0,i_size_list):\n \n # get the corresponding state and action\n temp_state = elite_states[i_temp_counter]\n temp_action = elite_actions[i_temp_counter]\n \n # for ith state, jth action\n # record the fact that these occurred (co-occurred)\n # this is now the policy[i,j]\n # add 1 more to the count in this entry\n new_policy[temp_state, temp_action] = new_policy[temp_state, temp_action]\n \n\n # for each element of policy matrix\n # turn it into a probability\n # normnalize by row sum counts frequencies\n for i_temp_counter in range(0,n_states):\n for j_temp_counter in range(0, n_actions):\n \n if new_policy[i_temp_counter, j_temp_counter] > 0:\n temp_row_sum = sum(new_policy[i_temp_counter,0:])\n \n print(temp_row_sum)\n \n new_policy[i_temp_counter, j_temp_counter] = new_policy[i_temp_counter, j_temp_counter]/temp_row_sum\n \n \n \n # Don't forget to set 1/n_actions for all actions in unvisited states.\n # Now you have counts/frequencies in new_policy\n # Make these probabilities\n \n # for each element of policy matrix\n for i_temp_counter in range(0,n_states):\n for j_temp_counter in range(0, n_actions):\n \n # if the element of this matrix is 0 then make 1/n_actions\n if new_policy[i_temp_counter, j_temp_counter] == 0:\n new_policy[i_temp_counter, j_temp_counter] = 1/n_actions\n \n\n \n \n return new_policy",
"def update(probabilities, one_gene, two_genes, have_trait, p):\n for person in probabilities:\n\n # count the genes for the person\n geneCount = 0\n if person in one_gene:\n geneCount = 1\n elif person in two_genes:\n geneCount = 2\n\n # check the trait\n hasTrait = False\n if person in have_trait:\n hasTrait = True\n\n # update the probabilities\n probabilities[person][\"gene\"][geneCount] += p\n probabilities[person][\"trait\"][hasTrait] += p",
"def update_probs(self, measure, p, enemy_net = False):\n tmp_net = []\n net_size = len(self.net) \n if not enemy_net:\n net = self.net\n else:\n net = self.enemy_net\n #Maps a given color to its corresponding column in the color's \n #probability table.\n if measure == GREEN:\n color = 0\n elif measure == YELLOW:\n color = 1\n elif measure == ORANGE:\n color = 2\n elif measure == RED:\n color = 3\n #Obtains new probabilities by using the distance between the\n #observed position (the one measured) and any other position.\n for j in range(0, net_size):\n distance = self.__get_distance(p, j)\n if distance == 0: #When updating the measured position's probability.\n tmp_net.append(net[j].value * self.ct[0][color])\n elif distance == 1: #When updating an adjacent position to the one measured.\n tmp_net.append(net[j].value * self.ct[1][color])\n elif distance == 2: #When updating a position at two cells from the one measured.\n tmp_net.append(net[j].value * self.ct[2][color])\n elif distance == 3: #When updating a position at three cells from the one measured.\n tmp_net.append(net[j].value * self.ct[3][color])\n else: #When updating a position at four or more cells from the one measured.\n tmp_net.append(net[j].value * self.ct[4][color])\n #Obtains summation of new probabilities in order to execute \n #a posterior normalization.\n total = sum(tmp_net)\n #Normalizes new probabilities and assigns them to its \n #corresponding position.\n for i in range(0, net_size):\n net[i].value = tmp_net[i]/total",
"def modify_rates(self):\n if self.modified:\n print 'Already Modified Probabilities'\n elif self.varGiven:\n print 'You must enter the conditional coalescent probabilties if you want to supply variance of'\n print 'the coalescent probabilities. Required since we cannot compute the variance of the conditionals'\n print 'given the variance of the marginals. Assuming that you gave the conditional probs.'\n else:\n testrates = self.obsRates.copy()\n tratesum = testrates.cumsum(1)\n nocoal = 1 - tratesum\n nocoal = nocoal[:, :-1]\n nocoal = np.hstack((np.ones((np.shape(nocoal)[0], 1)), nocoal))\n testrates = testrates.getA() / (nocoal.getA() + 1e-200)\n self.modified = True\n self.obsRates = np.matrix(np.max([np.min([testrates, np.ones(np.shape(testrates))], 0), np.zeros(np.shape(testrates))], 0))",
"def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)",
"def update_critic(self, rewards, new_states, old_states, done):\n rewards = torch.tensor(rewards) #.to(self.device)\n #print(\"rewards.shape \", rewards.shape)\n # Predictions\n V_pred = self.critic(old_states).squeeze()\n #print(\"V_pred.shape \", V_pred.shape)\n # Targets\n V_trg = self.critic(new_states).squeeze().detach()\n #print(\"V_trg.shape \", V_trg.shape)\n V_trg = (1-done)*self.gamma*V_trg + rewards\n #print(\"V_trg.shape \", V_trg.shape)\n # MSE loss\n loss = torch.sum((V_pred - V_trg)**2)\n # backprop and update\n self.critic_optim.zero_grad()\n loss.backward()\n self.critic_optim.step()\n return",
"def update(probabilities, one_gene, two_genes, have_trait, p):\n for person in probabilities:\n if person in one_gene:\n probabilities[person][\"gene\"][1] += p\n if person in two_genes:\n probabilities[person][\"gene\"][2] += p\n if person in have_trait:\n probabilities[person][\"trait\"][True] += p\n else:\n probabilities[person][\"trait\"][False] += p\n \n #raise NotImplementedError",
"def update(probabilities, one_gene, two_genes, have_trait, p):\n\n for person, info in probabilities.items():\n\n # find the number of genes for person\n num_of_genes = how_many_genes(person, one_gene, two_genes)\n\n # Find the trait of the person\n if person in have_trait:\n trait = True\n else:\n trait = False\n \n # update joint probability of gene with new joint probability p\n probabilities[person][\"gene\"][num_of_genes] += p\n\n # update joint probability of trait with new joint probability p\n probabilities[person][\"trait\"][trait] += p",
"def modify_environment(self, P_c = P_C):\n for i in range(0, N_ACTS):\n if (self.random.random() <= P_c):\n self.acts[i] = self.randpayoff()\n self.stat_act_updates += 1 # Keep track of act update statistics",
"def update_mdp_transition_counts_reward_counts(\n mdp_data, state, action, new_state, reward\n):\n\n # *** START CODE HERE ***\n mdp_data[\"transition_counts\"][state, new_state, action] += 1\n\n if reward == -1:\n mdp_data[\"reward_counts\"][new_state, 0] += 1\n\n mdp_data[\"reward_counts\"][new_state, 1] += 1\n # *** END CODE HERE ***\n\n # This function does not return anything\n return",
"def mutate(self, probability, rate):\n for i in range(self.T):\n shape = np.shape(self.weights[i])\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)",
"def update(probabilities, one_gene, two_genes, have_trait, joint_prob):\n people = set(probabilities)\n no_gene = people - one_gene - two_genes\n for person in people:\n num_genes = 0\n is_trait = False\n if person in no_gene:\n num_genes = 0\n elif person in one_gene:\n num_genes = 1\n elif person in two_genes:\n num_genes = 2\n if person in have_trait:\n is_trait = True\n else:\n is_trait = False\n\n probabilities[person][\"gene\"][num_genes] += joint_prob\n probabilities[person][\"trait\"][is_trait] += joint_prob",
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def update(self, s_p, actions, reward):\n qValueCurrent = self.getQvalue(self.s, self.action)\n feature = self.getFeatures(self.s, self.action)\n qValue_p = self.valueFromQvalues(s_p, actions)\n # reward = self.rewards(s_p)\n diff = (reward + self.gamma*qValue_p)-qValueCurrent\n for k in feature.keys():\n self.weights[k] = self.weights[k] + self.alpha*diff*feature[k]",
"def set_probabilities(self, p=[]):\n self.probabilities = p[:]",
"def _update_reward(self):\n pass",
"def observeUpdate(self, observation, gameState):\n \"*** YOUR CODE HERE ***\"\n pacman_position = gameState.getPacmanPosition()\n jail_position = self.getJailPosition()\n ghost_positions = self.allPositions\n # All the positions needed to calculate probability of evidence\n\n for position in ghost_positions: # Iterating through all positions\n prob_evidence = self.getObservationProb(observation, pacman_position, position, jail_position)\n # Probability of evidence calculated according to the function previously written\n self.beliefs[position] *= prob_evidence\n # P(X|e_1:t) = P(X|e_1:t-1) * P(E|X)\n\n self.beliefs.normalize()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a big limit as an integer and gets all the prime numbers in that range, including the limit itself. Returns a numpy array of the primes. Fragmentation is an int that multiplies the sqrt of the limit to increase the fragment size. Bigger fragmentation consumes more memory and less time. The maximum useful fragmentation is the sqrt of the limit. On 4 GB of RAM there is not enough memory for limit == 10**9 at fragmentation 1; fragmentation 1000 is ok
|
def get_primes_in_big_limit(limit, fragmentation=1):
print("Getting primes...")
print("Fragmentation set to", fragmentation)
fragment_limit = int(np.sqrt(limit))
fragment_lowest = 0
fragment_highest = fragment_lowest + fragment_limit
primes_in_limit = np.array([], dtype=int)
while fragment_highest < limit:
if fragment_lowest == 0:
fragment_highest += 1
primes_in_first_fragment = get_primes_in(fragment_highest)
primes_in_limit = np.concatenate([primes_in_limit,
primes_in_first_fragment],
axis=None)
else:
primes_in_fragment = get_primes_in_fragment(fragment_lowest,
fragment_highest,
primes_in_first_fragment
)
primes_in_limit = np.concatenate([primes_in_limit,
primes_in_fragment],
axis=None)
fragment_lowest = fragment_highest
fragment_highest += (fragment_limit * fragmentation)
primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,
limit+1,
primes_in_first_fragment
)
return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)
|
[
"def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]",
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def get_big_primes(limit):\n n = 9999999\n count = 0\n primes = []\n while count < limit:\n if is_prime(n):\n primes.append(n)\n count += 1\n n += 1\n return primes",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def prime_sieve(limit) -> List[int]:\n from itertools import compress\n sieve = bit_sieve(limit+1)\n return [2, *compress(range(3, limit+1, 2), sieve[3::2])]",
"def bit_sieve(limit: int) -> bytearray:\n sieve = bytearray([True]) * limit\n sieve[0] = False\n sieve[1] = False\n # old code ─ slow version\n # number_of_multiples = len(sieve[4::2])\n number_of_multiples = (limit - 4 + limit % 2) // 2\n sieve[4::2] = [False] * number_of_multiples\n\n for factor in range(3, int(math.sqrt(limit)) + 1, 2):\n if sieve[factor]:\n # old code ─ slow version\n # number_of_multiples = len(sieve[factor * factor::2*factor])\n number_of_multiples = ((limit - factor * factor - 1) // (2 * factor) + 1)\n sieve[factor * factor::factor * 2] = [False] * number_of_multiples\n return sieve",
"def create_prime_array(limit, ps=None):\n\n if ps is None:\n ps = generate_primes(limit)\n pa = bitarray(limit+1)\n pa.setall(False)\n for p in ps:\n pa[p] = True\n\n return pa",
"def all_prime(uplimit):\n primes = [2]\n num = 3\n while num <= uplimit:\n if num % 1000000 == 0:\n print num\n root = int(math.sqrt(num))\n is_prime = True\n for i in primes:\n if num % i == 0:\n is_prime = False\n break\n elif i > root:\n break\n if is_prime:\n primes.append(num)\n \n num += 1\n\n return primes",
"def get_primes_in_fragment(fragment_lowest, fragment_highest,\n primes_in_first_fragment):\n fragment_range = np.arange(fragment_lowest, fragment_highest)\n prime_mask = np.ones(len(fragment_range), dtype=bool)\n for p in primes_in_first_fragment:\n if fragment_lowest % p == 0:\n first_multiple = fragment_lowest // p\n else:\n first_multiple = fragment_lowest // p + 1\n first_multiple_index = first_multiple * p - fragment_lowest\n prime_mask[first_multiple_index::p] = False\n return fragment_range[prime_mask]",
"def get_big_primes2(limit):\n n = 9999999\n count = 0\n while count < limit:\n if is_prime(n):\n yield n\n count += 1\n n += 1",
"def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret",
"def get_primes(limit):\n print(f'\\nCalculating primes up to {limit}... ', end='')\n start_time = time()\n limitn = limit+1\n primes = dict()\n for i in range(2, limitn):\n primes[i] = True\n\n for i in primes:\n i_multiples = range(2*i, limitn, i)\n for f in i_multiples:\n primes[f] = False\n print(f'Finsished. Took {time() - start_time} seconds.\\n')\n return sorted([i for i in primes if primes[i] is True]) # Or change to set",
"def Sieve_Of_Eratosthenes(limit):\n if not isinstance(limit, int):\n raise NotIntegerError('Non integer values not accepted')\n\n if limit < 2:\n raise OutOfRangeError('Invalid value. Input must be greater than 1')\n\n primes = list(range(2, limit + 1))\n\n for prime in primes:\n if not prime:\n continue\n for num in range(prime*prime, limit + 1, prime):\n primes[num - 2] = None\n\n return [x for x in primes if x]",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a limit as an integer and gets all the prime numbers in that range, NOT including the limit itself. Returns a numpy array of the primes.
|
def get_primes_in(limit):
range_limit = np.arange(limit)
prime_mask = np.ones(limit, dtype=bool)
prime_mask[0:2] = False
for i in range_limit[:int(np.sqrt(limit))+1]:
if prime_mask[i]:
prime_mask[2*i::i] = False
return range_limit[prime_mask]
|
[
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def get_big_primes(limit):\n n = 9999999\n count = 0\n primes = []\n while count < limit:\n if is_prime(n):\n primes.append(n)\n count += 1\n n += 1\n return primes",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret",
"def get_primes_in_big_limit(limit, fragmentation=1):\n print(\"Getting primes...\")\n print(\"Fragmentation set to\", fragmentation)\n fragment_limit = int(np.sqrt(limit))\n fragment_lowest = 0\n fragment_highest = fragment_lowest + fragment_limit\n primes_in_limit = np.array([], dtype=int)\n while fragment_highest < limit:\n if fragment_lowest == 0:\n fragment_highest += 1\n primes_in_first_fragment = get_primes_in(fragment_highest)\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_first_fragment],\n axis=None)\n else:\n primes_in_fragment = get_primes_in_fragment(fragment_lowest,\n fragment_highest,\n primes_in_first_fragment\n )\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_fragment],\n axis=None)\n fragment_lowest = fragment_highest\n fragment_highest += (fragment_limit * fragmentation)\n primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,\n limit+1,\n primes_in_first_fragment\n )\n return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def prime_sieve(limit) -> List[int]:\n from itertools import compress\n sieve = bit_sieve(limit+1)\n return [2, *compress(range(3, limit+1, 2), sieve[3::2])]",
"def create_prime_array(limit, ps=None):\n\n if ps is None:\n ps = generate_primes(limit)\n pa = bitarray(limit+1)\n pa.setall(False)\n for p in ps:\n pa[p] = True\n\n return pa",
"def Sieve_Of_Eratosthenes(limit):\n if not isinstance(limit, int):\n raise NotIntegerError('Non integer values not accepted')\n\n if limit < 2:\n raise OutOfRangeError('Invalid value. Input must be greater than 1')\n\n primes = list(range(2, limit + 1))\n\n for prime in primes:\n if not prime:\n continue\n for num in range(prime*prime, limit + 1, prime):\n primes[num - 2] = None\n\n return [x for x in primes if x]",
"def get_primes(limit):\n print(f'\\nCalculating primes up to {limit}... ', end='')\n start_time = time()\n limitn = limit+1\n primes = dict()\n for i in range(2, limitn):\n primes[i] = True\n\n for i in primes:\n i_multiples = range(2*i, limitn, i)\n for f in i_multiples:\n primes[f] = False\n print(f'Finsished. Took {time() - start_time} seconds.\\n')\n return sorted([i for i in primes if primes[i] is True]) # Or change to set",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]",
"def firstnprimes(n, limit=500):\n\tret = []\n\tfor p in sieveofera(limit):\n\t\tret.append(p)\n\t\tif len(ret) >= n:\n\t\t\treturn ret\n\traise Exception(\"Prime limit exceeded\")",
"def all_prime(uplimit):\n primes = [2]\n num = 3\n while num <= uplimit:\n if num % 1000000 == 0:\n print num\n root = int(math.sqrt(num))\n is_prime = True\n for i in primes:\n if num % i == 0:\n is_prime = False\n break\n elif i > root:\n break\n if is_prime:\n primes.append(num)\n \n num += 1\n\n return primes",
"def get_big_primes2(limit):\n n = 9999999\n count = 0\n while count < limit:\n if is_prime(n):\n yield n\n count += 1\n n += 1",
"def primes_range(lesser_bound, upper_bound):\n tab = [True] * (upper_bound + 1)\n sieve = []\n for p in range(2, upper_bound + 1):\n if tab[p]:\n sieve.append(p)\n for i in range(p, upper_bound + 1, p):\n tab[i] = False\n\n return list(filter(lambda x: x >= lesser_bound, sieve))",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes the fragment's lowest and highest limits as integers and gets all the prime numbers in that range, NOT including the upper limit itself. Returns a numpy array of the primes. Needs the primes from the first fragment of the program as input.
|
def get_primes_in_fragment(fragment_lowest, fragment_highest,
primes_in_first_fragment):
fragment_range = np.arange(fragment_lowest, fragment_highest)
prime_mask = np.ones(len(fragment_range), dtype=bool)
for p in primes_in_first_fragment:
if fragment_lowest % p == 0:
first_multiple = fragment_lowest // p
else:
first_multiple = fragment_lowest // p + 1
first_multiple_index = first_multiple * p - fragment_lowest
prime_mask[first_multiple_index::p] = False
return fragment_range[prime_mask]
|
[
"def get_primes_in_big_limit(limit, fragmentation=1):\n print(\"Getting primes...\")\n print(\"Fragmentation set to\", fragmentation)\n fragment_limit = int(np.sqrt(limit))\n fragment_lowest = 0\n fragment_highest = fragment_lowest + fragment_limit\n primes_in_limit = np.array([], dtype=int)\n while fragment_highest < limit:\n if fragment_lowest == 0:\n fragment_highest += 1\n primes_in_first_fragment = get_primes_in(fragment_highest)\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_first_fragment],\n axis=None)\n else:\n primes_in_fragment = get_primes_in_fragment(fragment_lowest,\n fragment_highest,\n primes_in_first_fragment\n )\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_fragment],\n axis=None)\n fragment_lowest = fragment_highest\n fragment_highest += (fragment_limit * fragmentation)\n primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,\n limit+1,\n primes_in_first_fragment\n )\n return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)",
"def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]",
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def primes_range(lesser_bound, upper_bound):\n tab = [True] * (upper_bound + 1)\n sieve = []\n for p in range(2, upper_bound + 1):\n if tab[p]:\n sieve.append(p)\n for i in range(p, upper_bound + 1, p):\n tab[i] = False\n\n return list(filter(lambda x: x >= lesser_bound, sieve))",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def primeSieve(upperBound):\n numbers = range(3, upperBound, 2)\n primes = [2]\n while numbers:\n prime = numbers.pop(0)\n primes.append(prime)\n numbers = [n for n in numbers if n % prime]\n return primes",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def get_big_primes(limit):\n n = 9999999\n count = 0\n primes = []\n while count < limit:\n if is_prime(n):\n primes.append(n)\n count += 1\n n += 1\n return primes",
"def create_prime_array(limit, ps=None):\n\n if ps is None:\n ps = generate_primes(limit)\n pa = bitarray(limit+1)\n pa.setall(False)\n for p in ps:\n pa[p] = True\n\n return pa",
"def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def all_prime(uplimit):\n primes = [2]\n num = 3\n while num <= uplimit:\n if num % 1000000 == 0:\n print num\n root = int(math.sqrt(num))\n is_prime = True\n for i in primes:\n if num % i == 0:\n is_prime = False\n break\n elif i > root:\n break\n if is_prime:\n primes.append(num)\n \n num += 1\n\n return primes",
"def find_primes() :\r\n start, end = np.random.randint(20, 70), np.random.randint(100, 150)\r\n \r\n lst = [num for num in range(start, end+1) if is_prime(num)]\r\n return (random.choice(lst), random.choice(lst))",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def primeNumbers(lower, upper):\n for num in range(lower, upper + 1):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n prime.append(num)",
"def prime_sieve(limit) -> List[int]:\n from itertools import compress\n sieve = bit_sieve(limit+1)\n return [2, *compress(range(3, limit+1, 2), sieve[3::2])]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a tuple where the first element is the dividend and the second element is the divisor. Both elements should be int. Performs a long division.
|
def long_division(dividend_divisor_tuple, decimal_limit=5):
natural, decimal = [], []
dividend, divisor = dividend_divisor_tuple[0], dividend_divisor_tuple[1]
assert isinstance(dividend, int), "Dividend not int"
assert isinstance(divisor, int), "Divisor not int"
floor_div = dividend // divisor
rest = dividend % divisor
# Natural part of the division
while floor_div > 0:
natural.append(str(floor_div))
dividend = rest
floor_div = dividend // divisor
rest = dividend % divisor
if rest == 0: # Divisor is factor of dividend
print("Divisor is factor of dividend")
return ("".join(natural), None, None)
# Decimal part of the division
dividend_list = []
recurring_index = None
while len(decimal) < decimal_limit:
dividend_list.append(dividend)
dividend *= 10
floor_div = dividend // divisor
decimal.append(str(floor_div))
rest = dividend % divisor
if rest == 0: # Terminating decimal reached
return ("".join(natural), "".join(decimal), None)
elif rest in dividend_list: # Recurring cycle found
recurring_index = dividend_list.index(rest)
print("Recurring cycle found")
break
else:
dividend = rest
if recurring_index is not None:
recurring = decimal[recurring_index:]
decimal = decimal[:recurring_index]
return ("".join(natural), "".join(decimal), "".join(recurring))
else:
print("Decimal limit reached")
return ("".join(natural), "".join(decimal), None)
|
[
"def div_mod(dividend: int, divisor: int) -> Tuple[int, int]:\n try:\n quotient = int(dividend) // int(divisor)\n remainder = int(dividend) % int(divisor)\n except (TypeError, ValueError, ZeroDivisionError):\n raise Exception(\n \"The dividend and divisor must be interger, \"\n \"and the divisor cannot be zero.\"\n )\n\n return quotient, remainder",
"def divf(a, b):\n return int(a // b)",
"def div_numbers(a: int, b: int) -> int:\n return a / b",
"def intDiv(num1,num2):\n return math.floor(num1/num2)",
"def a_divisor_b(a: int, b:int) -> tuple:\n a_div_b = is_divisor(a, b)\n b_div_a = is_divisor(b, a)\n return(a_div_b, b_div_a)",
"def divide(self, dividend: int, divisor: int) -> int:\n def binary_search(target):\n l, r = 0, len(multi) - 1\n if multi[l] > target:\n return -1\n while l < r:\n mid = l + (r - l + 1) // 2\n if multi[mid] > target:\n r = mid - 1\n else:\n l = mid\n return l\n\n if dividend == 0:\n return 0\n flag = 1\n if (divisor > 0 and dividend < 0) or (divisor < 0 and dividend > 0):\n flag = -1\n divisor = abs(divisor)\n x = divisor\n dividend = abs(dividend)\n multi = []\n while x <= dividend:\n multi.append(x)\n x <<= 1\n ans = 0\n if len(multi) == 0:\n return 0\n while True:\n idx = binary_search(dividend)\n if idx == -1:\n break\n ans += 1 << idx\n dividend -= multi[idx]\n if flag == 1:\n if ans > 2 ** 31 - 1:\n return 2 ** 31 - 1\n else:\n return ans\n else:\n if -ans < -2 ** 31:\n return -2 ** 31\n else:\n return -ans",
"def ceildiv(a: int, b: int) -> int:\n return a // b + (a % b > 0)",
"def finddiv(x):\r\n \r\n div = (1, x)\r\n for i in range(2, x//2+1):\r\n if x%i==0:\r\n div+=(i,)\r\n return div",
"def div_mod(a,b):\r\n c,d=a,b\r\n mod1=0\r\n div1=0\r\n while (c>d):\r\n div1=div1+1;\r\n c=c-d\r\n return (div1,c)",
"def finddiv(x, y):\r\n \r\n div = ()\r\n for i in range(1, min(x, y)+1):\r\n if x%i==0 and y%i==0:\r\n div+=(i,)\r\n return div",
"def divide(x):\n return x // 2",
"def divr(a, b):\n if b < 0: (a, b) = (-a, -b)\n if a < 0:\n a = -a\n return -int((a + a + b) // (b + b))\n else:\n return int((a + a + b) // (b + b))",
"def div(a, b):\n if b == 0: return None\n (d, r) = divmod(a, b)\n if r != 0: return None\n return d",
"def floor_division(x, y):\n return x // y",
"def find_division(dividend, divisor):\n if divisor == 0:\n print (\"Human you can't get to infinity\")\n return None\n if dividend < divisor:\n return 0\n quotient = 1\n divisor_incremented = divisor\n while divisor_incremented < dividend:\n quotient += 1\n divisor_incremented += divisor\n # print (divisor_incremented)\n if divisor_incremented != dividend:\n # not a multiple quotient will be one higher \n quotient -= 1\n\n # print (quotient)\n return quotient",
"def multiple(a, b):\n from fractions import gcd\n def lcm(x,y):\n \treturn (x*y)//gcd(x,y)\n #return lcm(a,b)\n \n def gcd(x,y):\n if y > x:\n x, y = y, x\n while y != 0:\n x, y = y, x % y\n return x\n return (a*b) // gcd(a,b)",
"def divide(*args):\n body = ['<h1>Divison Calculator</h1>']\n try:\n quotient = reduce(lambda x,y: x / y, map(int,args))\n body.append(f'Total equals: {quotient}')\n except ZeroDivisionError:\n raise ZeroDivisionError\n return '\\n'.join(body)",
"def safe_division(dividend, divisor):\n if divisor:\n return dividend / divisor\n else:\n return 0",
"def ceil_intdiv(a, b):\r\n # If a and b are int with not many significant bits, we could\r\n # cast them to float to avoid doing the modulo. We do not know if this\r\n # is faster or not. But this is not safe for int64 as the cast will\r\n # lose precision.\r\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))\r\n\r\n # We cast for the case when a and b are uint*. Otherwise neq will\r\n # force their upcast to int.\r\n div = int_div(a, b)\r\n ret = cast(neq(a % b, 0), div.dtype) + div\r\n assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])\r\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get length of number in digits.
|
def get_number_length(number):
return len(str(number))
|
[
"def numdigits(n):\n return len(str(n))",
"def number_of_digits(a: int) -> int:\n # suggested solution\n return len(str(a))",
"def get_length_of_number(self, number, precision):\n number_format = \"{:.\" + str(precision) + \"f}\"\n return len(number_format.format(number))",
"def count_digits(n):\n return len(str(n))",
"def _number_of_digits(number: int) -> int:\n return int(log10(number)) + 1",
"def num_digits_faster(n: int) -> int:\n return len(str(abs(n)))",
"def get_int_width(integer):\n return len(str(integer))",
"def num_digits(n):\n out = 0\n while n:\n out += 1\n n //= 10\n return out - 1",
"def count_digits(n):\n digits = 1\n while n >= 10 ** digits:\n digits += 1\n return digits",
"def digits(x):\n from amath.testing.types import intQ, isFraction\n\n if isFraction(x): # if x is a Fraction\n return x.digits() # run the specified digits function\n if intQ(x):\n x = int(x)\n if int(x) == x:\n return len(str(abs(x))) # for int\n else:\n return len(str(abs(x))) - 1 # for float",
"def num_digits(n: int) -> int:\n digits = 0\n n = abs(n)\n while True:\n n = n // 10\n digits += 1\n if n == 0:\n break\n return digits",
"def number_of_digits(num, base=10):\n return int(log(num)/log(base)) + 1",
"def get_length(x):\n\n try:\n return int(x)\n except Exception:\n return len(x)",
"def compute_width_digits(self):\n number_lines = self.editor.blockCount()\n return max(1, math.ceil(math.log10(\n number_lines + 1)))",
"def get_num_digits(num, base=10):\n cnt = 0\n while num > 0:\n cnt += 1\n num //= base\n return cnt",
"def _get_length(self):\n return self._length",
"def num_digits(n, b=10):\n return math.floor(math.log(n, b) + 1)",
"def getLength(self) -> float:\n return self.length",
"def num_digits_fast(n: int) -> int:\n return 1 if n == 0 else math.floor(math.log(abs(n), 10) + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Opens the chosen Excel document and returns its first sheet.
|
def get_excel(exceldocument):
sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)
return sheet
|
[
"def openExcelSheet(outputFileName):\n workbook = Workbook()\n worksheet = workbook.add_sheet(\"Sheet 1\")\n return workbook, worksheet",
"def open_excelr(self):\n try:\n data = xlrd.open_workbook(self.file)\n return data\n except Exception as e:\n print(str(e))\n return None",
"def open_xls():\n\n from mmap import mmap, ACCESS_READ\n from xlrd import open_workbook\n print open_workbook('simple.xls')\n with open('simple.xlsx', 'rb') as f:\n print open_workbook(\n file_contents=mmap(f.fileno(), 0, access=ACCESS_READ)\n )\n aString = open('simple.xls', 'rb').read()\n print aString\n print open_workbook(file_contents=aString)",
"def get_workbook():\n return openpyxl.Workbook()",
"def _get_sheet(file_name, sheet_name_or_index):\n book = xlrd.open_workbook(file_name)\n sheet = book.sheet_by_index(sheet_name_or_index) if is_number(sheet_name_or_index) else book.sheet_by_name(\n sheet_name_or_index)\n return sheet",
"def open_spreadsheet(self, path, as_template=False):\n extra = ()\n if as_template:\n pv = uno.createUnoStruct('com.sun.star.beans.PropertyValue')\n pv.Name = 'AsTemplate'\n pv.Value = True\n extra += (pv,)\n # UNO requires absolute paths\n url = uno.systemPathToFileUrl(os.path.abspath(path))\n document = self._open_url(url, extra)\n return SpreadsheetDocument(document)",
"def secure_open_workbook(*args, **kwargs) -> xlrd.Book:\n try:\n return xlrd.open_workbook(*args, **kwargs)\n except EntitiesForbidden:\n raise ValueError('Please use a xlsx file without XEE')",
"def get_sheet():\n\n sheets = get_selected_by_cat(DB.BuiltInCategory.OST_Sheets, as_list=True)\n if sheets:\n if len(sheets) > 1:\n raise ScriptError(\"Please select only one sheet\") # FIXME\n return sheets[0]\n else:\n sheet = doc.ActiveView\n if sheet.ViewType != DB.ViewType.DrawingSheet:\n raise ScriptError(\"ActiveView is not sheet\") # FIXME\n return sheet",
"def spreadsheet(self) -> Spreadsheet:\n return self.client.open_by_key(self.spreadsheet_id)",
"def read_opyxl(cls, file_name):\n if file_name in cls._Excels_opyxl:\n return cls._Excels_opyxl[file_name]\n else:\n excel = load_workbook(file_name, read_only=True)\n cls._Excels_opyxl[file_name] = excel\n return excel",
"def open_spreadsheet(self, path, as_template=False):\n desktop = self.cls(self.hostname, self.port)\n return desktop.open_spreadsheet(path, as_template=as_template)",
"def download_excel_workbook(self, file_id):\n buff = io.BytesIO()\n self.download(file_id, buff, mimetypes.XLSX)\n buff.seek(0)\n return openpyxl.load_workbook(buff, read_only=True)",
"def open_xls(self, filepath):\n\t\tself.xls_file_path = filepath\n\t\tself.xls_file = xlrd.open_workbook(filepath)\n\t\treturn self",
"def open_worksheet(self, stream_name: str) -> Worksheet:\n try:\n stream = self.spreadsheet.worksheet_by_title(stream_name)\n except WorksheetNotFound:\n stream = self.spreadsheet.add_worksheet(stream_name)\n return stream",
"def open_file(path):\n book = xlrd.open_workbook(path)\n\n # print number of sheets\n print\n book.nsheets\n\n # print sheet names\n print\n book.sheet_names()\n\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n\n # read a row\n print\n first_sheet.row_values(0)\n\n # read a cell\n cell = first_sheet.cell(0, 0)\n print\n cell\n print\n cell.value\n\n # read a row slice\n print\n first_sheet.row_slice(rowx=0,\n start_colx=0,\n end_colx=2)",
"def open_excel_workbook():\n wb = openpyxl.Workbook()\n del wb[\"Sheet\"]\n # wb.remove_s(wb.get_sheet_by_name(\"Sheet\"))\n return wb",
"def create_spreadsheet(self):\n url = 'private:factory/scalc'\n document = self._open_url(url)\n return SpreadsheetDocument(document)",
"def open_book():",
"def _read_xls(self, options, datas):\n book = xlrd.open_workbook(file_contents=datas)\n return self._read_xls_book(book)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates an XML structure with root and parent elements.
|
def createxmlmall():
root = ET.Element("state")
model = ET.SubElement(root, "model")
model.text = r""
dataid = ET.SubElement(root, "dataids")
application = ET.SubElement(root, "application")
application.text = "SIBS Configurator"
safecookie = ET.SubElement(root, "safecookie")
steps = ET.SubElement(root, "steps")
prev = ET.SubElement(steps, "prev")
lastproxy = ET.SubElement(root, "last-proxy").text = "tcserver0"
tree = ET.ElementTree(root) # saves tree in variable "tree"
return tree, safecookie, steps, prev
|
[
"def createTree(self,root):\n if AMOEBA_SET_FUNDAMENTALS_DEBUG:\n print \"Experiment fundamentals, Write to XML.\"\n #Create the element tree.\n root.attrib['name']=str(self.name)\n root.attrib['reading']=str(self.reading)\n root.attrib['sync']=str(self.sync)\n return root",
"def create_xml(self):\n common_util.file_generate(self._xml_abspath, common_util.to_pretty_xml(\n self._xml_obj.getroot()))",
"def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()",
"def buildXMLTree(self): \n self.logProgress(stack()[0][3])\n\n def indent(elem, level=0):\n# self.logProgress(stack()[0][3])\n\n i = \"\\n\" + level * \" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n # create the root element\n root = Element('lccSchema')\n tree = ElementTree(root)\n\n # add attributes to root for XSD validation of XML document \n rootAttribDict = root.attrib\n for aAttribute in constants.XmlValidation:\n rootAttribDict.update(aAttribute)\n \n # create metadata nodes\n meta = Element(constants.XmlElementMetadata)\n root.append(meta)\n metaName = Element(constants.XmlElementMetaname)\n metaName.text = str(self.MetadataNameLineEdit.text())\n meta.append(metaName)\n metaDesc = Element(constants.XmlElementMetadescription)\n metaDesc.text = str(self.MetadataDescriptionTextEdit.toPlainText())\n meta.append(metaDesc)\n\n # add the coefficients text and coefficients\n coeffText = etree.Comment(\"\"\" \n * The coefficients node contains coefficients to be assigned to values.\n \n * REQUIRED ATTRIBUTES\n * Id - text, unique identifier\n * Name - text, word or phrase describing coefficient\n * fieldName - text, name of field to be created for output\n * method - text, \"P\" or \"A\", designates \"P\"ercentage or per unit \"A\"rea calculation routine\n \"\"\")\n root.append(coeffText)\n coeffs = Element(constants.XmlElementCoefficients)\n \n for coef in self.tempLccObj.coefficients:\n tempid = self.tempLccObj.coefficients[str(coef)].coefId\n tempname = self.tempLccObj.coefficients[str(coef)].name\n tempfieldname = self.tempLccObj.coefficients[str(coef)].fieldName\n tempCalcMethod = self.tempLccObj.coefficients[str(coef)].calcMethod\n coeff = Element(constants.XmlElementCoefficient, Id=tempid, Name=tempname, fieldName=tempfieldname, method=tempCalcMethod)\n coeffs.append(coeff)\n root.append(coeffs)\n\n # add the value text and values\n# valText = etree.Comment(\"\"\"\n# * The \"values\" node defines the full set of values that can exist in a landcover raster\n# * The \"excluded\" attribute is used to exclude values from the total, excluded=false is the default\n# * Actual excluded values are always treated as excluded=true, cannot be used in classes, and should not be listed here. 
\n# \"\"\")\n valText = etree.Comment(\"\"\" \n * The values node defines the full set of values that can exist in a land cover raster.\n \n * REQUIRED ATTRIBUTES\n * Id - integer, raster code\n *\n * OPTIONAL ATTRIBUTES\n * Name - text, word or phrase describing value\n * excluded - boolean, \"true\" or \"false\" or \"1\" or \"0\"\n * - used to exclude values from effective area calculations\n * - excluded=false is the default \n \n * A value element can optionally contain one or more coefficient elements\n\n * REQUIRED COEFFICIENT ATTRIBUTES\n * Id - text, must match an Id attribute from a coefficients node element\n * value - decimal, weighting/calculation factor\n \"\"\")\n root.append(valText)\n values = Element(constants.XmlElementValues)\n # get the values\n for key in sorted(self.tempLccObj.values.iteritems(), key=operator.itemgetter(0)):\n valDict = {}\n valDict[constants.XmlAttributeId] = str(key[1].valueId)\n valDict[constants.XmlAttributeName] = str(key[1].name)\n if (key[1].excluded):\n valDict[constants.XmlAttributeNodata] = 'true'\n val = Element(constants.XmlAttributeValue, attrib=valDict)\n values.append(val)\n # get the coefficients for each value\n for coef in key[1]._coefficients:\n coefDict = {}\n coefDict[constants.XmlAttributeId] = key[1]._coefficients[str(coef)].coefId\n if key[1]._coefficients[str(coef)].value == 0.0:\n coefDict[constants.XmlAttributeValue] = \"0.0\"\n else:\n coefDict[constants.XmlAttributeValue] = str(key[1]._coefficients[str(coef)].value)\n if coefDict[\"value\"] == \"\":\n pass\n coefe = Element(constants.XmlElementCoefficient, attrib=coefDict)\n val.append(coefe)\n root.append(values)\n if self.tempLccObj.classes.topLevelClasses == None:\n indent(tree.getroot())\n return tree\n # add the class text and the class nodes\n# classText = etree.Comment(\"\"\"\n# * The \"classes\" node contains values grouped into classes.\n# * A class can contain either values or classes but not both types\n# * Values contain only an id which refers to a value in values node.\n# * The id attribute is used for the root of the field name in the output(for example %forest would be P + for = Pfor)\n# * Two classes with the same id are not allowed.\n# * Special class attributes:\n# - onSlopeVisible: Make available in \"On Slope\" metric category, default is false\n# - overwriteField: if present, it overides default \"Land Cover Proportions\" field name with the supplied value\n# \"\"\")\n classText = etree.Comment(\"\"\"\n * The classes node contains values from a land cover raster grouped into one or more classes.\n \n * REQUIRED ATTRIBUTES\n * Id - text, unique identifier, also used for automated generation of output field name\n \n * OPTIONAL ATTRIBUTES\n * Name - text, word or phrase describing class\n * filter - text, a string of one or more tool name abbreviations separated by a \";\"\n * - possible abbreviations are: lcp, rlcp, lcosp, splcp, and caem\n * - used to exclude the class from the selectable classes in the tool's GUI\n * xxxxField - text, overrides ATtILA-generated field name for output\n * - where xxxx equals a tool name abbreviation\n * - possible abbreviations are: lcp, rlcp, lcosp, splcp, and caem\n * - a separate xxxxField attribute can exist for each tool\n\n * A class can contain either values or classes but not both types.\n * Value elements contain only an Id attribute which refers to a value in a raster.\n * Values tagged as excluded=\"true\" in the values node should not be included in any class.\n \"\"\")\n root.append(classText)\n classes = 
Element('classes')\n root.append(classes)\n # function to find child classes of the parent classes\n def printDescendentClasses(landCoverClass, classE):\n# self.logProgress(stack()[0][3])\n\n if landCoverClass.childClasses:\n for childClass in landCoverClass.childClasses:\n assert isinstance(childClass, lcc.EditorLandCoverClass)\n # childClass\n clasDict = {}\n clasDict[constants.XmlAttributeId] = str(childClass.classId)\n clasDict[constants.XmlAttributeName] = str(childClass.name)\n for field in self.tempLccObj.overwriteFieldsNames:\n if childClass.classoverwriteFields[field]:\n clasDict[field] = childClass.classoverwriteFields[field]\n clasDict[constants.XmlAttributeFilter] = \"\" \n childClas = Element(constants.XmlElementClass, attrib=clasDict)\n classE.append(childClas)\n for childValueId in sorted(childClass.childValueIds):\n if self.tempLccObj.values[childValueId].excluded:\n continue\n childVal = Element(constants.XmlElementValue, Id=str(childValueId))\n childClas.append(childVal)\n printDescendentClasses(childClass, childClas)\n else:\n return\n for clas in self.tempLccObj.classes.topLevelClasses:\n clasDict = {}\n clasDict[constants.XmlAttributeId] = str(clas.classId)\n clasDict[constants.XmlAttributeName] = str(clas.name)\n for field in self.tempLccObj.overwriteFieldsNames:\n if clas.classoverwriteFields[field]:\n clasDict[field] = clas.classoverwriteFields[field]\n clasDict[constants.XmlAttributeFilter] = \"\"\n classE = Element(constants.XmlElementClass, attrib=clasDict)\n classes.append(classE)\n for childValueId in clas.childValueIds:\n if self.tempLccObj.values[childValueId].excluded:\n continue\n childVal = Element(constants.XmlElementValue, Id=str(childValueId))\n classE.append(childVal)\n printDescendentClasses(clas, classE) \n\n # indent the output correctly and the write to file\n indent(tree.getroot())\n \n\n self.logProgress(stack()[0][3] + \" END\")\n\n return tree",
"def buildTree():\r\n root = Element('world-crises')\r\n\r\n crises = Element('crises')\r\n crises_objects = db.GqlQuery(\"SELECT * FROM Crisis\")\r\n\r\n # Build XML for crises\r\n for crisis_object in crises_objects:\r\n crisis = addCrisis(crisis_object)\r\n crises.append(crisis)\r\n root.append(crises)\r\n\r\n # Build XML for Organizations\r\n organizations = Element('organizations')\r\n organizations_objects = db.GqlQuery(\"SELECT * FROM Organization\")\r\n for organization_object in organizations_objects:\r\n organization = addOrganization(organization_object)\r\n organizations.append(organization)\r\n root.append(organizations)\r\n\r\n # Build XML for people\r\n people = Element('people')\r\n people_objects = db.GqlQuery(\"SELECT * FROM Person\")\r\n for person_object in people_objects:\r\n person = addPerson(person_object)\r\n people.append(person)\r\n root.append(people)\r\n\r\n return root",
"def _initRootElement(self):\n\n self.registerNamespace('cml', 'http://www.xml-cml.org/schema')\n self.registerNamespace('convention',\n 'http://www.xml-cml.org/convention/')\n self.registerNamespace('xsd',\n 'http://www.w3.org/2001/XMLSchema')\n\n root = ET.Element('{http://www.xml-cml.org/schema}cml')\n\n return root",
"def create_root(self):\n pass",
"def _makeXmlTree(self):\n if self._xrcname is None:\n self._xrcname = self.__class__.__name__\n if self._xmltree is None:\n xmlfile = file(self._xrcfile)\n self._xmltree = XMLDocTree(xmlfile)",
"def create_roots(self):\n self.root = SchemaNode.element(\"nmt:netmod-tree\",\n interleave=False, occur=2)\n self.confdata = SchemaNode.element(\"nmt:top\", self.root,\n interleave=True, occur=2)\n self.rpcs = SchemaNode.element(\"nmt:rpc-methods\", self.root,\n interleave=False, occur=2)\n self.notifications = SchemaNode.element(\"nmt:notifications\", self.root,\n interleave=True, occur=2)",
"def xml(self):\n top = ElementTree.Element('component', id=self.id, name=self.name)\n for key in sorted(self.parameters):\n top.append(ElementTree.Comment( \\\n \", \".join(['param', key, self.parameters[key][1]])))\n top.append(ElementTree.Element( \\\n 'param', name=key, value=self.parameters[key][0]))\n for key in sorted(self.stats):\n top.append(ElementTree.Comment( \\\n \", \".join(['stat', key, self.stats[key][1]])))\n top.append(ElementTree.Element( \\\n 'stat', name=key, value=self.stats[key][0]))\n top.append(self.predictor.xml())\n top.append(self.itlb.xml())\n top.append(self.icache.xml())\n top.append(self.dtlb.xml())\n top.append(self.dcache.xml())\n top.append(self.btb.xml())\n return top",
"def _createXmlDoc(self):\n\n # Free the current XML document.\n self._freeXmlDoc()\n\n # Create the XML document.\n self.xmlDoc = libxml2.newDoc('1.0')\n\n # Create the root node and add it to the document.\n self._rootNode = libxml2.newNode(self.rootTagName)\n self.xmlDoc.setRootElement(self._rootNode)",
"def build_tree(self) -> bytes:\n from lxml.etree import (\n Element,\n SubElement,\n tostring,\n )\n\n self.root = Element(f\"{self.prefix_uri}{self.root_name}\", nsmap=self.namespaces)\n\n for d in self.frame_dicts.values():\n elem_row = SubElement(self.root, f\"{self.prefix_uri}{self.row_name}\")\n\n if not self.attr_cols and not self.elem_cols:\n self.elem_cols = list(d.keys())\n self.build_elems(d, elem_row)\n\n else:\n elem_row = self.build_attribs(d, elem_row)\n self.build_elems(d, elem_row)\n\n self.out_xml = tostring(\n self.root,\n pretty_print=self.pretty_print,\n method=\"xml\",\n encoding=self.encoding,\n xml_declaration=self.xml_declaration,\n )\n\n if self.stylesheet is not None:\n self.out_xml = self.transform_doc()\n\n return self.out_xml",
"def CreateKmlDoc():\n\n kml_doc = xml.dom.minidom.Document()\n kml_element = kml_doc.createElementNS('http://www.opengis.net/kml/2.2', 'kml')\n kml_element.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\n kml_element = kml_doc.appendChild(kml_element)\n document = kml_doc.createElement('Document')\n kml_element.appendChild(document)\n return kml_doc",
"def _create_nrml():\n return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)",
"def build(self):\n root = ET.Element(\"container\", xmlns=self.namespace,\n version=self.version)\n rfs = ET.SubElement(root, \"rootfiles\")\n attrs = {\"full-path\": self.full_path, \"media-type\": self.media_type, }\n dummy = ET.SubElement(rfs, # pragma pylint: disable=W0612\n \"rootfile\", **attrs)\n # pragma pylint: enable=W0612\n return root",
"def create_tree(self,path):\n titre0,contenu0,titre1,contenu1,titre3,contenu3,titre4,contenu4,titre5,contenu5,titre6,contenu6,titre8,contenu8,titre9,contenu9,titre10,contenu10,titre12,contenu12 = self.extraction()\n \n #listes qui servent pour la méthode générale\n titre = [titre0,titre1,titre3,titre4,titre5,titre6,titre8,titre9,titre10,titre12]\n contenu = [contenu0,contenu1,contenu3,contenu4,contenu5,contenu6,contenu9,contenu10,contenu12]\n \n \n #création de la racine de l'arbre\n station = etree.Element(self.nameStation)\n doc = etree.ElementTree(station)\n \n #liste des entiers de 0 à 9 pour tester si des balises commencent par un chiffre\n liste = ['0','1','2','3','4','5','6','7','8','9']\n #quand un titre commence par un chiffre, on ajoute un a devant\n #une boucle for par bloc, pas optimal mais ne fonctionne pas autrement \n title = etree.SubElement(station,'a'+titre0[0])\n for i in range(1,len(titre0)):\n t = titre0[i]\n c = contenu0[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre1[0]) \n for i in range(1,len(titre1)):\n t = titre1[i]\n c = contenu1[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre3[0])\n for i in range(1,len(titre3)):\n t = titre3[i]\n c = contenu3[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre4[0])\n for i in range(1,len(titre4)):\n t = titre4[i]\n c = contenu4[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre5[0])\n for i in range(1,len(titre5)):\n t = titre5[i]\n c = contenu5[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre6[0])\n for i in range(1,len(titre6)):\n t = titre6[i]\n c = contenu6[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre8[0])\n for i in range(1,len(titre8)):\n t = titre8[i]\n c = contenu8[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre9[0])\n for i in range(1,len(titre9)):\n t = titre9[i]\n c = contenu9[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre10[0])\n for i in range(1,len(titre10)):\n t = titre10[i]\n c = contenu10[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = etree.SubElement(title,t)\n t.text = c \n \n title = etree.SubElement(station,'a'+titre12[0])\n for i in range(1,len(titre12)):\n t = titre12[i]\n c = contenu12[i]\n for k in range(len(liste)):\n if liste[k] in t :\n t = 'a'+ t\n if t == '':\n t = 'empty'\n c = 'empty'\n t = 
etree.SubElement(title,t)\n t.text = c \n \n #méthode générale, qui ne fonctionne pas\n# for i in range(len(titre)):\n# title = etree.SubElement(station,'a'+titre[i][0])\n# \n# for j in range(1,len(titre[0])):\n# \n# t = titre[i][j]\n# c = contenu[i][j]\n# print(c)\n# for k in range(len(liste)):\n# if liste[k] in t :\n# t = 'a'+ t\n# if t == '':\n# t = 'empty'\n# contenu = 'empty'\n# \n# t = etree.SubElement(title,t)\n# t.text = c\n \n \n #print(etree.tostring(station,pretty_print=True))\n \n #écriture du fichier en sortie\n output = path + self.nameStation + '.xml'\n \n doc.write(output, xml_declaration=True, encoding='utf-8')",
"def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()",
"def generate_xml(data):\n if VERBOSE: print \"Generating XML...\"\n \n # Top element\n results = ET.Element('results')\n results.set(\"gold\", data['goldfile'])\n results.set(\"system\", data['sysfile'])\n\n # Types\n types = ET.SubElement(results, 'types')\n types.text = data['types']\n\n # Regime\n regime = ET.SubElement(results, 'regime')\n regime.text = data['regime']\n\n # Counts\n counts = ET.SubElement(results, 'counts')\n\n for k,v in data['counts'].iteritems():\n element = ET.SubElement(counts, k)\n element.text = str(v)\n\n # Scores\n scores = ET.SubElement(results, 'scores')\n\n for t in data['scores']:\n score_type = ET.SubElement(scores, t)\n\n for m,v in data['scores'][t].iteritems():\n measure = ET.SubElement(score_type, m)\n measure.text = str(v)\n\n # Gold edits\n goldedits = ET.SubElement(results, 'goldedits')\n\n for edit in data['goldedits']:\n element = ET.SubElement(goldedits, 'edit', edit)\n\n # Spurious edits\n spuriousedits = ET.SubElement(results, 'spuriousedits')\n\n for edit in data['spuriousedits']:\n element = ET.SubElement(spuriousedits, 'edit', edit)\n\n return results",
"def generate_xml(self) -> XmlNode:\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a folder and saves an XML tree in a specific path
|
def save_xml(tree, file_name, folder_name):
    import os  # changes the location for the files
    os.chdir(folder_name)
    tree.write(file_name)  # the name of the new file
|
[
"def create_folder(self,path):\r\n path = utils.resourceref.remove_begin_slash(path)\r\n path = utils.resourceref.join_refs([self.get_path(), self.get_current_path(), path])\r\n if not os.path.exists(path):\r\n os.makedirs(path)",
"def create_folder(path):\n command = ['mkdir', TEST_DIR]\n file_operation(path, command)",
"def create_directory(path):\n os.makedirs(path)",
"def create_folder(path, with_init=False):\n\tfrom frappe.utils import touch_file\n\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\n\t\tif with_init:\n\t\t\ttouch_file(os.path.join(path, \"__init__.py\"))",
"def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)",
"def create_folder(path):\n if not exists(path):\n os.makedirs(path)",
"def create_folder(path):\n try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path",
"def create_folder(path):\n if not os.path.exists(path):\n os.mkdir(path)\n\n return path",
"def makefolder(self, parentpath, name='folder'):\r\n if not self.pathexists(parentpath):\r\n print(\"Error: parent path does not exist.\")\r\n raise\r\n parent = SNCRS.Node.LoadNode(parentpath)\r\n folder = SNCR.Folder(parent)\r\n if folder is not None:\r\n folder.Name = name\r\n try:\r\n folder.Save()\r\n except SystemError as err:\r\n errmsg = \" Unable to make folder: \" + SNCR.Storage.RepositoryPath.Combine(parentpath, name)\r\n print(errmsg)\r\n err.message = err.message + errmsg\r\n raise",
"def _create_working_files(file_path, content):\n # Create tree directory that the file in it:\n if '/' in file_path:\n parent_paths = file_path.split('/')[:-1]\n for i in range(len(parent_paths) - 1):\n make_directory('/'.join(parent_paths[:i + 1]))\n # Create new file:\n write_file(file_path, content)",
"def mkdir(token, name, parent_id=0):\r\n global a\r\n print(\"mkdir: name={name}, parent_id={parent_id}, token={token}\".format(**locals()))\r\n data = (\"\"\"{\"parent_folder_id\":%s,\"name\":\"%s\"}\"\"\"%(str(parent_id), name)).encode('utf-8')\r\n x = a.post(DOMAIN+\"/apps/files/new_folder\",\r\n data,\r\n headers={\"requesttoken\":token,\"x-requested-with\": \"XMLHttpRequest\", \"Content-Type\":\"text/plain;charset=UTF-8\"}\r\n )\r\n try:\r\n data = json.loads(x.text)\r\n except:\r\n data = {\"success\": False}\r\n if \"success\" not in data or data[\"success\"]!=True:\r\n assert \"name_conflicts\" in x.text, \"Unkown error\"+x.text\r\n raise FileExistsError(\"failed to create {name} under folder_{parent_id}. data={data}\".format(**locals()))\r\n return str(data[\"new_folder\"][\"id\"])",
"def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)",
"def createRigFolders( path ):\n if os.path.exists( path ) == False:\n os.makedirs( path )\n if os.path.exists( (path + \"/rigFile\") ) == False:\n os.makedirs( (path + \"/rigFile\") )",
"def createDir(self, dir_name):\n os.mkdir(os.path.join(self.user[\"Save\"], dir_name))",
"def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass",
"def create_folder_structure(self):\n\n if not self.reload:\n print(\"Creating folder structure\")\n os.makedirs(join(self.output_dir, 'general'))\n os.makedirs(join(self.output_dir, 'best'))",
"def new_dir(self, path):\n\n return self.run('mkdir \\\"{0}\\\" -Force'.format(path), powershell=True)",
"def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)",
"def createDirectoryTarget(self):\n\n os.mkdir('testes2')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the approximation of a contour shape to another shape with fewer vertices, depending upon the precision we specify.
|
def __CalculateApproximation(self, contour):
epsilon = 0.1 * cv2.arcLength(contour, True)
return cv2.approxPolyDP(contour, epsilon, True)
|
[
"def flatten(self, precision=RELATIVE):\n if precision == RELATIVE:\n precision = RELATIVE_PRECISION\n contours = [[]]\n x0, y0 = None, None\n closeto = None\n for pt in self:\n if (pt.cmd == LINETO or pt.cmd == CURVETO) and x0 == y0 is None:\n raise NoCurrentPointForPath\n elif pt.cmd == LINETO:\n contours[-1].append((x0, y0))\n contours[-1].append((pt.x, pt.y))\n elif pt.cmd == CURVETO:\n # Curves are interpolated from a number of straight line segments.\n # With relative precision, we use the (rough) curve length to determine the number of lines.\n x1, y1, x2, y2, x3, y3 = pt.ctrl1.x, pt.ctrl1.y, pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y\n if isinstance(precision, float):\n n = int(max(0, precision) * bezier.curvelength(x0, y0, x1, y1, x2, y2, x3, y3, 3))\n else:\n n = int(max(0, precision))\n if n > 0:\n xi, yi = x0, y0\n for i in range(n+1):\n xj, yj, vx1, vy1, vx2, vy2 = bezier.curvepoint(float(i)/n, x0, y0, x1, y1, x2, y2, x3, y3)\n contours[-1].append((xi, yi))\n contours[-1].append((xj, yj))\n xi, yi = xj, yj\n elif pt.cmd == MOVETO:\n contours.append([]) # Start a new contour.\n closeto = pt\n elif pt.cmd == CLOSE and closeto is not None:\n contours[-1].append((x0, y0))\n contours[-1].append((closeto.x, closeto.y))\n x0, y0 = pt.x, pt.y\n return contours",
"def contour_approximation(contour, approximation_coefficient=0.02):\n perimeter = cv2.arcLength(contour, True)\n approximation = cv2.approxPolyDP(contour, approximation_coefficient * perimeter, True)\n return approximation",
"def contourApprox(cnt, epsilon = 0.005):\n\tepsilon = epsilon*cv2.arcLength(cnt, True)\n\tapprox = cv2.approxPolyDP(cnt, epsilon, True)\n\treturn approx",
"def get_approx_polygon(contour: np.ndarray, points: int = 4) -> Union[np.ndarray, None]:\n num_points = contour.shape[0]\n\n # contour has less # points than target\n if num_points < points:\n return None\n\n processed_contour = contour\n arc_length = cv2.arcLength(processed_contour, closed=True)\n epsilon = 0.2 * arc_length\n while num_points > points:\n processed_contour = cv2.approxPolyDP(processed_contour, epsilon=epsilon, closed=True)\n num_points = processed_contour.shape[0]\n epsilon *= 1.1\n\n return processed_contour",
"def simplify_contour(contour, n, iteration_count: int = 20):\n\n mineps = 0\n maxeps = cv2.arcLength(contour, True)\n iterations = 0\n \n good = None\n\n while True:\n # We're done!\n if iterations >= iteration_count:\n # If found this will contain the best result\n # Otherwise it will be None\n return good\n\n epsilon = (mineps + maxeps) / 2\n approx = cv2.approxPolyDP(contour, epsilon, True)\n\n # Too many points - epsilon too low \n if len(approx) > n:\n mineps = epsilon\n # Too few points - epsilon too high\n elif len(approx) < n:\n maxeps = epsilon\n # Good number of points\n else:\n # Mark it down \n good = approx\n # Try to reduce the epsilon\n maxeps = epsilon\n \n iterations += 1",
"def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]",
"def fracture(self, max_points=_max_points, precision=1e-3):\n if max_points > 4:\n ii = 0\n while ii < len(self.polygons):\n if len(self.polygons[ii]) > max_points:\n pts0 = sorted(self.polygons[ii][:, 0])\n pts1 = sorted(self.polygons[ii][:, 1])\n ncuts = len(pts0) // max_points\n if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:\n # Vertical cuts\n cuts = [\n pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 0,\n 1 / precision)\n else:\n # Horizontal cuts\n cuts = [\n pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 1,\n 1 / precision)\n self.polygons.pop(ii)\n layer = self.layers.pop(ii)\n datatype = self.datatypes.pop(ii)\n self.polygons.extend(\n numpy.array(x)\n for x in itertools.chain.from_iterable(chopped))\n npols = sum(len(c) for c in chopped)\n self.layers.extend(layer for _ in range(npols))\n self.datatypes.extend(datatype for _ in range(npols))\n else:\n ii += 1\n return self",
"def contour(chain,p,**kwargs):\n return contourSingle(chain,p,**kwargs)",
"def _bounding_precision(self) :\n if not self.precision().is_infinite() :\n return self.precision()\n \n return self.parent().monoid().minimal_composition_filter( self.coefficients().keys(),\n [self.parent().monoid().zero_element()] )",
"def projection(poly1, dim, solver=None, abs_tol=ABS_TOL, verbose=0):\n if isinstance(poly1, Region):\n ret = Polytope()\n for i in range(len(poly1.list_poly)):\n p = projection(\n poly1.list_poly[i], dim,\n solver=solver, abs_tol=abs_tol)\n ret = ret + p\n return ret\n # flat ?\n if (poly1.dim < len(dim)) or is_empty(poly1):\n return poly1\n # `poly1` isn't flat\n poly_dim = poly1.dim\n dim = np.array(dim)\n org_dim = range(poly_dim)\n new_dim = dim.flatten() - 1\n del_dim = np.setdiff1d(org_dim, new_dim) # Index of dimensions to remove\n # logging\n logger.debug('polytope dim = ' + str(poly_dim))\n logger.debug('project on dims = ' + str(new_dim))\n logger.debug('original dims = ' + str(org_dim))\n logger.debug('dims to delete = ' + str(del_dim))\n mA, nA = poly1.A.shape\n # fewer rows than dimensions ?\n if mA < poly_dim:\n msg = 'fewer rows in A: ' + str(mA)\n msg += ', than polytope dimension: ' + str(poly_dim)\n logger.warning(msg)\n # enlarge A, b with zeros\n A = poly1.A.copy()\n poly1.A = np.zeros((poly_dim, poly_dim))\n poly1.A[0:mA, 0:nA] = A\n # stack\n poly1.b = np.hstack([poly1.b, np.zeros(poly_dim - mA)])\n logger.debug('m, n = ' + str((mA, nA)))\n # Compute cheby ball in lower dim to see if projection exists\n norm = np.sum(poly1.A * poly1.A, axis=1).flatten()\n norm[del_dim] = 0\n c = np.zeros(len(org_dim) + 1, dtype=float)\n c[len(org_dim)] = -1\n G = np.hstack([poly1.A, norm.reshape(norm.size, 1)])\n h = poly1.b\n sol = lpsolve(c, G, h)\n if sol['status'] != 0:\n # Projection not fulldim\n return Polytope()\n if sol['x'][-1] < abs_tol:\n return Polytope()\n # select projection solver\n if solver == \"esp\":\n return projection_esp(poly1, new_dim, del_dim)\n elif solver == \"exthull\":\n return projection_exthull(poly1, new_dim)\n elif solver == \"fm\":\n return projection_fm(poly1, new_dim, del_dim)\n elif solver == \"iterhull\":\n return projection_iterhull(poly1, new_dim)\n elif solver is not None:\n logger.warning('unrecognized projection solver \"' +\n str(solver) + '\".')\n # `solver` undefined or unknown\n # select method based on dimension criteria\n if len(del_dim) <= 2:\n logger.debug(\"projection: using Fourier-Motzkin.\")\n return projection_fm(poly1, new_dim, del_dim)\n elif len(org_dim) <= 4:\n logger.debug(\"projection: using exthull.\")\n return projection_exthull(poly1, new_dim)\n else:\n logger.debug(\"projection: using iterative hull.\")\n return projection_iterhull(poly1, new_dim)",
"def compute_coverage_for_contour_pair(\n contour1: np.ndarray,\n contour2: np.ndarray,\n max_size: int = DEFAULT_MAX_CONTOUR_MASK_SIZE,\n):\n im1, im2 = compute_contour_binary_masks(contour1, contour2, max_size=max_size)\n return (im1 & im2).sum() / im1.sum()",
"def create_filled_contour(self, lower_level, upper_level): # real signature unknown; restored from __doc__\n pass",
"def __CalculateExtend(self, contour):\r\n area = self.__CalculateArea(contour)\r\n boundingBox = self.__CalculateBoundingBox(contour)\r\n return area / (boundingBox[2] * boundingBox[3])",
"def contour(phi_s, eta0, T, h, npts, a1, a2, Hamil):\n\n\n\tif phi_s == 0.0:\n\t\tphi_u = -math.pi\n\telse:\n\t\tphi_l = turning_point(phi_s, H=Hamil, phi_guess = -0.8)\n\t\tphi_u = turning_point(phi_s, H=Hamil, phi_guess = phi_s)\n\t\t\n\n\t#phi_u = phi_u + 0.5*math.pi\n\tprint \"phi_l, phi_u, H \",phi_l, phi_u, Hamil\n\tprint \"math.pi - phi_s \",-(math.pi - phi_s)\n\n\t#phi_a: array of phase values at which to evaluate separatrix\n\t \n\tif phi_u > 0:\n\t\tphi_a = np.linspace(phi_l, phi_u, npts)\n\telse:\n\t\tphi_a = np.linspace(phi_u, phi_l, npts)\n\n\tprint \"Hamil \",Hamil\n\n\n\tratio = a2/a1\n\tsignc = -np.sign(2*np.cos(phi_s) - (math.pi - phi_s-phi_s)*np.sin(phi_s))\n\tprint \"a2/a2 \",a2/a1\n\n\tdp_cont = []\n\tphi_real = []\n\tfor phi in phi_a:\n\t\t#original form for opposite sign eta\n\t\t#dpsq = 0.5*(bh**2)*(np.cos(phi) + np.cos(phi_s) - (math.pi - phi - phi_s)*np.sin(phi_s))\n\n\t\tdpsq = -((Hamil/a1) - ratio*signc*(np.cos(phi) - np.cos(phi_s) + (phi-phi_s)*np.sin(phi_s))) \n\n\t\tif dpsq > 0:\n\t\t\tdp = dpsq**0.5\n\t\t\tdp_cont.append(dp)\n\t\t\tphi_real.append(phi)\n\n\tprint \"number of points on contour \",len(phi_real)\n\tdp_cont = np.array(dp_cont)\n\n\t#plt.plot(phi_a, dp_sep)\n\t#plt.ylim(ymin=0)\n\t#plt.show()\n\t#sys.exit()\n\n\tphi_real_a = np.array(phi_real)\n\tt_phi_ns = 1e9*T*(phi_real_a)/(2*math.pi) \n\n\n\treturn dp_cont, t_phi_ns",
"def get_contours(mask, arcsize=0.005):\n contours,hierarchy = cv2.findContours((mask*255).astype(np.uint8), 1, 2)\n\n\n areas=[]\n for c in contours:\n \tareas.append(cv2.contourArea(c))\n\n imax = np.argmax(np.asarray(areas))\n\n cnt = contours[imax]\n\n epsilon = arcsize*cv2.arcLength(cnt,True)\n\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n\n return approx",
"def prec_to_partial(precision):\n partial_correlation = -cov_to_corr(precision)\n np.fill_diagonal(partial_correlation, 1.0)\n return partial_correlation",
"def func_curvature(self):\n return u.Curvature.CONVEX",
"def test_simplify_polygons(self):\n pass",
"def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = [(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the contour area by the function cv2.contourArea() or from moments, M["m00"].
|
def __CalculateArea(self, contour):
return cv2.contourArea(contour)
|
[
"def area(cnt):\n return cv2.contourArea(cnt)",
"def area(cnt):\n\treturn cv2.contourArea(cnt)",
"def findarea(src, n):\n temp1 = src\n #finding all contours in the image.\n contour, hierarchy = cv2.findContours(src,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n tmoment = cv2.moments(contour[n])\n shape = cv2.approxPolyDP(contour[n], cv2.arcLength(contour[n], 0)*0.1, 0)\n area = tmoment['m00']\n return area",
"def __CalculateMoments(self, contour):\r\n return cv2.moments(contour)",
"def get_max_area(contours):\n max_area = 0\n for c in contours:\n temp = cv2.contourArea(c)\n if temp > max_area:\n max_area = temp\n\n return max_area",
"def area_calc(contour_array):\n\n x = contour_array[:, 0]\n y = contour_array[:, 1]\n if min(x)<0:\n x = np.array(x) - min(x)\n if min(y)<0:\n y = np.array(y) - min(y)\n\n area = 0\n for i in range(1, len(y) - 1):\n area += (y[i - 1] * x[i] - x[i - 1] * y[i])\n\n area = abs(area) / 2.0\n return area",
"def getArea(self, image):\n M00 = -999\n subImage = self.getSubImage(image).transpose()\n M00 = self.getRawMoment(subImage, 0, 0)\n return M00",
"def area_separate(cnt):\n return sum([cv2.contourArea(x) for x in cnt])",
"def calcZmArea(self):\n #-- NO EXTRAPOLATION\n if self.extrapolation == \"none\":\n self.ZmArea = sum(self.zmareas)\n #-- AREA EXTRAPOLATION\n if self.extrapolation == \"area\":\n self.ZmArea = sum(self.zmareas) * self.stratum.A2 / self.stratum.Aij\n #-- LINEAR EXTRAPOLATION\n if self.extrapolation == \"linear\":\n self.ZmArea = self.stratum.LT / self.stratum.LN * self.meanZmArea() * self.stratum.Ni\n return self.ZmArea",
"def minAreaRect(contours):\n rect_keep = []\n area = []\n for i in contours:\n rect = cv2.minAreaRect(i)\n rect_keep.append(rect)\n area_ = cv2.contourArea(i)\n area.append(area_)\n return rect_keep, area",
"def find_area_of_interest(frame):\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n lower_red = np.array([110, 60, 0])\n upper_red = np.array([130, 255, 255])\n\n mask = cv2.inRange(hsv, lower_red, upper_red)\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n\n # Converting the image to black and white\n (_, res) = cv2.threshold(res, 90, 255, cv2.THRESH_BINARY)\n\n contours, _ = cv2.findContours(res, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n \n cnt = contours[0]\n max_area = cv2.contourArea(cnt)\n\n for cont in contours:\n if cv2.contourArea(cont) > max_area:\n cnt = cont\n max_area = cv2.contourArea(cont)\n\n contours_2d = np.vstack(cnt.squeeze())\n\n # get the all index for xmin xmax ymin ymax\n xmin_contour = contours_2d[np.argmin(contours_2d[:,0]), :][0]\n xmax_contour = contours_2d[np.argmax(contours_2d[:,0]), :][0]\n ymin_contour = contours_2d[np.argmin(contours_2d[:,1]), :][1]\n ymax_contour = contours_2d[np.argmax(contours_2d[:,1]), :][1]\n\n return [ymin_contour, xmin_contour, ymax_contour, xmax_contour]",
"def get_center_of_mass(contour):\n M = cv2.moments(contour)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n\n return cX, cY",
"def filter_area(contours):\r\n filtered_area = [] # oder lieber als set?\r\n\r\n for cnt_area in contours:\r\n area = cv2.contourArea(cnt_area)\r\n # print(area)\r\n if area > sv.min_area: # 6000 ?\r\n filtered_area.append(cnt_area)\r\n\r\n return filtered_area",
"def filter_area( contours, debug=False ):\r\n ret = []\r\n\r\n for x in contours:\r\n area = cv2.contourArea( x )\r\n if area > MIN_AREA and area < MAX_AREA:\r\n if debug:\r\n print \"Area\", area\r\n ret.append( x )\r\n return( ret )",
"def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid",
"def center_point(contour):\n m = cv2.moments(contour)\n if m['m00'] == 0:\n return None\n\n return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])",
"def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def compute_cells_area(m):\n areaf = vtk.vtkMeshQuality()\n areaf.SetInputData(m)\n areaf.SetTriangleQualityMeasureToArea()\n areaf.SaveCellQualityOn() ##default: quality is stored as cell data\n areaf.Update()\n vtk_x = areaf.GetOutput().GetCellData().GetArray(\"Quality\")\n np_x = nps.vtk_to_numpy(vtk_x)\n return np_x"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|