query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
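Each row pairs a natural-language query with a positive code document, a list of hard-negative code snippets, and a metadata dict. The sketch below shows one way such rows could be iterated to build training triplets; the JSON-lines storage format and the file path are assumptions for illustration, not part of the dataset spec.

import json

def iter_triplets(path):
    # Assumes one JSON object per line with the fields listed above:
    # "query" (str), "document" (str), "negatives" (list of str), "metadata" (dict).
    with open(path) as handle:
        for line in handle:
            row = json.loads(line)
            yield row["query"], row["document"], row["negatives"]

Each yielded tuple supplies the anchor, positive, and negatives referenced by the objective metadata shown in the rows below.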
Extract emission line fluxes from a grid (represented as a DataFrame) by inputting gridpoint indices and taking the fluxes at the nearest gridpoint
def extract_grid_fluxes_i(DF, p_name_ind_map, line_names):
    val_arrs = {p: np.unique(DF[p].values) for p in p_name_ind_map}
    assert len(DF) == np.product([len(v) for v in val_arrs.values()])
    where = np.full(len(DF), 1, dtype=bool)
    for p, ind in p_name_ind_map.items():
        where &= (DF.loc[:, p] == val_arrs[p][ind])
    assert np.sum(where) == 1
    return [DF[line].values[where][0] for line in line_names]
[ "def _get_xy_individual_fluxes(self):\n y = self._dataset.flux[self._dataset.good]\n\n if self.fix_source_flux is False:\n x = np.array(self._data_magnification)\n if self.model.n_sources == 1:\n x = x[self._dataset.good]\n else:\n x = x[:, self._dataset.good]\n\n self.n_fluxes = self._model.n_sources\n else:\n x = None\n if self._model.n_sources == 1:\n y -= (self.fix_source_flux[0] *\n self._data_magnification[self._dataset.good])\n else:\n for i in range(self._model.n_sources):\n if self.fix_source_flux[i] is False:\n self.n_fluxes += 1\n if x is None:\n x = self._data_magnification[i][self._dataset.good]\n else:\n x = np.vstack(\n (x, self._data_magnification[i][\n self._dataset.good]))\n\n else:\n y -= (self.fix_source_flux[i] *\n self._data_magnification[i][self._dataset.good])\n\n return (x, y)", "def get_index_v1(lat, long, lat_gridpoints, long_gridpoints, row_gridpoints, column_gridpoints):\n\n points = np.vstack([lat_gridpoints, long_gridpoints]).transpose()\n row = int(np.round(interpolate.griddata(points, row_gridpoints, (lat, long))))\n column = int(np.round(interpolate.griddata(points, column_gridpoints, (lat, long))))\n return row, column", "def findFlow( terrain ):\n rowoffset, coloffset=findLowNhbr(terrain)\n flow=np.ones_like(terrain)\n indx=np.argsort(-1*terrain,axis==None)\n row_indx=indx/terrain.shape[1]\n col_indx=indx%terrain.shape[1]\n for i in range(terrain.size):\n row_lowNbr=row_indx[i]+rowoffset[row_indx[i],col_indx[i]]\n col_lowNhr=col_indx[i]+coloffset[row_indx[i],col_indx[i]]\n if rowoffset[row_indx[i],col_indx[i]]!=0 or coloffset[row_indx[i],col_indx[i]]!=0:\n flow[row_lowNbr,col_lowNbr]+=flow[row_indx[i],col_indx[i]]\n \n \n return flow", "def test_project_sensors_onto_inflated(tmp_path):\n pytest.importorskip(\"nibabel\")\n raw = mne.io.read_raw_fif(fname_raw)\n trans = _get_trans(fname_trans)[0]\n for subject in (\"sample\", \"fsaverage\"):\n os.makedirs(tmp_path / subject / \"surf\", exist_ok=True)\n for hemi in (\"lh\", \"rh\"):\n # fake white surface for pial\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.white\",\n tmp_path / subject / \"surf\" / f\"{hemi}.pial\",\n )\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.curv\",\n tmp_path / subject / \"surf\" / f\"{hemi}.curv\",\n )\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.inflated\",\n tmp_path / subject / \"surf\" / f\"{hemi}.inflated\",\n )\n if subject == \"fsaverage\":\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.cortex.patch.flat\",\n tmp_path / subject / \"surf\" / f\"{hemi}.cortex.patch.flat\",\n )\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.sphere\",\n tmp_path / subject / \"surf\" / f\"{hemi}.sphere\",\n )\n # now make realistic sEEG locations, picked from T1\n raw.pick_types(meg=False, eeg=True)\n raw.load_data()\n raw.set_eeg_reference([])\n raw.set_channel_types({ch: \"seeg\" for ch in raw.ch_names})\n pos = (\n np.array(\n [\n [25.85, 9.04, -5.38],\n [33.56, 9.04, -5.63],\n [40.44, 9.04, -5.06],\n [46.75, 9.04, -6.78],\n [-30.08, 9.04, 28.23],\n [-32.95, 9.04, 37.99],\n [-36.39, 9.04, 46.03],\n ]\n )\n / 1000\n )\n raw.drop_channels(raw.ch_names[len(pos) :])\n raw.set_montage(\n mne.channels.make_dig_montage(\n ch_pos=dict(zip(raw.ch_names, pos)), coord_frame=\"head\"\n )\n )\n proj_info = _project_sensors_onto_inflated(\n raw.info, trans, \"sample\", subjects_dir=tmp_path\n )\n assert_allclose(\n proj_info[\"chs\"][0][\"loc\"][:3],\n np.array([0.0555809, 0.0034069, -0.04593032]),\n rtol=0.01,\n )\n # check all on 
inflated surface\n x_dir = np.array([1.0, 0.0, 0.0])\n head_mri_t = mne.transforms.invert_transform(trans) # need head->mri\n for hemi in (\"lh\", \"rh\"):\n coords, faces = mne.surface.read_surface(\n tmp_path / \"sample\" / \"surf\" / f\"{hemi}.inflated\"\n )\n x_ = coords @ x_dir\n coords -= np.max(x_) * x_dir if hemi == \"lh\" else np.min(x_) * x_dir\n coords /= 1000 # mm -> m\n for ch in proj_info[\"chs\"]:\n loc = ch[\"loc\"][:3]\n if not np.isnan(loc).any() and (loc[0] <= 0) == (hemi == \"lh\"):\n assert (\n np.linalg.norm(\n coords - mne.transforms.apply_trans(head_mri_t, loc), axis=1\n ).min()\n < 1e-16\n )\n\n # test flat map\n montage = raw.get_montage()\n montage.apply_trans(mne.transforms.invert_transform(trans))\n mri_mni_t = mne.read_talxfm(\"sample\", subjects_dir)\n montage.apply_trans(mri_mni_t) # mri to mni_tal (MNI Taliarach)\n montage.apply_trans(\n mne.transforms.Transform(fro=\"mni_tal\", to=\"mri\", trans=np.eye(4))\n )\n raw.set_montage(montage)\n trans = mne.channels.compute_native_head_t(montage)\n\n flat_proj_info = _project_sensors_onto_inflated(\n raw.info,\n trans=trans,\n subject=\"fsaverage\",\n subjects_dir=tmp_path,\n flat=True,\n )\n\n # check all on flat surface\n x_dir = np.array([1.0, 0.0, 0.0])\n head_mri_t = mne.transforms.invert_transform(trans) # need head->mri\n for hemi in (\"lh\", \"rh\"):\n coords, faces, _ = mne.surface._read_patch(\n tmp_path / \"fsaverage\" / \"surf\" / f\"{hemi}.cortex.patch.flat\"\n )\n coords = coords[:, [1, 0, 2]]\n coords[:, 1] *= -1\n x_ = coords @ x_dir\n coords -= np.max(x_) * x_dir if hemi == \"lh\" else np.min(x_) * x_dir\n coords /= 1000 # mm -> m\n for ch in flat_proj_info[\"chs\"]:\n loc = ch[\"loc\"][:3]\n if not np.isnan(loc).any() and (loc[0] <= 0) == (hemi == \"lh\"):\n assert (\n np.linalg.norm(\n coords - mne.transforms.apply_trans(head_mri_t, loc), axis=1\n ).min()\n < 1e-16\n )\n\n # plot to check\n # brain = mne.viz.Brain('fsaverage', subjects_dir=tempdir, alpha=0.5,\n # surf='flat')\n # brain.add_sensors(flat_proj_info, trans=trans)", "def match_det2cube_msm(naxis1, naxis2, naxis3,\n cdelt1, cdelt2,\n zcdelt3,\n xcenters, ycenters, zcoord,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n flux,\n coord1, coord2, wave,\n rois_pixel, roiw_pixel, weight_pixel, softrad_pixel):\n\n nplane = naxis1 * naxis2\n\n# now loop over the pixel values for this region and find the spaxels that fall\n# withing the region of interest.\n nn = coord1.size\n\n# ilow = 0\n# ihigh = 0\n# imatch = 0\n# print('looping over n points mapping to cloud',nn)\n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n#________________________________________________________________________________\n # xcenters, ycenters is a flattened 1-D array of the 2 X 2 xy plane\n # cube coordinates.\n # find the spaxels that fall withing ROI of point cloud defined by\n # coord1,coord2,wave\n lower_limit = softrad_pixel[ipt]\n xdistance = (xcenters - coord1[ipt])\n ydistance = (ycenters - coord2[ipt])\n radius = np.sqrt(xdistance * xdistance + ydistance * ydistance)\n indexr = np.where(radius <= rois_pixel[ipt])\n indexz = np.where(abs(zcoord - wave[ipt]) <= roiw_pixel[ipt])\n\n # on the wavelength boundaries the point cloud may not be in the IFUCube\n # the edge cases are skipped and not included in final IFUcube.\n # Left commented code for checking later for NIRSPEC the spectral size\n # in the reference file may be too small\n# if len(indexz[0]) == 0:\n# if wave[ipt] < zcoord[0]:\n# 
ilow = ilow + 1\n# elif wave[ipt] > zcoord[-1]: \n# ihigh = ihigh + 1\n# else:\n# imatch = imatch + 1\n# print(' no z match found ',wave[ipt],roiw_pixel[ipt])\n# print(zcoord[naxis3-11:naxis3])\n# diff = abs(zcoord[naxis3-11:naxis3] - wave[ipt])\n# print(diff)\n# exit()\n if len(indexz[0]) > 0:\n d1 = np.array(coord1[ipt] - xcenters[indexr]) / cdelt1\n d2 = np.array(coord2[ipt] - ycenters[indexr]) / cdelt2\n d3 = np.array(wave[ipt] - zcoord[indexz]) / zcdelt3[indexz]\n\n dxy = (d1 * d1) + (d2 * d2)\n\n # shape of dxy is #indexr or number of overlaps in spatial plane\n # shape of d3 is #indexz or number of overlaps in spectral plane\n # shape of dxy_matrix & d3_matrix (#indexr, #indexz)\n # rows = number of overlaps in spatial plane\n # cols = number of overlaps in spectral plane\n dxy_matrix = np.tile(dxy[np.newaxis].T, [1, d3.shape[0]])\n d3_matrix = np.tile(d3 * d3, [dxy_matrix.shape[0], 1])\n\n wdistance = dxy_matrix + d3_matrix\n weight_distance = np.power(np.sqrt(wdistance), weight_pixel[ipt])\n weight_distance[weight_distance < lower_limit] = lower_limit\n weight_distance = 1.0 / weight_distance\n weight_distance = weight_distance.flatten('F')\n weighted_flux = weight_distance * flux[ipt]\n\n icube_index = [iz * nplane + ir for iz in indexz[0] for ir in indexr[0]]\n spaxel_flux[icube_index] = spaxel_flux[icube_index] + weighted_flux\n spaxel_weight[icube_index] = spaxel_weight[icube_index] + weight_distance\n spaxel_iflux[icube_index] = spaxel_iflux[icube_index] + 1", "def seafloor_grid(depths, lat, lon):", "def pointdata_time_series(self, p_list, ti_start=0, ti_end=-1):\n # get the grid from the first timestep\n df_inst = self.get_df_inst(time=self.times[0])\n grid_data, grid_dims = self.fielddata_from_df(df_inst)\n\n # extract grid coordinates\n X = grid_data['X']\n Y = grid_data['Y']\n Z = grid_data['Z']\n\n # initialize empty lists\n kji_nearest = []\n p_nearest = []\n\n # loop through each point\n for p in p_list:\n xp, yp, zp = p\n\n # compute distance from point to each grid node\n R = np.power(X-xp, 2) + np.power(Y-yp, 2) + np.power(Z-zp, 2)\n\n # find the indices of the place where r is a minimum\n zi, yi, xi = np.unravel_index(R.argmin(), R.shape)\n\n # add this index to the list of indices\n kji_nearest.append((zi, yi, xi))\n\n # get the actual coordinate of the nearest point and add to list of\n # nearest points\n p_nearest.append((X[zi, yi, xi],\n Y[zi, yi, xi],\n Z[zi, yi, xi]))\n\n # preallocate arrays\n num_times = len(self.times[ti_start:ti_end])\n num_points = len(p_list)\n\n u = np.zeros([num_points, num_times])\n v = np.zeros([num_points, num_times])\n w = np.zeros([num_points, num_times])\n ufs = np.zeros([num_points, num_times])\n vfs = np.zeros([num_points, num_times])\n wfs = np.zeros([num_points, num_times])\n\n # loop through the files and extract data\n for ti, time in enumerate(self.times[ti_start:ti_end]):\n # get the dataframe for the current time\n df_inst = self.get_df_inst(time=time)\n\n # extract data from the dataframe\n grid_data, grid_dims = self.fielddata_from_df(df_inst)\n\n for pi, coords in enumerate(kji_nearest):\n # extract data at point and store in array\n u[pi, ti] = (grid_data['U'])[coords]\n v[pi, ti] = (grid_data['V'])[coords]\n w[pi, ti] = (grid_data['W'])[coords]\n ufs[pi, ti] = (grid_data['Ufs'])[coords]\n vfs[pi, ti] = (grid_data['Vfs'])[coords]\n wfs[pi, ti] = (grid_data['Wfs'])[coords]\n\n data_dict = {'t': self.times[ti_start:ti_end],\n 'u': u,\n 'v': v,\n 'w': w,\n 'ufs': ufs,\n 'vfs': vfs,\n 'wfs': wfs}\n\n return data_dict, 
p_nearest", "def grid_glider_data(df, varname, delta_z=.3):\n df.dropna(inplace=True)\n #df.dropna() # Changed to work with ru29 2020 datatset by JG\n df.drop(df[df['depth'] < .1].index, inplace=True) # drop rows where depth is <1\n df.drop(df[df[varname] == 0].index, inplace=True) # drop rows where the variable equals zero\n df.sort_values(by=['time', 'depth'], inplace=True)\n\n # find unique times and coordinates\n timeg, ind = np.unique(df.time.values, return_index=True)\n latg = df['latitude'].values[ind]\n long = df['longitude'].values[ind]\n dg = df['depth'].values\n vg = df[varname].values\n zn = np.int(np.max(np.diff(np.hstack([ind, len(dg)]))))\n\n depthg = np.empty((zn, len(timeg)))\n depthg[:] = np.nan\n varg = np.empty((zn, len(timeg)))\n varg[:] = np.nan\n\n for i, ii in enumerate(ind):\n if i < len(timeg) - 1:\n i_f = ind[i + 1]\n else:\n i_f = len(dg)\n depthi = dg[ind[i]:i_f]\n vari = vg[ind[i]:i_f]\n depthg[0:len(dg[ind[i]:i_f]), i] = depthi\n varg[0:len(vg[ind[i]:i_f]), i] = vari\n\n # sort time variable\n okt = np.argsort(timeg)\n timegg = timeg[okt]\n depthgg = depthg[:, okt]\n vargg = varg[:, okt]\n\n # Grid variables\n depthg_gridded = np.arange(0, np.nanmax(depthgg), delta_z)\n varg_gridded = np.empty((len(depthg_gridded), len(timegg)))\n varg_gridded[:] = np.nan\n\n for t, tt in enumerate(timegg):\n depthu, oku = np.unique(depthgg[:, t], return_index=True)\n varu = vargg[oku, t]\n okdd = np.isfinite(depthu)\n depthf = depthu[okdd]\n varf = varu[okdd]\n ok = np.asarray(np.isfinite(varf))\n if np.sum(ok) < 3:\n varg_gridded[:, t] = np.nan\n else:\n okd = np.logical_and(depthg_gridded >= np.min(depthf[ok]), depthg_gridded < np.max(depthf[ok]))\n varg_gridded[okd, t] = np.interp(depthg_gridded[okd], depthf[ok], varf[ok])\n\n return timegg, long, latg, depthg_gridded, varg_gridded", "def MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp=1.0,tau=10,frac_samp=2,numSource=0,numSamp=0,offset=0.15):\n mesh.MFS=True\n \n if numSource == 0:\n for d in mesh.dList:\n d.numSource = int(np.ceil(tau**2*k**2*d.area()/(4*np.pi*np.pi)))\n #d.numSource = int(np.ceil(tau*k*d.length()/(2*np.pi)))\n #d.numSamp = int(frac_samp*d.numSource)\n \n def number_of_points(d,N):\n a = d.numelements * N**2\n b = d.edges * (N-2)\n c = d.corners * 3\n d = d.extraordinary_points\n return a-b-c+d \n \n # Singular (source) points\n for d in mesh.dList:\n N=1\n if numSource == 0:\n while number_of_points(d,N) < d.numSource: N+=1\n else:\n while number_of_points(d,N) < numSource: N+=1\n d.numSource = number_of_points(d,N)\n xi1 = np.linspace(d.eList[0].limits[0],d.eList[0].limits[1],N)\n xi2 = np.linspace(d.eList[0].limits[2],d.eList[0].limits[3],N) \n xi1,xi2=np.meshgrid(xi1,xi2)\n xi1=xi1.reshape(-1,) ; xi2=xi2.reshape(-1,) \n souvals = d.eList[0].vals(d.eList[0].limits[0],d.eList[0].limits[2])\n sounorms = d.eList[0].normals(d.eList[0].limits[0],d.eList[0].limits[2])\n for e in d.eList:\n newvals = e.vals(xi1,xi2)\n newnorms = e.normals(xi1,xi2) \n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1)\n qx,qy,qz=newvals\n rx=px-qx ; ry=py-qy ; rz=pz-qz\n rx=np.tril(rx, -1)[:,:-1]+np.triu(rx, 1)[:,1:]\n ry=np.tril(ry, -1)[:,:-1]+np.triu(ry, 1)[:,1:]\n rz=np.tril(rz, -1)[:,:-1]+np.triu(rz, 1)[:,1:]\n r = np.sqrt( rx**2 + ry**2 + rz**2 )\n delete = np.where(np.any(r<1e-10,axis=1))[0]\n newvals = np.delete(newvals,delete[1:],axis=1)\n newnorms = np.delete(newnorms,delete[1:],axis=1)\n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1) \n 
qx,qy,qz=souvals\n r = np.sqrt( (qx-px)**2 + (qy-py)**2 + (qz-pz)**2 )\n delete = np.where(np.any(r<1e-12,axis=1))[0]\n souvals = np.hstack([souvals,np.delete(newvals,delete,axis=1)])\n sounorms = np.hstack([sounorms,np.delete(newnorms,delete,axis=1)]) \n d.sourceVals = souvals + offset*sounorms\n d.sourceNormals = sounorms\n mesh.sourceVals = np.hstack([d.sourceVals for d in mesh.dList])\n mesh.sourceNormals = np.hstack([d.sourceNormals for d in mesh.dList])\n \n # Sampling points \n for d in mesh.dList:\n N=1\n if numSamp == 0:\n while number_of_points(d,N) < frac_samp*d.numSource: N+=1\n else:\n while number_of_points(d,N) < numSource: N+=1\n d.numSamp = number_of_points(d,N)\n xi1 = np.linspace(d.eList[0].limits[0],d.eList[0].limits[1],N)\n xi2 = np.linspace(d.eList[0].limits[2],d.eList[0].limits[3],N) \n xi1,xi2=np.meshgrid(xi1,xi2)\n xi1=xi1.reshape(-1,) ; xi2=xi2.reshape(-1,) \n sampvals = d.eList[0].vals(d.eList[0].limits[0],d.eList[0].limits[0])\n sampnorms = d.eList[0].normals(d.eList[0].limits[0],d.eList[0].limits[0])\n for e in d.eList:\n newvals = e.vals(xi1,xi2)\n newnorms = e.normals(xi1,xi2) \n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1)\n qx,qy,qz=newvals\n rx=px-qx ; ry=py-qy ; rz=pz-qz\n rx=np.tril(rx, -1)[:,:-1]+np.triu(rx, 1)[:,1:]\n ry=np.tril(ry, -1)[:,:-1]+np.triu(ry, 1)[:,1:]\n rz=np.tril(rz, -1)[:,:-1]+np.triu(rz, 1)[:,1:]\n r = np.sqrt( rx**2 + ry**2 + rz**2 )\n delete = np.where(np.any(r<1e-10,axis=1))[0]\n newvals = np.delete(newvals,delete[1:],axis=1)\n newnorms = np.delete(newnorms,delete[1:],axis=1)\n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1) \n qx,qy,qz=sampvals\n r = np.sqrt( (qx-px)**2 + (qy-py)**2 + (qz-pz)**2 )\n delete = np.where(np.any(r<1e-12,axis=1))[0] \n sampvals = np.hstack([sampvals,np.delete(newvals,delete,axis=1)])\n sampnorms = np.hstack([sampnorms,np.delete(newnorms,delete,axis=1)]) \n d.sampVals = sampvals\n d.sampNormals = sampnorms\n mesh.sampVals = np.hstack([d.sampVals for d in mesh.dList])\n mesh.sampNormals = np.hstack([d.sampNormals for d in mesh.dList]) \n \n dphidn = evaluate_dphidn(mesh,k,incAmp,incDir) # derivative phi_inc wrt n\n \n T = evaluate_T(mesh,k)\n\n A = np.dot(T,T.T)\n b = np.sum(-T*dphidn,axis=1)\n\n # Solve for fundamental solution amplitudes\n mesh.amplitudes = np.linalg.solve(A,b)\n\n return get_potentials(np.vstack([plotx,ploty,plotz]),mesh,k,incAmp,incDir)", "def at_decks(self, points: List[Point]) -> List[float]:\n self._at_deck_interp(0, 0) # Ensure the grid of points is calculated.\n xzs = np.array([[point.x, point.z] for point in points])\n points, values = self.griddata\n return griddata(points, values, xzs)\n # result = self.grid_interp2d(xzs[0].flatten(), xzs[1].flatten())[0]\n # return result", "def ndsnap_regular(points, *grid_axes): \n # https://stackoverflow.com/q/8457645/717525\n snapped = [] \n for i, ax in enumerate(grid_axes): \n diff = ax[:, np.newaxis] - points[:, i]\n best = np.argmin(np.abs(diff), axis=0) \n snapped.append(ax[best]) \n return np.array(snapped).T", "def calculate_spatial_risk(host_df, foi, deltat, xbounds, ybounds, calc=\"cumulative\"):\n \n # Continuous trajectory to grid cells\n xcoord, ycoord = map_to_grid(host_df.x.values, host_df.y.values, \n xbounds, ybounds)\n \n # Plot grid \n # XX, YY = np.meshgrid(xbounds['bnd'], ybounds['bnd'])\n # plt.plot(XX, YY, 'o', ms=1, color='red')\n # plt.plot(host1.x.values, host1.y.values)\n # plt.plot(host2.x.values, host2.y.values)\n # 
plt.plot(xbounds['y'][xcoord], ybounds['y'][ycoord], 'o', ms=4)\n\n # The trajectory in grid form\n grids = pd.DataFrame([(x, y) for x, y in zip(xcoord, ycoord)], columns=[\"x\", \"y\"])\n grids = grids.assign(foi=foi)\n\n spatial_foi = grids.groupby(['x', 'y']).apply(spatial_risk, deltat, calc).reset_index()\n \n return((spatial_foi, grids))", "def find_track_starts(coordAmp):\n trackStarts = [] \n for track in range(1, coordAmp.shape[0]):\n xPositions = coordAmp.loc[track].loc['xPos 1'::8]\n yPositions = coordAmp.loc[track].loc['yPos 1'::8]\n frame = 0\n xPosition = np.nan\n while (np.isnan(xPosition)):\n xPosition = xPositions[frame]\n frame = frame + 1\n trackStarts.append((xPositions[frame], yPositions[frame]))\n return trackStarts", "def onFrontier(self, index):\n connected = False\n top = False\n bottom = False\n \n if (index>=self.ogrid_sizeX):\n top = True\n # Check the cell above to see if its connected to a known \n # cell\n if self.current_map.data[index-self.ogrid_sizeX] ==-1:#<50 and self.current_map.data[index-self.ogrid_sizeX]>=0:\n connected = True\n \n if (index<(self.ogrid_sizeX*self.ogrid_sizeY-self.ogrid_sizeX)): # check this math\n bottom =True\n # Check the cell below to see if its connected to a known \n # cell\n if self.current_map.data[index+self.ogrid_sizeX] ==-1:#<50 and self.current_map.data[index+self.ogrid_sizeX]>=0:\n connected = True\n \n if (np.mod(index,self.ogrid_sizeX) != 0):\n # Check the cell to the left to see if its connected to a \n # known cell\n if self.current_map.data[index-1] ==-1:#<50 and self.current_map.data[index-1]>=0:\n connected = True\n # Check top left\n if top and self.current_map.data[index-self.ogrid_sizeX-1] ==-1:\n connected = True\n # Check bottom left\n if bottom and self.current_map.data[index+self.ogrid_sizeX-1] ==-1:\n connected = True\n \n if (np.mod(index,self.ogrid_sizeX) != self.ogrid_sizeX-1):\n # Check the cell to the right to see if its connected to a \n # known cell\n if self.current_map.data[index+1] ==-1:#<50 and self.current_map.data[index+1]>=0:\n connected = True\n # Check top right\n if top and self.current_map.data[index-self.ogrid_sizeX+1] ==-1:\n connected = True\n # Check bottom right\n if bottom and self.current_map.data[index+self.ogrid_sizeX+1] ==-1:\n connected = True\n \n return connected", "def eta_filter_csv(\n file: str,\n) -> Tuple[\n np.ndarray,\n np.ndarray,\n np.ndarray,\n np.ndarray,\n np.ndarray,\n ArrayLike,\n ArrayLike,\n ArrayLike,\n]:\n eta_filter_df = pd.read_csv(file, header=0)\n\n F_int = eta_filter_df.columns.values.astype(float)\n\n if np.average(F_int) < 10.0**9:\n F_int = F_int * 10.0**9\n eta_filter_df.columns = F_int\n\n # Fit to lorentzian model\n fit = np.apply_along_axis(fit_lorentzian, 1, eta_filter_df.to_numpy(), x=F_int)\n fit_df = pd.DataFrame(fit, columns=[\"Center\", \"HWHM\", \"max height\", \"chi sq\"])\n\n eta_filter_df = eta_filter_df.join(fit_df)\n\n # Sort frequency bins\n eta_filter_df = eta_filter_df.sort_values(\"Center\", axis=0)\n eta_filter_df.set_index(np.arange(0, len(eta_filter_df)), inplace=True)\n\n # Extract values\n F = eta_filter_df[\"Center\"].to_numpy(float)\n\n # Make filter matrix\n eta_filter = eta_filter_df.to_numpy()[:, :-4]\n\n # calculate integration bandwith, copy second-last bin BW to last\n W_F_int = np.copy(F_int)\n W_F_int[0:-2] = F_int[1:-1] - F_int[0:-2]\n W_F_int[-2] = W_F_int[-3]\n\n # Equivalent in-band box filter approximation\n box_height = np.pi / 4 * eta_filter_df[\"max height\"].to_numpy(float)\n box_width = 2 * 
eta_filter_df[\"HWHM\"].to_numpy(float)\n chi_sq = eta_filter_df[\"chi sq\"].to_numpy(float)\n\n eta_inband = eta_inband_mask(F_int, F, box_width / 2) * eta_filter\n\n return eta_filter, eta_inband, F, F_int, W_F_int, box_height, box_width, chi_sq", "def smooth_interp(self):\n #### 1) Prepare for ROMS grid\n xroms=np.zeros_like(self.lon0) ## xroms: longitude, yroms: latitude\n yroms=np.zeros_like(self.lat0)\n (y,x)=self.lon0.shape\n for i in range(y):\n for j in range(x):\n (yroms[i][j],xroms[i][j])=utm.from_latlon(self.lat0[i][j],self.lon0[i][j])[0:2]\n \n xy_roms = np.vstack((xroms[self.maskss0==1],yroms[self.maskss0==1])).T\n Fuv = interpXYZ(xy_roms,xy_roms, method='kriging')\n \n uroms, vroms = self.combine()\n for tstep in range(self.time_ss.shape[0]):\n utem=np.zeros_like(xroms)\n utem[self.maskss0==1]=Fuv(uroms[self.ind0+tstep,:,:][self.maskss0==1])\n uroms[self.ind0+tstep,:,:]=utem\n \n vtem=np.zeros_like(xroms)\n vtem[self.maskss0==1]=Fuv(vroms[self.ind0+tstep,:,:][self.maskss0==1])\n vroms[self.ind0+tstep,:,:]=vtem\n \n basemap = Basemap(projection='merc',llcrnrlat=self.lat0.min(),urcrnrlat=self.lat0.max(), \\\n llcrnrlon=self.lon0.min(),urcrnrlon=self.lon0.max(),resolution='i')\n fig1 = plt.figure()\n ax = fig1.add_subplot(111)\n \n basemap.drawcoastlines()\n basemap.fillcontinents()\n basemap.drawcountries()\n basemap.drawstates()\n x_rho, y_rho = basemap(self.lon0, self.lat0)\n\n basemap.pcolormesh(x_rho, y_rho, uroms[-2,:,:], vmin=uroms.min(),vmax=uroms.max()) \n plt.show() \n \n #pdb.set_trace()", "def distance(xi, yi, zi, index, surface,df):\n df = df.drop([index]) #I delete the sphere's center from the dataframe\n\n dis_euc = [] #a list containing the distance values\n\n for index2, row in df.iterrows():#dataframe parsing\n\n p2 = list(df.loc[index2,[\"x\",\"y\",\"z\"]]) #coordinates of an atom \n\n for ind in range(len(xi)): # for each point of the 100 points \n\n p1 = [xi[ind], yi[ind], zi[ind]] #coordinates of the 100 points \n\n dist_p1_p2 = np.linalg.norm(np.array(p1)-np.array(p2)) #calculating the distance between p1 & p2\n\n dis_euc.append(dist_p1_p2)#put the distance in a list\n\n return (dis_euc)", "def _compute_wind_index(self, plan):", "def get_xy_for_electrode(self, idx):\n # check if grid exists\n if self.grid is None:\n raise ValueError('Grid is not set.')\n else:\n x, y = self.grid[:, idx]\n return int(x-1), int(y-1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the parameter estimates are as expected
def test_parameter_estimates(self):
    DF_est = self.Result.Posterior.DF_estimates
    self.assertTrue(all(p in DF_est.index for p in self.params))
    # Tolerance for distance between gridpoint we chose and the estimate:
    grid_sep_frac = 0.1  # Allowed fraction of distance between gridpoints
    for p, test_ind in zip(self.params, self.test_gridpoint):
        tol = np.diff(self.val_arrs[p])[0] * grid_sep_frac
        value = self.val_arrs[p][test_ind]  # Expected parameter value
        est = DF_est.loc[p, "Estimate"]  # NebulaBayes estimate
        self.assertTrue(np.isclose(est, value, atol=tol))
[ "def test_parameter_estimates(self):\n DF_est = self.Result.Posterior.DF_estimates # DataFrame\n p0_est = DF_est.loc[\"p0\", \"Estimate\"]\n self.assertTrue(np.isclose(p0_est, self.expected_p0, atol=1))", "def test_estimate_bounds_checks(self):\n DF = self.Result.Posterior.DF_estimates # Parameter estimate table\n for p in [\"12 + log O/H\", \"log P/k\", \"log U\"]:\n for col in [\"Est_in_CI68?\", \"Est_in_CI95?\"]:\n self.assertTrue(DF.loc[p,col] == \"Y\")\n for col in [\"Est_at_lower?\", \"Est_at_upper?\", \"P(lower)>50%?\",\n \"P(upper)>50%?\"]:\n self.assertTrue(DF.loc[p,col] == \"N\")\n self.assertTrue(DF.loc[p,\"n_local_maxima\"] == 1)", "def test_params_module():\n # Get the inputs required by the Scales object\n (profile, disp_phases, z0) = get_sim_data()\n\n\n # Test that the governing parameters are computed correctly\n # First, test a single dispersed phase\n model = params.Scales(profile, disp_phases[1])\n check_get_variables(model, z0, 0.15, 0.21724144538674975,\n 0.001724100901081246, 0.22611661456807244, 0.15)\n\n # Second, try a list of dispersed phases, where the dominant phase is\n # not the first one\n particles = [disp_phases[1], disp_phases[0], disp_phases[2]]\n model = params.Scales(profile, particles)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Third, make sure we get the same answer as the previous case if the\n # particles are in a different order (i.e., the original order)\n model = params.Scales(profile, disp_phases)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Using the latest Scales object, check that the other methods return\n # the correct results. Since these methods only depend on the values\n # of B, N, and us computed by the get_variables() method, only one case\n # needs to be tested\n assert_approx_equal(model.h_T(z0), 346.40139518559153, significant=6)\n assert_approx_equal(model.h_P(z0), 627.57408319500291, significant=6)\n assert_approx_equal(model.h_S(z0, 0.15), 295.45365120553163,\n significant=6)\n assert_approx_equal(model.lambda_1(z0, 0), 0.74523735215223819,\n significant=6)\n assert_approx_equal(model.u_inf_crit(z0), 0.063723667111426671,\n significant=6)", "def test_reestimate_params(self):\n pdf_matrix = self.cluster_obj_4.compute_pdf_matrix()\n posterior_matrix = self.cluster_obj_4.compute_posterior(pdf_matrix)\n self.cluster_obj_4.reestimate_params(posterior_matrix)\n self.assertEqual(round(self.cluster_obj_4.mean[0], 2), 0.24)\n self.assertEqual(round(self.cluster_obj_4.variance[0], 2), 0.02)\n self.assertEqual(round(self.cluster_obj_4.weight[0], 2), 0.13)", "def _check_model_params(self):", "def _assert_validity_of_inputs(self):\n for item in [\"frequency\", \"Dt\"]:\n if isinstance(self.__getattribute__(item), bool):\n raise TypeError(f\"Parameter '{item}' must be numeric.\")\n if not isinstance(self.__getattribute__(item), (int, float)):\n raise TypeError(f\"Parameter '{item}' is not a non-zero number.\")\n if self.__getattribute__(item) <= 0.0:\n raise ValueError(f\"Parameter '{item}' must be a non-zero number.\")\n for item in ['q0', 'P', 'R']:\n if self.__getattribute__(item) is not None:\n if isinstance(self.__getattribute__(item), bool):\n raise TypeError(f\"Parameter '{item}' must be an array of numeric values.\")\n if not isinstance(self.__getattribute__(item), (list, tuple, np.ndarray)):\n raise TypeError(f\"Parameter '{item}' is not an array. 
Got {type(self.__getattribute__(item))}.\")\n self.__setattr__(item, np.copy(self.__getattribute__(item)))\n if self.q0 is not None:\n if self.q0.shape != (4,):\n raise ValueError(f\"Parameter 'q0' must be an array of shape (4,). It is {self.q0.shape}.\")\n if not np.allclose(np.linalg.norm(self.q0), 1.0):\n raise ValueError(f\"Parameter 'q0' must be a versor (norm equal to 1.0). Its norm is equal to {np.linalg.norm(self.q0)}.\")\n for item in ['P', 'R']:\n if self.__getattribute__(item).ndim != 2:\n raise ValueError(f\"Parameter '{item}' must be a 2-dimensional array.\")\n m, n = self.__getattribute__(item).shape\n if m != n:\n raise ValueError(f\"Parameter '{item}' must be a square matrix. It is {m}x{n}.\")", "def _checkModelParameters(self):\r\n\t param_counts = self._countModelParameters()\r\n\t expected_param_counts = {\r\n\t # vocab_size * embedding_size\r\n\t \"seq_embedding\": 24100200,\r\n\t # (embedding_size + num_lstm_units + 1) * 4 * num_lstm_units\r\n\t \"lstm\": 467968,\r\n\t # (num_lstm_units + 1) * logits_size\r\n\t \"logits\": 32896,\r\n\t \"global_step\": 1,\r\n\t }\r\n\t self.assertDictEqual(expected_param_counts, param_counts)", "def testDomainParamEstimateVomm(marklist,domlist,paramdict,sortmarkers,sidecoefs,nodecounts,params,varcount):\n compcount = 1\n if params['model'] in [\"linear\",\"binary\"] and params[\"order\"] == 1:\n [Xdom,ydom,solx] = sidecoefs\n elif params['model'] in [\"linear\",\"binary\"]:\n [Xdom,ydom,lincoefs,logcoefs,solx,termobjval,muvec] = sidecoefs\n elif params['model'] == \"nonparam\":\n [lincoefs,logcoefs,solx,objval,muvec,compcount] = sidecoefs\n assert TestSEDFMest.testPreParamDataVomm(marklist,domlist,sortmarkers,nodecounts,params)\n assert TestSEDFMest.testParamDict(paramdict,params['model'],params['width'],compcount)\n assert TestSEDFMest.compareSol2dictVomm(solx,paramdict,sortmarkers,params['width'],compcount)\n return True\n if params['model'] == \"nonparam\": \n assert TestSEDFMest.testPreNonParamData(marklist,domlist,sortmarkers,varcount,nodecounts,params,compcount,logcoefs) \n estobjval = TestSEDFMest.estNonParamObj(paramdict,marklist,domlist,sortmarkers,params,nodecount,compcount,stmuvec,endmuvec)\n assert abs(stobjval + endobjval - estobjval) <= 0.1\n elif params['model'] in [\"binary\",\"linear\"] and params[\"order\"] != 1:\n #def temploglikeEst(paramx,lincoefs,logcoefs,tmuvec):\n # \"\"\"negative log likelihood obj + penalty\n # \"\"\"\n # tobjval = np.dot(paramx,lincoefs)\n # templist = [np.dot(paramx,logcoef) for logcoef in logcoefs]\n # tobjval += sum([tval if tval >= 10 else math.log(1.0 + math.exp(tval)) for tval in templist])\n # tobjval += estPenaltyParam(paramx,tmuvec,singcount,width,curlam)\n # return tobjval \n #print \"start\" \n #print temploglikeEst(res['x'],lincoefs,logcoefs,muvec)\n #print res['fun']\n #assert abs(temploglikeEst(res['x'],lincoefs,logcoefs,muvec) - res['fun']) < 0.1 \n for lcoef in lincoefs:\n assert lcoef >= 0\n estobjval = TestSEDFMest.estParamEstObj(marklist,domlist,paramdict,sortmarkers,nodecounts,params)\n print estobjval\n exit(1)\n return True", "def test_mutable(data):\n (input_data, y, formula) = data\n model_prefit = gammy.BayesianGAM(formula)\n model_fitted = model_prefit.fit(input_data, y)\n assert_arrays_equal(\n model_prefit.mean_theta,\n model_fitted.mean_theta\n )\n return", "def test_basics(self):\n self.report('Testing adding data, evaluation and marginal likelihood.' 
+\n ' Probabilistic test, might fail.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n num_tests = 0\n num_successes = 0\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n # Predictions & Marginal likelihood\n curr_preds, _ = curr_gp.eval(dataset[2], 'std')\n curr_gp_err = compute_average_prediction_error(dataset, curr_preds)\n const_err = compute_average_prediction_error(dataset, dataset[1].mean())\n lml = curr_gp.compute_log_marginal_likelihood()\n is_success = curr_gp_err < const_err\n num_tests += 1\n num_successes += is_success\n self.report(('(%s, ntr=%d, nte=%d):: GP-lml=%0.4f, GP-err=%0.4f, ' +\n 'Const-err=%0.4f. succ=%d')%(dataset[-1][:5], len(dataset[0]),\n len(dataset[2]), lml, curr_gp_err, const_err, is_success),\n 'test_result')\n succ_frac = num_successes / float(num_tests)\n self.report('Summary: num_successes / num_floats = %d/%d = %0.4f'%(num_successes,\n num_tests, succ_frac), 'test_result')\n assert succ_frac > 0.5", "def _check_params(self, params):\n params_keys = params.keys()\n assert \"bandwidth\" in params_keys\n assert \"count\" in params_keys\n assert params[\"bandwidth\"] > 0.0\n assert params[\"count\"] > 0\n if not \"enforce_no_matrix\" in params_keys:\n params[\"enforce_no_matrix\"] = False\n if not \"max_memory_usage\" in params_keys:\n params[\"max_memory_usage\"] = 512\n if not \"normalize\" in params_keys:\n params[\"normalize\"] = False\n return params", "def test_set_suite_pvalue(self):\n # force stats to fail\n self._set_suite_pvalue(0.99)\n obs = [2,5,6]\n exp = [1,2,3,4,5,6,7,8,9]\n self.assertRaises(AssertionError, self.assertSimilarMeans, obs, exp)\n \n # force stats to pass\n self._set_suite_pvalue(0.01)\n self.assertSimilarMeans(obs, exp)", "def _validate_parameters(supplied_params):\n\n # Define the parameters, their data types, and if they are required\n parameters = [('Rs', float, True),\n ('Mp', float, True),\n ('Rp', float, True),\n ('T', float, True),\n ('logZ', int, False),\n ('CO_ratio', float, False),\n ('log_cloudtop_P', int, False),\n ('log_scatt_factor', int, False),\n ('scatt_slope', int, False),\n ('error_multiple', int, False),\n ('T_star', int, False)]\n\n for parameter in parameters:\n name, data_type, required = parameter\n\n # Ensure that all required parameters are supplied\n if required:\n assert name in supplied_params, '{} missing from parameters'.format(parameter)\n\n # Ensure the supplied parameter is of a valid data type\n if name in supplied_params:\n assert type(supplied_params[name]) == data_type, '{} is not of type {}'.format(parameter, data_type)", "def testConsistency(self):\n tolerance= 1.0e-10\n self.assertFalse(abs(self.payment.NPV() - self.nominal) > tolerance)", "def _validate_estimator(self):\n super()._validate_estimator()", "def checkParameters(self):\n self.DEBUG(\"EDPluginBioSaxsHPLCv1_0.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.rawImage, \"No raw image\")\n self.checkMandatoryParameters(self.dataInput.sample, \"no Sample parameter\")\n self.checkMandatoryParameters(self.dataInput.experimentSetup, \"No experimental setup parameter\")", "def _assert_validity_of_inputs(self):\n for item in [\"frequency\", \"Dt\", \"alpha\", \"beta\", \"threshold\"]:\n if isinstance(self.__getattribute__(item), bool):\n raise 
TypeError(f\"Parameter '{item}' must be numeric.\")\n if not isinstance(self.__getattribute__(item), (int, float)):\n raise TypeError(f\"Parameter '{item}' is not a non-zero number.\")\n if self.__getattribute__(item) <= 0.0:\n raise ValueError(\"Parameter '{item}' must be a non-zero number.\")\n if not isinstance(self.adaptive, bool):\n raise TypeError(f\"Parameter 'adaptive' is not a boolean type. It is {type(self.adaptive)}.\")\n if not isinstance(self.frame, str):\n raise TypeError(f\"Parameter 'frame' is not a string. It is {type(self.frame)}.\")\n if self.frame.upper() not in ['NED', 'ENU']:\n raise ValueError(f\"Frame '{self.frame}' is not valid. Try 'NED' or 'ENU'.\")", "def _params_validate_and_generate(self) -> None:\n # default params\n if \"d\" not in self.params:\n self.params[\"d\"] = 3\n\n # calculated params\n self.params[\"T\"] = -1 # -1 until a stabilizer round is added!\n self.params[\"num_readout\"] = -1 # -1 until a logical readout is performed!\n self.params[\n \"num_lattice_readout\"\n ] = -1 # -1 until a lattice readout is performed!\n self.params[\"num_data\"] = self.params[\"d\"]\n self.params[\"num_syn\"] = self.params[\"d\"] - 1", "def test_guess_incorrect():\n\n assert update_guess(0, 0.3, 0.1, 0.7) <= 0.3\n assert update_guess(0, 0.1, 0.3, 0.7) <= 0.1\n assert update_guess(0, 0.01, 0.01, 0.01) <= 0.01\n assert update_guess(0, 0.49, 0.49, 0.99) <= 0.49" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the raw grid spec is as expected
def test_raw_Grid_spec(self):
    RGrid_spec = self.NB_Model_1.Raw_grids
    self.assertEqual(RGrid_spec.param_names, self.params)
    self.assertEqual(RGrid_spec.ndim, len(self.params))
    self.assertEqual(RGrid_spec.shape, self.n_gridpts_list)
    self.assertEqual(RGrid_spec.n_gridpoints, np.product(self.n_gridpts_list))
    for a1, a2 in zip(RGrid_spec.param_values_arrs, self.val_arrs.values()):
        self.assertTrue(np.allclose(np.asarray(a1), np.asarray(a2)))
[ "def test_interpolated_Grid_spec(self):\n IGrid_spec = self.Result.Grid_spec\n self.assertEqual(IGrid_spec.param_names, self.params)\n self.assertEqual(IGrid_spec.param_display_names, self.params)\n self.assertEqual(IGrid_spec.shape, tuple(self.interpd_shape))\n self.assertEqual(IGrid_spec.n_gridpoints, np.product(self.interpd_shape))", "def test_vector_grid_formatting(self):\n self.assertEqual(1, 1)", "def test_is_grid_row_invalid():\n assert not sudoku.is_grid_valid(BAD_ROW_GRID)", "def test_odd_grid(self):\n grid_size = functions.set_grid(576, 720, constant.MAX_GRID, constant.MIN_GRID)\n self.assertNotEqual(grid_size % 2, 0)", "def __modify_model_grid(self, grid):\n\t\theight = len(grid)\n\t\twidth = len(grid[0])\n\t\t\n\t\tfor i in range(height):\n\t\t\tfor j in range(width):\n\t\t\t\tself.color_game.grid[i][j] = grid[i][j]\n\t\t\n\t\tself.assertRaises(DimensionException, ColorBlocksModel, 2, 2)", "def test_set_grid(self):\n\t\ttest_grid = self.load_grid_181()\n\t\tresult_grid = test_grid.get_grid()\n\n\t\tself.assertEquals(result_grid, range(1, 82))", "def test_extract_grid_size(self):\n parser = Parser()\n for k, v in self.GRID_SIZES.items():\n assert parser._extract_grid_size(k) == v", "def validate_grid(self) -> bool:\n if not self.grid:\n return False\n\n # grid type and length\n if type(self.grid) is not list or len(self.grid) != 9:\n return False\n\n for row in self.grid:\n # rows type and length\n if type(row) is not list or len(row) != 9:\n return False\n\n # numbers type and range\n for number in row:\n if type(number) is not int or number not in range(0, 10):\n return False\n\n # duplicates in row, column, box\n for i in range(1, 10):\n row = self.get_row(i)\n column = self.get_column(i)\n box = self.get_box(i)\n\n for number in range(1, 10):\n if row.count(number) > 1 or column.count(number) > 1 or \\\n box.count(number) > 1:\n return False\n\n return True", "def test_generic_grid():\n for dim in (1, 2, 3):\n periodic = random.choices([True, False], k=dim)\n shape = np.random.randint(2, 8, size=dim)\n a = np.random.random(dim)\n b = a + np.random.random(dim)\n \n cases = [grids.UnitGrid(shape, periodic=periodic),\n grids.CartesianGrid(np.c_[a, b], shape, periodic=periodic)]\n for grid in cases:\n assert grid.dim == dim\n dim_axes = len(grid.axes) + len(grid.axes_symmetric)\n assert dim_axes == dim\n vol = grid.cell_volume_data * np.prod(shape)\n assert grid.volume == pytest.approx(vol)\n \n # random points\n points = [[np.random.uniform(a[i], b[i]) for i in range(dim)]\n for _ in range(10)]\n c = grid.point_to_cell(points)\n p = grid.cell_to_point(c)\n np.testing.assert_array_equal(c, grid.point_to_cell(p))\n \n assert grid.contains_point(grid.get_random_point())\n w = 0.499 * (b - a).min()\n assert grid.contains_point(grid.get_random_point(w))", "def test_grdview_wrong_kind_of_grid(grid):\n dataset = grid.to_dataset() # convert xarray.DataArray to xarray.Dataset\n assert data_kind(dataset) == \"matrix\"\n\n fig = Figure()\n with pytest.raises(GMTInvalidInput):\n fig.grdview(grid=dataset)", "def test_fill_grid_type(self):\n field = Field(3, [monster_1, monster_2, monster_3])\n\n field.finalise_grid()\n pprint(field.grid)\n\n for i in field.grid:\n for j in i:\n if type(j) != SubField:\n assert False\n\n assert True", "def test_not_none_grid(self):\n img = cv2.imread(constant.TEST_FRAME_PATH)\n grid_size = 3\n img_map, row, column = functions.grid_map(img, grid_size)\n self.assertNotEqual(img_map, None)", "def test_board_print_as_grid(self):\n board = Board()\n 
board_grid_str = \"\"\"\n |0|1|2|\n |3|4|5|\n |6|7|8|\n \"\"\"\n self.assertEqual(board_grid_str, board.__str__())", "def assert_grid_info_f09_g17(self, grid_info):\n self.assertEqual(grid_info[\"ATM_NX\"], 288)\n self.assertEqual(grid_info[\"ATM_NY\"], 192)\n self.assertEqual(grid_info[\"ATM_GRID\"], \"0.9x1.25\")\n self.assertEqual(grid_info[\"ATM_DOMAIN_MESH\"], \"fv0.9x1.25_ESMFmesh.nc\")\n\n self.assertEqual(grid_info[\"LND_NX\"], 288)\n self.assertEqual(grid_info[\"LND_NY\"], 192)\n self.assertEqual(grid_info[\"LND_GRID\"], \"0.9x1.25\")\n self.assertEqual(grid_info[\"LND_DOMAIN_MESH\"], \"fv0.9x1.25_ESMFmesh.nc\")\n\n self.assertEqual(grid_info[\"OCN_NX\"], 320)\n self.assertEqual(grid_info[\"OCN_NY\"], 384)\n self.assertEqual(grid_info[\"OCN_GRID\"], \"gx1v7\")\n self.assertEqual(grid_info[\"OCN_DOMAIN_MESH\"], \"gx1v7_ESMFmesh.nc\")\n\n self.assertEqual(grid_info[\"ICE_NX\"], 320)\n self.assertEqual(grid_info[\"ICE_NY\"], 384)\n self.assertEqual(grid_info[\"ICE_GRID\"], \"gx1v7\")\n self.assertEqual(grid_info[\"ICE_DOMAIN_MESH\"], \"gx1v7_ESMFmesh.nc\")\n\n self.assertEqual(\n grid_info[\"ATM2OCN_FMAPNAME\"], \"map_fv0.9x1.25_TO_gx1v7_aave.nc\"\n )\n self.assertEqual(\n grid_info[\"OCN2ATM_FMAPNAME\"], \"map_gx1v7_TO_fv0.9x1.25_aave.nc\"\n )\n self.assertFalse(\"OCN2ATM_SHOULDBEABSENT\" in grid_info)", "def test_just_inside():\n rmg = RasterModelGrid(4, 5, dx=2.0)\n\n assert_equal(rfuncs.is_coord_on_grid(rmg, (0., 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (8. - 1e-12, 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6. - 1e-12)), True)", "def check_sample_map_equals_sample_grid(self):\n return (\n self.grid.x_size == self.sample_map.x_size\n and self.grid.y_size == self.sample_map.x_size\n and self.grid.x_offset == 0\n and self.grid.y_offset == 0\n )", "def is_grid_valid(grid: list) -> bool:\n for row in grid:\n if not is_row_valid(row):\n log.debug(\"Row dimensions are invalid...%s\", row)\n return False\n\n transposed_grid = map(list, zip(*grid))\n for column in transposed_grid:\n if not is_column_valid(column):\n log.debug(\"Column dimensions are invalid...%s\", column)\n return False\n\n log.debug(\"Entire grid is valid!\")\n return True", "def test_pointbasedgrids_post(self):\n pass", "def test_grid_plotting():\n grids.UnitGrid([4]).plot()\n grids.UnitGrid([4, 4]).plot()\n \n with pytest.raises(NotImplementedError):\n grids.UnitGrid([4, 4, 4]).plot()\n\n grids.PolarGrid(4, 8).plot()\n grids.PolarGrid((2, 4), 8).plot()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the interpolated grid spec is as expected
def test_interpolated_Grid_spec(self):
    IGrid_spec = self.Result.Grid_spec
    self.assertEqual(IGrid_spec.param_names, self.params)
    self.assertEqual(IGrid_spec.param_display_names, self.params)
    self.assertEqual(IGrid_spec.shape, tuple(self.interpd_shape))
    self.assertEqual(IGrid_spec.n_gridpoints, np.product(self.interpd_shape))
[ "def test_interpolation_to_grid(grid):\n sf = ScalarField.random_uniform(grid)\n sf2 = sf.interpolate_to_grid(grid)\n np.testing.assert_allclose(sf.data, sf2.data, rtol=1e-6)", "def test_vector_grid_formatting(self):\n self.assertEqual(1, 1)", "def test_raw_Grid_spec(self):\n RGrid_spec = self.NB_Model_1.Raw_grids\n self.assertEqual(RGrid_spec.param_names, self.params)\n self.assertEqual(RGrid_spec.ndim, len(self.params))\n self.assertEqual(RGrid_spec.shape, self.n_gridpts_list)\n self.assertEqual(RGrid_spec.n_gridpoints, np.product(self.n_gridpts_list))\n for a1, a2 in zip(RGrid_spec.param_values_arrs, self.val_arrs.values()):\n self.assertTrue(np.allclose(np.asarray(a1), np.asarray(a2)))", "def test_generic_grid():\n for dim in (1, 2, 3):\n periodic = random.choices([True, False], k=dim)\n shape = np.random.randint(2, 8, size=dim)\n a = np.random.random(dim)\n b = a + np.random.random(dim)\n \n cases = [grids.UnitGrid(shape, periodic=periodic),\n grids.CartesianGrid(np.c_[a, b], shape, periodic=periodic)]\n for grid in cases:\n assert grid.dim == dim\n dim_axes = len(grid.axes) + len(grid.axes_symmetric)\n assert dim_axes == dim\n vol = grid.cell_volume_data * np.prod(shape)\n assert grid.volume == pytest.approx(vol)\n \n # random points\n points = [[np.random.uniform(a[i], b[i]) for i in range(dim)]\n for _ in range(10)]\n c = grid.point_to_cell(points)\n p = grid.cell_to_point(c)\n np.testing.assert_array_equal(c, grid.point_to_cell(p))\n \n assert grid.contains_point(grid.get_random_point())\n w = 0.499 * (b - a).min()\n assert grid.contains_point(grid.get_random_point(w))", "def test_grid_plotting():\n grids.UnitGrid([4]).plot()\n grids.UnitGrid([4, 4]).plot()\n \n with pytest.raises(NotImplementedError):\n grids.UnitGrid([4, 4, 4]).plot()\n\n grids.PolarGrid(4, 8).plot()\n grids.PolarGrid((2, 4), 8).plot()", "def test_grid_volume():\n for grid in iter_grids():\n vol_discr = np.broadcast_to(grid.cell_volume_data, grid.shape).sum()\n assert vol_discr == pytest.approx(grid.volume)", "def test_grid_plotting():\n grids.UnitGrid([4]).plot()\n grids.UnitGrid([4, 4]).plot()\n\n with pytest.raises(NotImplementedError):\n grids.UnitGrid([4, 4, 4]).plot()\n\n grids.PolarGrid(4, 8).plot()\n grids.PolarGrid((2, 4), 8).plot()", "def test_interpolation_singular():\n grid = UnitGrid([1])\n field = ScalarField(grid, data=3)\n\n # test constant boundary conditions\n x = np.linspace(0, 1, 7).reshape((7, 1))\n y = field.interpolate(x)\n np.testing.assert_allclose(y, 3)\n\n # # test boundary interpolation\n for upper in [True, False]:\n val = field.get_boundary_values(axis=0, upper=upper, bc=[{\"value\": 1}])\n assert val == pytest.approx(1)\n\n b_field = field.get_boundary_field((0, upper), bc=[{\"value\": 1}])\n assert b_field.data == pytest.approx(1)", "def test_odd_grid(self):\n grid_size = functions.set_grid(576, 720, constant.MAX_GRID, constant.MIN_GRID)\n self.assertNotEqual(grid_size % 2, 0)", "def test_correct_resample_interpolate(self):\n (res, freq) = dt_freq(self.hom.ref_regress.df_model.index)\n assert (res, freq) == (1., 'M')\n\n assert self.can_adjusted.index.size == self.values_to_adjust.index.size\n\n corrections = self.hom.adjust_obj.adjustments # interpolated M to D\n assert all(self.can_adjusted == (self.values_to_adjust - corrections)) # todo: this should be +\n\n plot_corrections = self.hom.plot_adjustments()\n\n model = self.hom.get_model_params() # the 1 model that counts\n\n assert model['poly_order'] == 2\n np.testing.assert_almost_equal(model['coef_0'], 1.1555392089)\n 
np.testing.assert_almost_equal(model['coef_1'], -0.010662915)\n np.testing.assert_almost_equal(model['inter'], 0.22256330678)\n np.testing.assert_almost_equal(model['r2'], 0.68610786923)\n np.testing.assert_almost_equal(model['n_input'], 51)\n np.testing.assert_almost_equal(model['filter_p'], 5.)", "def test_just_inside():\n rmg = RasterModelGrid(4, 5, dx=2.0)\n\n assert_equal(rfuncs.is_coord_on_grid(rmg, (0., 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (8. - 1e-12, 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6. - 1e-12)), True)", "def test_uniform_ones(mock_visibility_data, tmp_path):\n\n coords = coordinates.GridCoords(cell_size=0.005, npix=800)\n\n uu, vv, weight, data_re, data_im = mock_visibility_data\n weight = 0.1 * np.ones_like(uu)\n data_re = np.ones_like(uu)\n data_im = np.zeros_like(uu)\n\n gridder = gridding.Gridder(\n coords=coords,\n uu=uu,\n vv=vv,\n weight=weight,\n data_re=data_re,\n data_im=data_im,\n )\n\n # with uniform weighting, the gridded sheet should be uniform and = 1\n gridder._grid_visibilities(weighting=\"uniform\")\n\n print(\n \"re\",\n np.mean(gridder.data_re_gridded),\n np.std(gridder.data_re_gridded),\n np.min(gridder.data_re_gridded),\n np.max(gridder.data_re_gridded),\n )\n\n im = plt.imshow(\n gridder.ground_cube[4].real, origin=\"lower\", extent=gridder.coords.vis_ext\n )\n plt.colorbar(im)\n plt.savefig(tmp_path / \"gridded_re.png\", dpi=300)\n\n plt.figure()\n\n im2 = plt.imshow(\n gridder.ground_cube[4].imag, origin=\"lower\", extent=gridder.coords.vis_ext\n )\n plt.colorbar(im2)\n plt.savefig(tmp_path / \"gridded_im.png\", dpi=300)\n\n plt.close(\"all\")\n\n # if the gridding worked, we should have real values approximately 1\n assert np.max(gridder.data_re_gridded) == pytest.approx(1)\n # except in the cells with no data\n assert np.min(gridder.data_re_gridded) == pytest.approx(0)\n\n # make sure all average values are set to 1\n diff_real = np.abs(1 - gridder.vis_gridded[gridder.mask].real)\n assert np.all(diff_real < 1e-10)\n\n # and imaginary values approximately 0 everywhere\n assert np.min(gridder.data_im_gridded) == pytest.approx(0)\n assert np.max(gridder.data_im_gridded) == pytest.approx(0)", "def test_cell_to_point_conversion():\n for grid in iter_grids():\n c = grid.point_to_cell(grid.get_random_point())\n c2 = grid.point_to_cell(grid.cell_to_point(c))\n np.testing.assert_almost_equal(c, c2)\n\n p_emtpy = np.zeros((0, grid.num_axes))\n assert grid.point_to_cell(p_emtpy).size == 0\n assert grid.cell_to_point(p_emtpy).size == 0", "def test_set_grid(self):\n\t\ttest_grid = self.load_grid_181()\n\t\tresult_grid = test_grid.get_grid()\n\n\t\tself.assertEquals(result_grid, range(1, 82))", "def test_cell_to_point_conversion():\n for grid in iter_grids():\n c = grid.point_to_cell(grid.get_random_point())\n c2 = grid.point_to_cell(grid.cell_to_point(c))\n np.testing.assert_almost_equal(c, c2)\n \n p_emtpy = np.zeros((0, grid.num_axes))\n assert grid.point_to_cell(p_emtpy).size == 0\n assert grid.cell_to_point(p_emtpy).size == 0", "def test_grid_layer_update_grid_source_geotiff(self):\n pass", "def test_just_outside():\n rmg = RasterModelGrid(4, 5, dx=2.0)\n\n assert_equal(rfuncs.is_coord_on_grid(rmg, (0. - 1e-12, 4.)), False)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (8., 4.)), False)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0. 
- 1e-12)), False)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6.)), False)", "def test_interpolation_mutable():\n grid = UnitGrid([2], periodic=True)\n field = ScalarField(grid)\n\n field.data = 1\n np.testing.assert_allclose(field.interpolate([0.5]), 1)\n field.data = 2\n np.testing.assert_allclose(field.interpolate([0.5]), 2)\n\n # test overwriting field values\n data = np.full_like(field._data_full, 3)\n intp = field.make_interpolator()\n np.testing.assert_allclose(intp(np.array([0.5]), data), 3)\n\n # test overwriting field values\n data = np.full_like(field.data, 4)\n intp = field.make_interpolator()\n np.testing.assert_allclose(intp(np.array([0.5]), data), 4)", "def test_pointbasedgrids_post(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the list of best model keys is what is documented
def test_best_model_dict_keys(self):
    expected_keys = sorted(["table", "chi2", "extinction_Av_mag", "grid_location"])
    key_list = sorted(list(self.Result.Posterior.best_model.keys()))
    self.assertEqual(key_list, expected_keys)
[ "def _check_model_params(self):", "def test_all_field_opts_model(self, all_field_opts):\n for field in all_field_opts:\n api_keys = field.keys()\n # Tests if API and model have same number of keys\n assert len(self.model_keys) == len(api_keys)\n # Tests if the API and model keys and value types are equal\n for key in api_keys:\n assert key in self.model_keys\n assert type(field[key]) in field_opt_model[key]", "def _checkModelParameters(self):\r\n\t param_counts = self._countModelParameters()\r\n\t expected_param_counts = {\r\n\t # vocab_size * embedding_size\r\n\t \"seq_embedding\": 24100200,\r\n\t # (embedding_size + num_lstm_units + 1) * 4 * num_lstm_units\r\n\t \"lstm\": 467968,\r\n\t # (num_lstm_units + 1) * logits_size\r\n\t \"logits\": 32896,\r\n\t \"global_step\": 1,\r\n\t }\r\n\t self.assertDictEqual(expected_param_counts, param_counts)", "def check_partial_keywords_for_new_model(self, **input_dict):\n model = input_dict[self.get_model_descriptor_name()]\n actual_dict = self.get_model_dict(model)\n for key in input_dict:\n if key not in actual_dict:\n raise ie.InputArgumentsError(\n 'Input Arguments Error',\n input_dict,\n actual_dict)\n return True", "def check_model_constraints(self):\n # TODO: adapterize, utilitize or do anythin' or leave me as is.", "def test_best_model_table_fields(self):\n correct_fields = [\"In_lhood?\", \"Obs\", \"Model\", \"Resid_Stds\", \"Obs_S/N\"]\n t_fields = self.DF_best.columns.tolist()\n self.assertTrue(t_fields == correct_fields, t_fields)", "def validate(self):\n return isinlist(str(self.result_keys), [str(ele) for ele in self._choices])", "def verify_model(loaded_data, expected, model_type):\n for key in expected:\n if key == 'password' or key == 'wishlist':\n continue\n assert loaded_data[key] == expected[key]\n assert loaded_data['type'] == model_type\n if 'wishlist' in loaded_data:\n for i in range(len(expected['wishlist'])):\n verify_model(loaded_data['wishlist'][i], expected['wishlist'][i], 'book')", "def test_all_users(self, all_users):\n assert len(all_users) > 0\n for user in all_users:\n api_keys = user.keys()\n assert len(api_keys) == len(self.model_keys)\n for key in api_keys:\n assert key in self.model_keys\n assert type(user[key]) in user_model[key]", "def _validate_params(self):\n assert all(key in self.params for key in self.required_params), set(self.required_params) - set(\n self.params.keys()\n )", "def matrix_check_req(self, matrix_reqlist):\n for req_shift in matrix_reqlist:\n if not self.option_matrix[req_shift]:\n raise KeyError('The required option %s missing from opt-matrix' %(req_shift))", "def validate_request_keys_ordered(posted_data: dict, endpoint_name: str) -> bool:\n return list(posted_data) == C.API_REQUEST_KEYS[endpoint_name]", "def return_possible_fitting_models():\n model_dictionary_keys = fitting_models.keys()\n for i,model_name in enumerate(model_dictionary_keys):\n print(\"%i: '%s'\" % (i+1, model_name))", "def _validate_json( self ):\n for key in self._key_list:\n if not key in self._json_content.keys():\n raise AccessManagementException(self._key_error_message)", "def test__list_fields(self):\n correct_fields = [\n \"classifier\",\n \"features\",\n \"num_features\",\n \"method\",\n \"num_examples\",\n \"target\",\n ]\n\n self.assertItemsEqual(self.model._list_fields(), correct_fields)", "def test_missing_keys(self):\n self.assertEqual(None, tsig_keys.check({}))", "def test_user_list_keys(self):\n pass", "def test_minimum_metadata(self):\n registry = self.manager.registry\n\n for key, collection_pydantic 
in sorted(self.manager.collections.items()):\n self.assertIsInstance(collection_pydantic, Collection)\n collection = collection_pydantic.dict()\n with self.subTest(key=key):\n self.assertRegex(key, \"^\\\\d{7}$\")\n self.assertIn(\"name\", collection)\n self.assertIn(\"authors\", collection)\n self.assertIsInstance(collection[\"authors\"], list, msg=f\"Collection: {collection}\")\n for author in collection[\"authors\"]:\n self.assertIn(\"name\", author)\n self.assertIn(\"orcid\", author)\n self.assertRegex(author[\"orcid\"], self.manager.get_pattern(\"orcid\"))\n self.assertIn(\"description\", collection)\n incorrect = {prefix for prefix in collection[\"resources\"] if prefix not in registry}\n self.assertEqual(set(), incorrect, msg=\"Invalid prefixes\")\n duplicates = {\n prefix\n for prefix, count in Counter(collection[\"resources\"]).items()\n if 1 < count\n }\n self.assertEqual(set(), duplicates, msg=\"Duplicates found\")\n self.assertEqual(\n sorted(collection_pydantic.resources), collection_pydantic.resources\n )", "def _is_valid_key(key: CachePlayerKey) -> bool:\n if not isinstance(key, tuple) or len(key) != 2:\n return False\n\n if not (isinstance(key[0], Player) and isinstance(key[1], Player)):\n return False\n\n if Classifiers[\"stochastic\"](key[0]) or Classifiers[\"stochastic\"](key[1]):\n return False\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that the "checking columns" in the estimate table are all showing that the estimates are good.
def test_estimate_bounds_checks(self):
    DF = self.Result.Posterior.DF_estimates  # Parameter estimate table
    for p in ["12 + log O/H", "log P/k", "log U"]:
        for col in ["Est_in_CI68?", "Est_in_CI95?"]:
            self.assertTrue(DF.loc[p,col] == "Y")
        for col in ["Est_at_lower?", "Est_at_upper?", "P(lower)>50%?",
                    "P(upper)>50%?"]:
            self.assertTrue(DF.loc[p,col] == "N")
        self.assertTrue(DF.loc[p,"n_local_maxima"] == 1)
[ "def data_checks():\n for func in [read_adult, read_bank, read_compas, read_german, read_sqf,\n read_synthetic]:\n xtr, xte, ytr, yte, ztr, zte = func()\n\n if np.any(xtr[:, 0] != 1.) or np.any(xte[:, 0] != 1.):\n print(\"WARNING: intercept issue in {}\".format(func.__name__))\n if np.any((ytr != 1) & (ytr != 0)) or np.any((yte != 1) & (yte != 0)):\n print(\"WARNING: label issue in {}\".format(func.__name__))\n if np.any(np.std(xtr[:, 1:], 0) == 0) or np.any(np.std(xte[:, 1:], 0) == 0):\n print(\"WARNING: constant column in X {}\".format(func.__name__))\n if np.any(np.std(ztr, 0) == 0) or np.any(np.std(zte, 0) == 0):\n print(\"WARNING: constant column in Z {}\".format(func.__name__))\n if np.std(ytr) == 0 or np.std(yte) == 0:\n print(\"WARNING: constant column in y {}\".format(func.__name__))\n\n print(\"Done running checks.\")", "def test_best_model_table_fields(self):\n correct_fields = [\"In_lhood?\", \"Obs\", \"Model\", \"Resid_Stds\", \"Obs_S/N\"]\n t_fields = self.DF_best.columns.tolist()\n self.assertTrue(t_fields == correct_fields, t_fields)", "def test_column_values(self):\n for column in self.table.columns:\n assert len(column.values) == 0", "def errorCheckSubmission( self, answer):\n \n for colName in [\"Code\", \"Convention\", \"GroupOrder\"]:\n assert colName in answer.columns, \"We need a %s column in the master spreadsheet\" % colName", "def checkMatchStatistic(self):\n numOfNan = self.matches[self.matches['w_ace'].isnull() | self.matches['w_df'].isnull() |\n self.matches['w_svpt'].isnull() | self.matches['w_1stIn'].isnull() |\n self.matches['w_1stWon'].isnull() | self.matches['w_2ndWon'].isnull() |\n self.matches['w_SvGms'].isnull() | self.matches['w_bpSaved'].isnull() |\n self.matches['w_bpFaced'].isnull()].shape[0]\n\n numOfNan += self.matches[self.matches['l_ace'].isnull() | self.matches['l_df'].isnull() |\n self.matches['l_svpt'].isnull() | self.matches['l_1stIn'].isnull() |\n self.matches['l_1stWon'].isnull() | self.matches['l_2ndWon'].isnull() |\n self.matches['l_SvGms'].isnull() | self.matches['l_bpSaved'].isnull() |\n self.matches['l_bpFaced'].isnull()].shape[0]\n\n print(\"Sanity checking match statistic: \" + str(numOfNan))\n\n self.matches.dropna(\n subset=['w_ace', 'w_df', 'w_svpt', 'w_1stIn', 'w_1stWon', 'w_2ndWon', 'w_SvGms', 'w_bpSaved', 'w_bpFaced'],\n inplace=True)\n\n self.matches.dropna(\n subset=['l_ace', 'l_df', 'l_svpt', 'l_1stIn', 'l_1stWon', 'l_2ndWon', 'l_SvGms', 'l_bpSaved', 'l_bpFaced'],\n inplace=True)", "def _validate(self):\n warnings = []\n # check that no columns have null values\n null_cols = self.view[self._activeCols].isnull().any()\n for i in null_cols.iteritems():\n col, hasna = i\n if hasna:\n warnings.append(\"{} has null values.\".format(col))\n # cross check values with allowed values in self.schema\n if self.schema is not None:\n malformed_values = schemaModule.validateView(self.view, self.schema)\n if malformed_values:\n for k in malformed_values:\n warnings.append(\"{} contains the following values which are \"\n \"not specified in the schema: {}\".format(\n k, \", \".join(map(str, malformed_values[k]))) +\n \"\\n\\tPossible values are {}\".format(\n \", \".join(self.schema.loc[k].value.values)))\n return warnings", "def consistency_check(self):\n for _row in self.lattice:\n assert len(_row) == self.col_dim\n assert callable(self.neighbor_function)\n assert callable(self.weight_function)", "def validate(ddtable):\n margin_upp = ddtable.sum(axis=1).transpose()\n count_upp = count_vec(margin_upp)\n remainder_upp = 
np.remainder(margin_upp, count_upp)\n\n margin_low = ddtable.sum(axis=0)\n count_low = count_vec(margin_low)\n remainder_low = np.remainder(margin_low, count_low)\n\n if not ((remainder_low == 0).all() and (remainder_upp == 0).all()):\n return False\n\n # e_ij <= d^u_i * d^l_j\n div_upp = np.divide(margin_upp, count_upp)\n div_low = np.divide(margin_low, count_low)\n for i in xrange(0,div_upp.size):\n for j in xrange(0,div_low.size):\n if ddtable[i,j] > div_upp.A1[i] * div_low.A1[j]: # is this the right way to access this?\n print (i, j, ddtable[i,j], div_upp.A1[i] * div_low.A1[j])\n return False\n return True", "def _check_target_columns(self):\n if not self.target_columns:\n self.target_columns = self._infer_target_columns()\n else:\n for target in self.target_columns:\n info = 'target \"%s\" not found in data frame'\n assert target in self.columns, info % target", "def check_data_consistency(self):\r\n current_data = self.all_data\r\n required_columns = [self._year, self._month, self._day_of_week]\r\n if current_data.columns.any() not in required_columns:\r\n self.reload_data()", "def checkColumns(self, row, columns, log):\n rescols = set(row.keys())\n cols = set(columns.values())\n if not rescols >= cols:\n log.error(\n \"result missing columns: '%s'\",\n \",\".join(cols.difference(rescols)),\n )\n return False\n return True", "def test_column_values(self):\n for column in self.table.columns:\n assert len(column.values) == 3", "def check_for_invalid_columns(problems: list, table: str, df: DataFrame) -> list:\n r = cs.GTFS_REF\n valid_columns = r.loc[r[\"table\"] == table, \"column\"].values\n for col in df.columns:\n if col not in valid_columns:\n problems.append([\"warning\", f\"Unrecognized column {col}\", table, []])\n\n return problems", "def check_for_required_columns(problems: list, table: str, df: DataFrame) -> list:\n r = cs.GTFS_REF\n req_columns = r.loc[(r[\"table\"] == table) & r[\"column_required\"], \"column\"].values\n for col in req_columns:\n if col not in df.columns:\n problems.append([\"error\", f\"Missing column {col}\", table, []])\n\n return problems", "def wants_well_tables(self):\n if self.db_type == DB_SQLITE:\n return False\n else:\n return (\n self.wants_agg_mean_well\n or self.wants_agg_median_well\n or self.wants_agg_std_dev_well\n )", "def show_diff_table(self):\n return not self.outputs_match() and not \\\n (self.given_empty and not self.correct_empty)", "def _check_all_columns(self, mag_columns, filter_names):\n \n if (filter_names != None):\n \n # checksum on number of filters vs number of columns\n if ( len(filter_names) != len(mag_columns) ):\n msg = \"ERROR! 
column number (\" + str(len(mag_columns)) + \") and \"\n msg +=\"filter number (\" + str(len(filter_names)) + \") don't match\"\n raise ValueError(msg)\n \n # check all the columns to return (also visually that they match the filter names!)\n for column, filt in zip(mag_columns, filter_names):\n print \"col =\", column ,\"filter =\", filt ,\"match??\"\n self._check_column_valid(column)\n else:\n \n for column in mag_columns:\n self._check_column_valid(column)", "def test_compare_table_sanity_check(model, logger):\n description = description_for(model)\n assert compare_tables(model, description)\n assert not logger.caplog.record_tuples", "def _get_mismatches(self) -> None:\r\n # Add column to list if it contains a mismatch.\r\n mis = [col for col in self._df_s.columns if not self._df_t[col].equals(self._df_s[col])]\r\n if mis:\r\n self._msg.column_mismatches(columns=self._col_mismatches)\r\n else:\r\n self._msg.column_mismatches_none()\r\n self._col_mismatches = mis" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression check that chi2 doesn't change
def test_chi2(self):
    chi2 = self.Result.Posterior.best_model["chi2"]
    self.assertTrue(np.isclose(chi2, 2568.7, atol=0.2), msg=str(chi2))
[ "def test_chi2(self):\n chi2 = self.Result.Posterior.best_model[\"chi2\"]\n self.assertTrue(np.isclose(chi2, 2522.7, atol=0.2), msg=str(chi2))", "def set_chi2(self):\n\n y_observed = self._y\n y_expected = []\n #i = 0\n for x in self._x:\n #y_expected.append(self.value(x) - y_observed[i])\n y_expected.append(self.value(x))\n #i += 1\n if y_observed and y_observed != [] and y_expected and y_expected != []:\n self._chi2 = chi2(y_observed, y_expected)\n else:\n self._chi2 = None", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def test_chi2lnlike_withcov():\n ### all all covariances\n data = np.array([[5,-4], [3,-2], [1,0] ])\n model = np.zeros(data.shape)\n jitter = np.zeros(data.shape)\n errs = np.array([[2,2], [2,2], [2,2]])\n covs = np.array([1, 0.25, 0.25])\n corrs = covs/errs[:,0]/errs[:,1]\n\n chi2s = lnlike.chi2_lnlike(data, errs, corrs, model, jitter, [])\n\n residuals = data - model\n for res, err, cov, chi2 in zip(residuals, errs, covs, chi2s):\n cov_matrix = np.array([[err[0]**2, cov], [cov, err[1]**2]])\n cov_inv = np.linalg.inv(cov_matrix)\n cov_inv_dot_diff = np.dot(cov_inv, res)\n logdet = np.linalg.slogdet(cov_matrix)[1]\n res_cov_res = res.dot(cov_inv_dot_diff)\n numpy_chi2 = -0.5 * (res_cov_res + logdet + 2 * np.log(2 * np.pi)) \n\n assert np.sum(chi2) == pytest.approx(numpy_chi2)\n\n ### only one covariance term\n covs = np.array([1, np.nan, np.nan])\n corrs = covs/errs[:,0]/errs[:,1]\n new_chi2s = lnlike.chi2_lnlike(data, errs, corrs, model, jitter, [])\n\n assert chi2s[0] == pytest.approx(new_chi2s[0])", "def chi2(self):\n\n x2 = None\n\n if self._fit:\n x2 = self._fit.chi2()\n else:\n raise NotDefined('Fit has not been defined')\n\n return x2", "def calc_chi2(models_test):\r\n cross_true = np.load('data/X-exp.npy') # true xsec (experimental)\r\n dx_true = np.load('data/dX-exp.npy') # error in true xsec\r\n\r\n for model in models_test:\r\n my_cross = np.load('data/X-pre-%s.npy'%model) #predicted xsec\r\n res = (cross_true - np.mean(my_cross,axis=0))\r\n res = (res/dx_true)\r\n chi2 = np.sum(res**2)\r\n chi2 = chi2/my_cross.shape[1]\r\n print('model:',model,'chi2:',chi2) #prints chi2 value with model name\r", "def chi2(self):\n\n return self._chi2", "def test_chi_squared(logging_mixin: Any) -> None:\n # Setup\n h = histogram.Histogram1D(\n bin_edges=np.array(np.arange(-0.5, 5.5)), y=np.array(np.ones(5)), errors_squared=np.ones(5),\n )\n chi_squared = cost_function.ChiSquared(f=func_1, data=h)\n\n # Check that it's set up properly\n assert chi_squared.func_code.co_varnames == [\"a\", \"b\"]\n\n # Calculate the chi_squared for the given parameters.\n result = chi_squared(np.array(range(-1, -6, -1)), np.zeros(5))\n # Each term is (1 - -1)^2 / 1^2 = 4\n assert result == 4 * 5", "def test_robust(self):\n methods = ['huber','least-absolute-residual']\n opts = ['osqp','scipy'] \n f = lambda x: (-0.3*x**4 -3*x**3 +0.6*x**2 +2.4*x - 0.5)\n\n N = 50 # number of training points (note, some will be removed below)\n n = 4 # degree of polynomial\n state = 15 # random seed\n \n # Add some noise\n noise_var = 0.1\n x = np.sort(np.random.RandomState(state).uniform(-1,1,N))\n y = f(x) + 
np.random.RandomState(state).normal(0,noise_var,size=N).T\n \n # delete training points between 0 < x < 0.3\n pos = ((x>0)*(x<0.3)).nonzero()[0]\n x = np.delete(x,pos)\n y = np.delete(y,pos)\n\n # Add some outliers\n randrange = range(10,17)\n y[randrange] = y[randrange]+np.random.RandomState(1).normal(0,4**2,len(randrange))\n \n # Test data\n x = x.reshape(-1,1)\n xtest = np.linspace(-1,1,100).reshape(-1,1)\n ytest = f(xtest)\n\n # param and basis\n param = Parameter(distribution='uniform', lower=-1, upper=1, order=n)\n basis = Basis('univariate')\n\n # Test Poly regressions\n for method in methods:\n for opt in opts:\n if method != 'huber' and opt != 'scipy': # TODO - remove this if statement once scipy huber regression implemented\n poly = Poly(parameters=param, basis=basis, method=method,\n sampling_args= {'mesh': 'user-defined', 'sample-points':x.reshape(-1,1), 'sample-outputs': y.reshape(-1,1)},\n solver_args={'M':0.2**2,'verbose':False,'optimiser':opt})\n poly.set_model()\n _,r2 = poly.get_polyscore(X_test=xtest,y_test=ytest)\n self.assertTrue(r2 > 0.997,msg='Poly method = %a, optimiser = %a' %(method,opt))", "def chi_squared_feature_selection(X,y,k=80):\n\n # need to min max scale data first because Chi2 doesn't take negative values\n min_max_scaler = MinMaxScaler()\n X_scaled = min_max_scaler.fit_transform(X)\n transformer = SelectKBest(chi2,k=k)\n X_transformed = transformer.fit_transform(X_scaled,y)\n return (X_transformed,transformer)", "def test_l2_estimator():\n estimator = L2Estimator(10, 10)\n (res, answer, error) = check_error(estimator, TEST_FILE, metric=\"l2\")\n print(\"multiplicative error:\", error)", "def test_2x2_analytical_solution():\n residuals = np.array([[5,-4], [3,-2], [1,0] ])\n\n errs = np.array([[2,2], [2,2], [2,2]])\n covs = np.array([1, 0.25, 0.25])\n corrs = covs/errs[:,0]/errs[:,1]\n\n chi2s = lnlike._chi2_2x2cov(np.array([residuals]), np.array([errs**2]), corrs)\n\n # compare to numpy solution\n for res, err, cov, chi2 in zip(residuals, errs, covs, chi2s[0]):\n cov_matrix = np.array([[err[0]**2, cov], [cov, err[1]**2]])\n cov_inv = np.linalg.inv(cov_matrix)\n cov_inv_dot_diff = np.dot(cov_inv, res)\n logdet = np.linalg.slogdet(cov_matrix)[1]\n res_cov_res = res.dot(cov_inv_dot_diff)\n numpy_chi2 = -0.5 * (res_cov_res + logdet + 2 * np.log(2 * np.pi)) \n\n assert np.sum(chi2) == pytest.approx(numpy_chi2)", "def test_chi_squared():\n assert frequency.chi_squared({'a': 2, 'b': 3}, {'a': 2, 'b': 3}) == 0", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def test_LinearRegression_err():\n np.random.seed(0)\n X = np.random.random((10, 1))\n y = np.random.random(10) + 1\n dy = 0.1\n\n y = np.random.normal(y, dy)\n\n clf1 = LinearRegression().fit(X, y, dy)\n clf2 = skLinearRegression().fit(X / dy, y / dy)\n\n assert_allclose(clf1.coef_[1:], clf2.coef_)\n assert_allclose(clf1.coef_[0], clf2.intercept_ * dy)", "def chi_square(self):\n for docid in self.ent_train:\n content = self.ent_train[docid][0] + self.ent_train[docid][1]\n content = self.getTerms(content)\n content = list(set(content))\n for term in content:\n if not term in self.score['ent']:\n n11 = float(self.df[term][0])\n n10 = float(self.df[term][1] + self.df[term][2])\n n01 = float(self.num_ent_train - n11)\n n00 = float(self.num_bus_train + self.num_pol_train)\n a = n11 + n10 + n01 + n00\n b = math.pow(((n11 * n00) - (n10 * n01)), 2)\n c = (n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00)\n chi = (a * b) / c\n self.score['ent'][term] = 
chi\n for docid in self.bus_train:\n content = self.bus_train[docid][0] + self.bus_train[docid][1]\n content = self.getTerms(content)\n content = list(set(content))\n for term in content:\n if not term in self.score['bus']:\n n11 = float(self.df[term][1])\n n10 = float(self.df[term][0] + self.df[term][2])\n n01 = float(self.num_bus_train - n11)\n n00 = float(self.num_ent_train + self.num_pol_train)\n a = n11 + n10 + n01 + n00\n b = math.pow(((n11 * n00) - (n10 * n01)), 2)\n c = (n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00)\n chi = (a * b) / c\n self.score['bus'][term] = chi \n for docid in self.pol_train:\n content = self.pol_train[docid][0] + self.pol_train[docid][1]\n content = self.getTerms(content)\n content = list(set(content))\n for term in content:\n if not term in self.score['pol']:\n n11 = float(self.df[term][2])\n n10 = float(self.df[term][1] + self.df[term][0])\n n01 = float(self.num_pol_train - n11)\n n00 = float(self.num_ent_train + self.num_pol_train)\n a = n11 + n10 + n01 + n00\n b = math.pow(((n11 * n00) - (n10 * n01)), 2)\n c = (n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00)\n chi = (a * b) / c\n self.score['pol'][term] = chi", "def test_single_linear_regression_r_squared(reg_model):\n assert(pytest.approx(reg_model.r_squared(), 0.01) == 0.52)", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def test_chi2lnlike():\n # test with a single model\n model = np.zeros((3, 2))\n jitter = np.zeros((3, 2))\n data = np.ones((3, 2))\n errors = np.ones((3, 2))\n\n seppa_indices = [np.array([1])]\n\n chi2 = lnlike.chi2_lnlike(data, errors, None, model, jitter, seppa_indices)\n assert chi2.shape == (3, 2)\n assert chi2 == pytest.approx(\n -0.5 * np.ones((3, 2)) - np.log(np.sqrt(2*np.pi*np.ones((3, 2))))\n )\n\n # test with multiple models\n model = np.zeros((3, 2, 5))\n jitter = np.zeros((3, 2, 5))\n data = np.ones((3, 2))\n errors = np.ones((3, 2))\n\n seppa_indices = [np.array([1])]\n\n chi2 = lnlike.chi2_lnlike(data, errors, None, model, jitter, seppa_indices)\n assert chi2.shape == (3, 2, 5)\n assert chi2 == pytest.approx(\n -0.5 * np.ones((3, 2, 5)) - np.log(np.sqrt(2*np.pi*np.ones((3, 2, 5))))\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression check that chi2 doesn't change
def test_chi2(self):
    chi2 = self.Result.Posterior.best_model["chi2"]
    self.assertTrue(np.isclose(chi2, 2522.7, atol=0.2), msg=str(chi2))
[ "def test_chi2(self):\n chi2 = self.Result.Posterior.best_model[\"chi2\"]\n self.assertTrue(np.isclose(chi2, 2568.7, atol=0.2), msg=str(chi2))", "def set_chi2(self):\n\n y_observed = self._y\n y_expected = []\n #i = 0\n for x in self._x:\n #y_expected.append(self.value(x) - y_observed[i])\n y_expected.append(self.value(x))\n #i += 1\n if y_observed and y_observed != [] and y_expected and y_expected != []:\n self._chi2 = chi2(y_observed, y_expected)\n else:\n self._chi2 = None", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def test_chi2lnlike_withcov():\n ### all all covariances\n data = np.array([[5,-4], [3,-2], [1,0] ])\n model = np.zeros(data.shape)\n jitter = np.zeros(data.shape)\n errs = np.array([[2,2], [2,2], [2,2]])\n covs = np.array([1, 0.25, 0.25])\n corrs = covs/errs[:,0]/errs[:,1]\n\n chi2s = lnlike.chi2_lnlike(data, errs, corrs, model, jitter, [])\n\n residuals = data - model\n for res, err, cov, chi2 in zip(residuals, errs, covs, chi2s):\n cov_matrix = np.array([[err[0]**2, cov], [cov, err[1]**2]])\n cov_inv = np.linalg.inv(cov_matrix)\n cov_inv_dot_diff = np.dot(cov_inv, res)\n logdet = np.linalg.slogdet(cov_matrix)[1]\n res_cov_res = res.dot(cov_inv_dot_diff)\n numpy_chi2 = -0.5 * (res_cov_res + logdet + 2 * np.log(2 * np.pi)) \n\n assert np.sum(chi2) == pytest.approx(numpy_chi2)\n\n ### only one covariance term\n covs = np.array([1, np.nan, np.nan])\n corrs = covs/errs[:,0]/errs[:,1]\n new_chi2s = lnlike.chi2_lnlike(data, errs, corrs, model, jitter, [])\n\n assert chi2s[0] == pytest.approx(new_chi2s[0])", "def chi2(self):\n\n x2 = None\n\n if self._fit:\n x2 = self._fit.chi2()\n else:\n raise NotDefined('Fit has not been defined')\n\n return x2", "def calc_chi2(models_test):\r\n cross_true = np.load('data/X-exp.npy') # true xsec (experimental)\r\n dx_true = np.load('data/dX-exp.npy') # error in true xsec\r\n\r\n for model in models_test:\r\n my_cross = np.load('data/X-pre-%s.npy'%model) #predicted xsec\r\n res = (cross_true - np.mean(my_cross,axis=0))\r\n res = (res/dx_true)\r\n chi2 = np.sum(res**2)\r\n chi2 = chi2/my_cross.shape[1]\r\n print('model:',model,'chi2:',chi2) #prints chi2 value with model name\r", "def chi2(self):\n\n return self._chi2", "def test_chi_squared(logging_mixin: Any) -> None:\n # Setup\n h = histogram.Histogram1D(\n bin_edges=np.array(np.arange(-0.5, 5.5)), y=np.array(np.ones(5)), errors_squared=np.ones(5),\n )\n chi_squared = cost_function.ChiSquared(f=func_1, data=h)\n\n # Check that it's set up properly\n assert chi_squared.func_code.co_varnames == [\"a\", \"b\"]\n\n # Calculate the chi_squared for the given parameters.\n result = chi_squared(np.array(range(-1, -6, -1)), np.zeros(5))\n # Each term is (1 - -1)^2 / 1^2 = 4\n assert result == 4 * 5", "def test_robust(self):\n methods = ['huber','least-absolute-residual']\n opts = ['osqp','scipy'] \n f = lambda x: (-0.3*x**4 -3*x**3 +0.6*x**2 +2.4*x - 0.5)\n\n N = 50 # number of training points (note, some will be removed below)\n n = 4 # degree of polynomial\n state = 15 # random seed\n \n # Add some noise\n noise_var = 0.1\n x = np.sort(np.random.RandomState(state).uniform(-1,1,N))\n y = f(x) + 
np.random.RandomState(state).normal(0,noise_var,size=N).T\n \n # delete training points between 0 < x < 0.3\n pos = ((x>0)*(x<0.3)).nonzero()[0]\n x = np.delete(x,pos)\n y = np.delete(y,pos)\n\n # Add some outliers\n randrange = range(10,17)\n y[randrange] = y[randrange]+np.random.RandomState(1).normal(0,4**2,len(randrange))\n \n # Test data\n x = x.reshape(-1,1)\n xtest = np.linspace(-1,1,100).reshape(-1,1)\n ytest = f(xtest)\n\n # param and basis\n param = Parameter(distribution='uniform', lower=-1, upper=1, order=n)\n basis = Basis('univariate')\n\n # Test Poly regressions\n for method in methods:\n for opt in opts:\n if method != 'huber' and opt != 'scipy': # TODO - remove this if statement once scipy huber regression implemented\n poly = Poly(parameters=param, basis=basis, method=method,\n sampling_args= {'mesh': 'user-defined', 'sample-points':x.reshape(-1,1), 'sample-outputs': y.reshape(-1,1)},\n solver_args={'M':0.2**2,'verbose':False,'optimiser':opt})\n poly.set_model()\n _,r2 = poly.get_polyscore(X_test=xtest,y_test=ytest)\n self.assertTrue(r2 > 0.997,msg='Poly method = %a, optimiser = %a' %(method,opt))", "def chi_squared_feature_selection(X,y,k=80):\n\n # need to min max scale data first because Chi2 doesn't take negative values\n min_max_scaler = MinMaxScaler()\n X_scaled = min_max_scaler.fit_transform(X)\n transformer = SelectKBest(chi2,k=k)\n X_transformed = transformer.fit_transform(X_scaled,y)\n return (X_transformed,transformer)", "def test_l2_estimator():\n estimator = L2Estimator(10, 10)\n (res, answer, error) = check_error(estimator, TEST_FILE, metric=\"l2\")\n print(\"multiplicative error:\", error)", "def test_2x2_analytical_solution():\n residuals = np.array([[5,-4], [3,-2], [1,0] ])\n\n errs = np.array([[2,2], [2,2], [2,2]])\n covs = np.array([1, 0.25, 0.25])\n corrs = covs/errs[:,0]/errs[:,1]\n\n chi2s = lnlike._chi2_2x2cov(np.array([residuals]), np.array([errs**2]), corrs)\n\n # compare to numpy solution\n for res, err, cov, chi2 in zip(residuals, errs, covs, chi2s[0]):\n cov_matrix = np.array([[err[0]**2, cov], [cov, err[1]**2]])\n cov_inv = np.linalg.inv(cov_matrix)\n cov_inv_dot_diff = np.dot(cov_inv, res)\n logdet = np.linalg.slogdet(cov_matrix)[1]\n res_cov_res = res.dot(cov_inv_dot_diff)\n numpy_chi2 = -0.5 * (res_cov_res + logdet + 2 * np.log(2 * np.pi)) \n\n assert np.sum(chi2) == pytest.approx(numpy_chi2)", "def test_chi_squared():\n assert frequency.chi_squared({'a': 2, 'b': 3}, {'a': 2, 'b': 3}) == 0", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def test_LinearRegression_err():\n np.random.seed(0)\n X = np.random.random((10, 1))\n y = np.random.random(10) + 1\n dy = 0.1\n\n y = np.random.normal(y, dy)\n\n clf1 = LinearRegression().fit(X, y, dy)\n clf2 = skLinearRegression().fit(X / dy, y / dy)\n\n assert_allclose(clf1.coef_[1:], clf2.coef_)\n assert_allclose(clf1.coef_[0], clf2.intercept_ * dy)", "def chi_square(self):\n for docid in self.ent_train:\n content = self.ent_train[docid][0] + self.ent_train[docid][1]\n content = self.getTerms(content)\n content = list(set(content))\n for term in content:\n if not term in self.score['ent']:\n n11 = float(self.df[term][0])\n n10 = float(self.df[term][1] + self.df[term][2])\n n01 = float(self.num_ent_train - n11)\n n00 = float(self.num_bus_train + self.num_pol_train)\n a = n11 + n10 + n01 + n00\n b = math.pow(((n11 * n00) - (n10 * n01)), 2)\n c = (n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00)\n chi = (a * b) / c\n self.score['ent'][term] = 
chi\n for docid in self.bus_train:\n content = self.bus_train[docid][0] + self.bus_train[docid][1]\n content = self.getTerms(content)\n content = list(set(content))\n for term in content:\n if not term in self.score['bus']:\n n11 = float(self.df[term][1])\n n10 = float(self.df[term][0] + self.df[term][2])\n n01 = float(self.num_bus_train - n11)\n n00 = float(self.num_ent_train + self.num_pol_train)\n a = n11 + n10 + n01 + n00\n b = math.pow(((n11 * n00) - (n10 * n01)), 2)\n c = (n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00)\n chi = (a * b) / c\n self.score['bus'][term] = chi \n for docid in self.pol_train:\n content = self.pol_train[docid][0] + self.pol_train[docid][1]\n content = self.getTerms(content)\n content = list(set(content))\n for term in content:\n if not term in self.score['pol']:\n n11 = float(self.df[term][2])\n n10 = float(self.df[term][1] + self.df[term][0])\n n01 = float(self.num_pol_train - n11)\n n00 = float(self.num_ent_train + self.num_pol_train)\n a = n11 + n10 + n01 + n00\n b = math.pow(((n11 * n00) - (n10 * n01)), 2)\n c = (n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00)\n chi = (a * b) / c\n self.score['pol'][term] = chi", "def test_single_linear_regression_r_squared(reg_model):\n assert(pytest.approx(reg_model.r_squared(), 0.01) == 0.52)", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def test_chi2lnlike():\n # test with a single model\n model = np.zeros((3, 2))\n jitter = np.zeros((3, 2))\n data = np.ones((3, 2))\n errors = np.ones((3, 2))\n\n seppa_indices = [np.array([1])]\n\n chi2 = lnlike.chi2_lnlike(data, errors, None, model, jitter, seppa_indices)\n assert chi2.shape == (3, 2)\n assert chi2 == pytest.approx(\n -0.5 * np.ones((3, 2)) - np.log(np.sqrt(2*np.pi*np.ones((3, 2))))\n )\n\n # test with multiple models\n model = np.zeros((3, 2, 5))\n jitter = np.zeros((3, 2, 5))\n data = np.ones((3, 2))\n errors = np.ones((3, 2))\n\n seppa_indices = [np.array([1])]\n\n chi2 = lnlike.chi2_lnlike(data, errors, None, model, jitter, seppa_indices)\n assert chi2.shape == (3, 2, 5)\n assert chi2 == pytest.approx(\n -0.5 * np.ones((3, 2, 5)) - np.log(np.sqrt(2*np.pi*np.ones((3, 2, 5))))\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure dereddening attributes added to Result object.
def test_dereddening_result_attributes(self):
    self.assertTrue(self.Result.deredden)
    self.assertTrue(self.Result.propagate_dered_errors)
[ "def set_attributes_all_required(instance, attrs, res):\r\n for attr in attrs:\r\n attr_val = res.get(attr)\r\n # all attributes are required\r\n if not attr_val:\r\n print(attr)\r\n abort(400)\r\n setattr(instance, attr, attr_val)\r\n return instance", "def _populate_attributes(self, config, record, context, data):\n search_return_attributes = config['search_return_attributes']\n for attr in search_return_attributes.keys():\n if attr in record[\"attributes\"]:\n if record[\"attributes\"][attr]:\n data.attributes[search_return_attributes[attr]] = record[\"attributes\"][attr]\n satosa_logging(\n logger,\n logging.DEBUG,\n \"Setting internal attribute {} with values {}\".format(\n search_return_attributes[attr],\n record[\"attributes\"][attr]\n ),\n context.state\n )\n else:\n satosa_logging(\n logger,\n logging.DEBUG,\n \"Not setting internal attribute {} because value {} is null or empty\".format(\n search_return_attributes[attr],\n record[\"attributes\"][attr]\n ),\n context.state\n )", "def check_all_set(self):\n if not self.all_set():\n logger.critical('All attributes on the %s object must be set. '\n 'Missing attributes: %s', self.__class__.__name__,\n self.get_missing_attributes())\n sys.exit(1)", "def test_sanity(self):\r\n\r\n _values = (0, 1, 'Test Method', 'Test Boundary Conditions',\r\n 'Test Remarks')\r\n\r\n self.DUT.set_attributes(_values)\r\n _result = self.DUT.get_attributes()\r\n self.assertEqual(_result, _values)", "def __hasattr__(self, name):\n return name in self.result or hasattr(self.result, name)", "def __init__(self):\n super(AnalyzerResult, self).__init__()\n self.analyzer_name = None\n self.attribute_name = None\n self.attribute_value = None", "def test_to_dict_contains_added_attributes(self):\n b = BaseModel()\n attrs = [\"id\", \"created_at\", \"updated_at\", \"__class__\"]\n b.name = \"Firdaus\"\n b.email = \"firduas@gmail.com\"\n attrs.extend([\"name\", \"email\"])\n for attr in attrs:\n self.assertIn(attr, b.to_dict())", "def testmissingrequiredattributes(self) -> None:\r\n for missing_attribute in MPA.required_attrs:\r\n self.parse()\r\n delattr(self.data, missing_attribute)\r\n for method_class in self.methods:\r\n with pytest.raises(MissingAttributeError):\r\n self.calculate(method_class)", "def _validate(self):\n self._validate_data_type()\n self._validate_characters()\n self._validate_regexp()\n logger.debug(\"Instance attributes passed validation.\")", "def build_attributes(self):\n pass", "def test_set_good_attributes(self):\r\n\r\n _values = (0, 1, 'Test Method', 'Test Boundary Conditions',\r\n 'Test Remarks')\r\n\r\n (_error_code,\r\n _error_msg) = self.DUT.set_attributes(_values)\r\n self.assertEqual(_error_code, 0)", "def create_result(self):\n raise NotImplementedError(\"Abstract Method:create_result.\")", "def testattr(self):\n self.assertTrue(hasattr(self.basemodel, \"created_at\"))\n self.assertTrue(hasattr(self.basemodel, \"id\"))\n self.assertFalse(hasattr(self.basemodel, \"updated_at\"))\n self.assertFalse(hasattr(self.basemodel, \"random_attr\"))\n self.assertFalse(hasattr(self.basemodel, \"name\"))\n self.basemodel.name = \"Betty\"\n self.basemodel.age = 89\n self.assertTrue(hasattr(self.basemodel, \"name\"))\n self.assertEqual(self.basemodel.name, \"Betty\")\n self.assertTrue(hasattr(self.basemodel, \"age\"))\n delattr(self.basemodel, \"name\")\n self.assertFalse(hasattr(self.basemodel, \"name\"))\n self.assertEqual(self.basemodel.__class__.__name__, \"BaseModel\")", "def test_get_attributes(self):\r\n\r\n _values = (0, None, '', '', 
'')\r\n\r\n self.assertEqual(self.DUT.get_attributes(), _values)", "def test_attributes_exist(self):\n self.assertTrue(hasattr(User, 'email'))\n self.assertTrue(hasattr(User, 'password'))\n self.assertTrue(hasattr(User, 'first_name'))\n self.assertTrue(hasattr(User, 'last_name'))", "def testHasAttributes(self):\n self.assertTrue(hasattr(self.a, 'name'))\n self.assertTrue(hasattr(self.a, 'id'))\n self.assertTrue(hasattr(self.a, 'created_at'))\n self.assertTrue(hasattr(self.a, 'updated_at'))", "def test_missingattribute(self, get_user_class_representation):\n with pytest.raises(AttributeError):\n get_user_class_representation.missingattribute", "def ensure_has_attrs(self, *args):\n for attr in args:\n if attr not in self:\n raise self.mk_except('Block(ty=' + self.block_type + ') ' +\n 'missing attr \"' + attr + '\"')", "def test_get_custom_attributes(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test check the parameter estimate is as expected.
def test_parameter_estimates(self):
    DF_est = self.Result.Posterior.DF_estimates  # DataFrame
    p0_est = DF_est.loc["p0", "Estimate"]
    self.assertTrue(np.isclose(p0_est, self.expected_p0, atol=1))
[ "def test_parameter_estimates(self):\n DF_est = self.Result.Posterior.DF_estimates\n self.assertTrue(all(p in DF_est.index for p in self.params))\n # Tolerance for distance between gridpoint we chose and the estimate:\n grid_sep_frac = 0.1 # Allowed fraction of distance between gridpoints\n for p, test_ind in zip(self.params, self.test_gridpoint):\n tol = np.diff(self.val_arrs[p])[0] * grid_sep_frac\n value = self.val_arrs[p][test_ind] # Expected parameter value\n est = DF_est.loc[p, \"Estimate\"] # NebulaBayes estimate\n self.assertTrue(np.isclose(est, value, atol=tol))", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def test_estimate_bounds_checks(self):\n DF = self.Result.Posterior.DF_estimates # Parameter estimate table\n for p in [\"12 + log O/H\", \"log P/k\", \"log U\"]:\n for col in [\"Est_in_CI68?\", \"Est_in_CI95?\"]:\n self.assertTrue(DF.loc[p,col] == \"Y\")\n for col in [\"Est_at_lower?\", \"Est_at_upper?\", \"P(lower)>50%?\",\n \"P(upper)>50%?\"]:\n self.assertTrue(DF.loc[p,col] == \"N\")\n self.assertTrue(DF.loc[p,\"n_local_maxima\"] == 1)", "def test_set_params_Reg_feature_selector():\n feature_selector = Reg_feature_selector()\n feature_selector.set_params(strategy=\"variance\")\n assert feature_selector.strategy == \"variance\"\n feature_selector.set_params(threshold=0.2)\n assert feature_selector.threshold == 0.2\n with pytest.warns(UserWarning) as record:\n feature_selector.set_params(wrong_strategy=\"wrong_strategy\")\n assert len(record) == 1", "def test_single_linear_regression_rmse(reg_model):\n assert(pytest.approx(reg_model.root_mean_squared_error(), 0.02) == 0.31)", "def test_params_module():\n # Get the inputs required by the Scales object\n (profile, disp_phases, z0) = get_sim_data()\n\n\n # Test that the governing parameters are computed correctly\n # First, test a single dispersed phase\n model = params.Scales(profile, disp_phases[1])\n check_get_variables(model, z0, 0.15, 0.21724144538674975,\n 0.001724100901081246, 0.22611661456807244, 0.15)\n\n # Second, try a list of dispersed phases, where the dominant phase is\n # not the first one\n particles = [disp_phases[1], disp_phases[0], disp_phases[2]]\n model = params.Scales(profile, particles)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Third, make sure we get the same answer as the previous case if the\n # particles are in a different order (i.e., the original order)\n model = params.Scales(profile, disp_phases)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Using the latest Scales object, check that the other methods return\n # the correct results. 
Since these methods only depend on the values\n # of B, N, and us computed by the get_variables() method, only one case\n # needs to be tested\n assert_approx_equal(model.h_T(z0), 346.40139518559153, significant=6)\n assert_approx_equal(model.h_P(z0), 627.57408319500291, significant=6)\n assert_approx_equal(model.h_S(z0, 0.15), 295.45365120553163,\n significant=6)\n assert_approx_equal(model.lambda_1(z0, 0), 0.74523735215223819,\n significant=6)\n assert_approx_equal(model.u_inf_crit(z0), 0.063723667111426671,\n significant=6)", "def testDomainParamEstimateVomm(marklist,domlist,paramdict,sortmarkers,sidecoefs,nodecounts,params,varcount):\n compcount = 1\n if params['model'] in [\"linear\",\"binary\"] and params[\"order\"] == 1:\n [Xdom,ydom,solx] = sidecoefs\n elif params['model'] in [\"linear\",\"binary\"]:\n [Xdom,ydom,lincoefs,logcoefs,solx,termobjval,muvec] = sidecoefs\n elif params['model'] == \"nonparam\":\n [lincoefs,logcoefs,solx,objval,muvec,compcount] = sidecoefs\n assert TestSEDFMest.testPreParamDataVomm(marklist,domlist,sortmarkers,nodecounts,params)\n assert TestSEDFMest.testParamDict(paramdict,params['model'],params['width'],compcount)\n assert TestSEDFMest.compareSol2dictVomm(solx,paramdict,sortmarkers,params['width'],compcount)\n return True\n if params['model'] == \"nonparam\": \n assert TestSEDFMest.testPreNonParamData(marklist,domlist,sortmarkers,varcount,nodecounts,params,compcount,logcoefs) \n estobjval = TestSEDFMest.estNonParamObj(paramdict,marklist,domlist,sortmarkers,params,nodecount,compcount,stmuvec,endmuvec)\n assert abs(stobjval + endobjval - estobjval) <= 0.1\n elif params['model'] in [\"binary\",\"linear\"] and params[\"order\"] != 1:\n #def temploglikeEst(paramx,lincoefs,logcoefs,tmuvec):\n # \"\"\"negative log likelihood obj + penalty\n # \"\"\"\n # tobjval = np.dot(paramx,lincoefs)\n # templist = [np.dot(paramx,logcoef) for logcoef in logcoefs]\n # tobjval += sum([tval if tval >= 10 else math.log(1.0 + math.exp(tval)) for tval in templist])\n # tobjval += estPenaltyParam(paramx,tmuvec,singcount,width,curlam)\n # return tobjval \n #print \"start\" \n #print temploglikeEst(res['x'],lincoefs,logcoefs,muvec)\n #print res['fun']\n #assert abs(temploglikeEst(res['x'],lincoefs,logcoefs,muvec) - res['fun']) < 0.1 \n for lcoef in lincoefs:\n assert lcoef >= 0\n estobjval = TestSEDFMest.estParamEstObj(marklist,domlist,paramdict,sortmarkers,nodecounts,params)\n print estobjval\n exit(1)\n return True", "def test_predict(self):\n assert 2 == 2", "def test_estimate(self):\n expectedResult = 0.926\n credibility = TestCredibility.credibilityEstimator.estimate(self.warp)\n self.assertCredibilityEstimation(credibility, expectedResult)", "def test_reestimate_params(self):\n pdf_matrix = self.cluster_obj_4.compute_pdf_matrix()\n posterior_matrix = self.cluster_obj_4.compute_posterior(pdf_matrix)\n self.cluster_obj_4.reestimate_params(posterior_matrix)\n self.assertEqual(round(self.cluster_obj_4.mean[0], 2), 0.24)\n self.assertEqual(round(self.cluster_obj_4.variance[0], 2), 0.02)\n self.assertEqual(round(self.cluster_obj_4.weight[0], 2), 0.13)", "def test_robust(self):\n methods = ['huber','least-absolute-residual']\n opts = ['osqp','scipy'] \n f = lambda x: (-0.3*x**4 -3*x**3 +0.6*x**2 +2.4*x - 0.5)\n\n N = 50 # number of training points (note, some will be removed below)\n n = 4 # degree of polynomial\n state = 15 # random seed\n \n # Add some noise\n noise_var = 0.1\n x = np.sort(np.random.RandomState(state).uniform(-1,1,N))\n y = f(x) + 
np.random.RandomState(state).normal(0,noise_var,size=N).T\n \n # delete training points between 0 < x < 0.3\n pos = ((x>0)*(x<0.3)).nonzero()[0]\n x = np.delete(x,pos)\n y = np.delete(y,pos)\n\n # Add some outliers\n randrange = range(10,17)\n y[randrange] = y[randrange]+np.random.RandomState(1).normal(0,4**2,len(randrange))\n \n # Test data\n x = x.reshape(-1,1)\n xtest = np.linspace(-1,1,100).reshape(-1,1)\n ytest = f(xtest)\n\n # param and basis\n param = Parameter(distribution='uniform', lower=-1, upper=1, order=n)\n basis = Basis('univariate')\n\n # Test Poly regressions\n for method in methods:\n for opt in opts:\n if method != 'huber' and opt != 'scipy': # TODO - remove this if statement once scipy huber regression implemented\n poly = Poly(parameters=param, basis=basis, method=method,\n sampling_args= {'mesh': 'user-defined', 'sample-points':x.reshape(-1,1), 'sample-outputs': y.reshape(-1,1)},\n solver_args={'M':0.2**2,'verbose':False,'optimiser':opt})\n poly.set_model()\n _,r2 = poly.get_polyscore(X_test=xtest,y_test=ytest)\n self.assertTrue(r2 > 0.997,msg='Poly method = %a, optimiser = %a' %(method,opt))", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def test_single_linear_regression_r_squared(reg_model):\n assert(pytest.approx(reg_model.r_squared(), 0.01) == 0.52)", "def test_linear_regression(single_ts):\n results = linear_regression(single_ts, dim='time')\n for v in results.data_vars:\n assert results[v]", "def test_positive_pred(self,y):\n self.assertTrue((y>0).all())", "def test_model(self):\n power_ebsilon = -31.769\n power_tespy = round(\n self.nw.busses['total output power'].P.val / 1e6, 3)\n msg = (\n 'The total power calculated (' + str(power_tespy) + ') does not '\n 'match the power calculated with the EBSILON model (' +\n str(power_ebsilon) + ').')\n assert power_tespy == power_ebsilon, msg\n\n T_c79_ebsilon = 296.254\n T_c79_tespy = round(self.nw.get_conn('79').T.val, 3)\n msg = (\n 'The temperature at connection 79 calculated (' +\n str(T_c79_tespy) + ') does not match the temperature calculated '\n 'with the EBSILON model (' + str(T_c79_ebsilon) + ').')\n assert T_c79_tespy == T_c79_ebsilon, msg", "def test(self , regressor , df_train , df_test):\n if self.summary_ is None : \n self.summary_ = self.selections_.apply( _score_test \n , args = (regressor , df_train , df_test) \n , axis = 1)\n else : \n self.summary_ = pd.concat([self.summary_ , self.selections_.apply( _score_test , args = (regressor , df_train , df_test) , axis = 1)]\n , keys = ['Validation' , 'Test'] , axis = 1)\n return", "def test_sklearn_check_estimator(seco_estimator_class):\n check_estimator(seco_estimator_class)", "def test_mutable(data):\n (input_data, y, formula) = data\n model_prefit = gammy.BayesianGAM(formula)\n model_fitted = model_prefit.fit(input_data, y)\n assert_arrays_equal(\n model_prefit.mean_theta,\n model_fitted.mean_theta\n )\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test check likelihood is all zero.
def test_likelihood_all_zero(self):
    likelihood = self.Result.Likelihood.nd_pdf
    self.assertTrue(np.all(likelihood == 0))
[ "def test_likelihood_mostly_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.sum(likelihood != 0) < 65)", "def test_positive_pred(self,y):\n self.assertTrue((y>0).all())", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def likelihood(self, x: np.ndarray) -> np.ndarray:", "def test_multiscale_zero_parallel(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x,\n self.x,\n njobs=6))", "def perplexity(model, test_data):\n return np.exp(-model.mean_log_likelihood(test_data))", "def test_basics(self):\n self.report('Testing adding data, evaluation and marginal likelihood.' +\n ' Probabilistic test, might fail.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n num_tests = 0\n num_successes = 0\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n # Predictions & Marginal likelihood\n curr_preds, _ = curr_gp.eval(dataset[2], 'std')\n curr_gp_err = compute_average_prediction_error(dataset, curr_preds)\n const_err = compute_average_prediction_error(dataset, dataset[1].mean())\n lml = curr_gp.compute_log_marginal_likelihood()\n is_success = curr_gp_err < const_err\n num_tests += 1\n num_successes += is_success\n self.report(('(%s, ntr=%d, nte=%d):: GP-lml=%0.4f, GP-err=%0.4f, ' +\n 'Const-err=%0.4f. succ=%d')%(dataset[-1][:5], len(dataset[0]),\n len(dataset[2]), lml, curr_gp_err, const_err, is_success),\n 'test_result')\n succ_frac = num_successes / float(num_tests)\n self.report('Summary: num_successes / num_floats = %d/%d = %0.4f'%(num_successes,\n num_tests, succ_frac), 'test_result')\n assert succ_frac > 0.5", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def test_student_t_log_likelihood_single(self):\n model = pints.toy.ConstantModel(1)\n parameters = [0]\n times = np.asarray([1, 2, 3])\n model.simulate(parameters, times)\n values = np.asarray([1.0, -10.7, 15.5])\n problem = pints.SingleOutputProblem(model, times, values)\n log_likelihood = pints.StudentTLogLikelihood(problem)\n # Test Student-t_logpdf(values|mean=0, df = 3, scale = 10) = -11.74..\n self.assertAlmostEqual(log_likelihood([0, 3, 10]), -11.74010919785115)", "def test_single_linear_regression_rmse(reg_model):\n assert(pytest.approx(reg_model.root_mean_squared_error(), 0.02) == 0.31)", "def likelihoods(self, step):", "def test_ll_nom(self):\n pars = list(self.spec.central)\n nominal = self.spec(pars)\n self.spec.set_data(nominal) # nominal data\n stats = np.array(self.spec.stats)\n # event with nominal, ll penalty from poisson normalization\n ll = 0 # log likelihood\n ll += np.sum(logPoisson(nominal, nominal, stats))\n self.assertAlmostEqual(ll, self.spec.ll(pars))", "def test_2D_fully_missing():\n\n # Define data\n N = 20\n D = 2\n U = np.random.uniform(size=(N, D))\n theta = np.vstack(np.array([1., 2.]))\n 
sigma = 0.1\n Y = U @ theta + sigma * np.vstack(np.random.randn(N))\n\n # Prior\n mu_z = np.array([0.])\n sigma_z = np.array([1.])\n\n # Initial guess\n theta_init = np.array([0.5, 0.5])\n\n # Create some missing data\n z_true = [U[2][0], U[2][1]]\n U[2][0] = np.nan\n U[2][1] = np.nan\n\n # Run linear regression\n lr = LinearReg_MissingData(U, Y, N, D, theta_init, mu_z, sigma_z, sigma)\n lr.train(Nitt=20)\n\n # Define the model\n def f(u, theta):\n return u @ theta\n\n # Define likelihood\n def likelihood(u, y, theta):\n p = norm(loc=y, scale=sigma)\n return p.pdf(f(u, theta))\n\n # Generate samples from the prior\n Ns = 10000\n prior_z = mvn(mean=np.repeat(mu_z, 2),\n cov=np.diag(np.repeat(sigma_z**2, 2)))\n prior_Z_samples = prior_z.rvs(Ns)\n\n # Importance sample from the z posterior\n w = np.zeros(Ns)\n for i in range(Ns):\n w[i] = likelihood(u=prior_Z_samples[i, :], y=Y[2], theta=lr.theta)\n wn = w / np.sum(w)\n\n # IS estimate of E[z] posterior mean\n EZ_IS = wn @ prior_Z_samples\n\n # IS estimate of E[z z^T]\n EZZ_IS = np.zeros([2, 2])\n for i in range(Ns):\n EZZ_IS += (np.array([prior_Z_samples[i, :]]).T @\n np.array([prior_Z_samples[i, :]]) * wn[i])\n\n # IS estimate of posterior covariance matrix\n prior_Z_samples -= EZ_IS # Remove mean\n COV_Z_IS = np.zeros([2, 2])\n for i in range(Ns):\n COV_Z_IS += (np.array([prior_Z_samples[i, :]]).T @\n np.array([prior_Z_samples[i, :]]) * wn[i])\n\n # Posterior over z from code\n posterior_z = lr.posterior_z[2]\n\n # Check that the importance sampled mean is close to closed-form\n # expression for the mean\n assert np.allclose(EZ_IS, posterior_z.mean, atol=0.1)\n\n # Check that the importance sampled estimate of z z^T is close to\n # closed-form expression\n assert np.allclose(EZZ_IS, lr.EUU[2], atol=0.2)\n\n # Check that the importance sampled covariance matrix is close to\n # closed-form expression for the covariance matrix\n assert np.allclose(COV_Z_IS, posterior_z.cov, atol=0.2)", "def test_nonnegative_samples(self):\n\n rng = np.random.RandomState([1,2,3])\n\n dim = self.dim\n\n num_trials = 3\n\n for trial in xrange(num_trials):\n mu = rng.randn(dim).astype(floatX)\n beta = rng.uniform(.1,10.,(dim,)).astype(floatX)\n self.p.mu.set_value(mu)\n mu = rng.randn(dim).astype(floatX)\n self.q.mu.set_value(mu)\n self.p.beta.set_value(beta)\n beta = rng.uniform(.1,10.,(dim,)).astype(floatX)\n self.q.beta.set_value(beta)\n\n kl = kl_divergence(self.q,self.p)\n\n kl = function([],kl)()\n\n if kl < 0.:\n raise AssertionError(\"KL divergence should \"\n \"be non-negative but is \"+\n str(kl))", "def test_same_zero(self):\n\n rng = np.random.RandomState([1,2,3])\n\n dim = self.dim\n\n num_trials = 3\n\n for trial in xrange(num_trials):\n mu = rng.randn(dim).astype(floatX)\n beta = rng.uniform(.1,10.,(dim,)).astype(floatX)\n\n self.p.mu.set_value(mu)\n self.q.mu.set_value(mu)\n self.p.beta.set_value(beta)\n self.q.beta.set_value(beta)\n\n kl = kl_divergence(self.q,self.p)\n\n kl = function([],kl)()\n\n tol = 1e-7\n # Second part of the check handles cases where kl is None, etc.\n if kl > tol or not (kl <= tol):\n raise AssertionError(\"KL divergence between two \"\n \"equivalent models should be 0 but is \"+\n str(kl))", "def test_nb_fit(self):\n P = np.array([[0.5],\n [0.3],\n [0.4]])\n R = np.array([[1.],\n [8.],\n [2.]])\n data, _ = simulation.generate_nb_data(P, R, 500)\n p, r = nb_fit(data)\n p_nans = np.isnan(p)\n r_nans = np.isnan(r)\n self.assertFalse(p_nans.any())\n self.assertFalse(r_nans.any())\n self.assertFalse(np.isinf(p).any())\n 
self.assertFalse(np.isinf(r).any())\n self.assertTrue(np.sum(np.abs(p - P.flatten())**2)/3 < 0.5)\n print(r)\n print(np.sqrt(np.sum(np.abs(r - R.flatten())**2))/3)\n self.assertTrue(np.sqrt(np.sum(np.abs(r - R.flatten())**2))/3 < 3)", "def test_linear_regression(single_ts):\n results = linear_regression(single_ts, dim='time')\n for v in results.data_vars:\n assert results[v]", "def test_gaussian_log_likelihoods_single_output(self):\n model = pints.toy.LogisticModel()\n parameters = [0.015, 500]\n sigma = 0.1\n times = np.linspace(0, 1000, 100)\n values = model.simulate(parameters, times)\n values += np.random.normal(0, sigma, values.shape)\n problem = pints.SingleOutputProblem(model, times, values)\n\n # Test if known/unknown give same result\n l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)\n l2 = pints.GaussianLogLikelihood(problem)\n self.assertAlmostEqual(l1(parameters), l2(parameters + [sigma]))\n\n # Test invalid constructors\n self.assertRaises(\n ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, 0)\n self.assertRaises(\n ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, -1)\n\n # known noise value checks\n model = pints.toy.ConstantModel(1)\n times = np.linspace(0, 10, 10)\n values = model.simulate([2], times)\n org_values = np.arange(10) / 5.0\n problem = pints.SingleOutputProblem(model, times, org_values)\n log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 1.5)\n self.assertAlmostEqual(log_likelihood([-1]), -21.999591968683927)\n l, dl = log_likelihood.evaluateS1([3])\n self.assertAlmostEqual(l, -23.777369746461702)\n self.assertAlmostEqual(dl[0], -9.3333333333333321)\n self.assertEqual(len(dl), 1)\n\n # unknown noise value checks\n log_likelihood = pints.GaussianLogLikelihood(problem)\n self.assertAlmostEqual(log_likelihood([-3, 1.5]), -47.777369746461702)\n\n # unknown noise check sensitivity\n model = pints.toy.ConstantModel(1)\n times = np.linspace(0, 10, 10)\n values = model.simulate([2], times)\n org_values = np.arange(10) / 5.0\n problem = pints.SingleOutputProblem(model, times, org_values)\n log_likelihood = pints.GaussianLogLikelihood(problem)\n l, dl = log_likelihood.evaluateS1([7, 2.0])\n self.assertAlmostEqual(l, -63.04585713764618)\n self.assertAlmostEqual(dl[0], -15.25)\n self.assertAlmostEqual(dl[1], 41.925000000000004)\n\n # Test deprecated aliases\n l1 = pints.KnownNoiseLogLikelihood(problem, sigma)\n self.assertIsInstance(l1, pints.GaussianKnownSigmaLogLikelihood)\n\n l2 = pints.UnknownNoiseLogLikelihood(problem)\n self.assertIsInstance(l2, pints.GaussianLogLikelihood)\n\n # test multiple output unknown noise\n model = pints.toy.ConstantModel(3)\n parameters = [0, 0, 0]\n times = [1, 2, 3, 4]\n values = model.simulate([0, 0, 0], times)\n org_values = [[10.7, 3.5, 3.8],\n [1.1, 3.2, -1.4],\n [9.3, 0.0, 4.5],\n [1.2, -3, -10]]\n problem = pints.MultiOutputProblem(model, times, org_values)\n log_likelihood = pints.GaussianLogLikelihood(problem)\n # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +\n # Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +\n # Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)\n # = -50.5088...\n self.assertAlmostEqual(\n log_likelihood(parameters + [3.5, 1, 12]),\n -50.508848609684783\n )\n l, dl = log_likelihood.evaluateS1(parameters + [3.5, 1, 12])\n self.assertAlmostEqual(l, -50.508848609684783)\n self.assertAlmostEqual(dl[0], 1.820408163265306)\n self.assertAlmostEqual(dl[1], 3.7000000000000002)\n self.assertAlmostEqual(dl[2], -0.021527777777777774)\n 
self.assertAlmostEqual(dl[3], 3.6065306122448981)\n self.assertAlmostEqual(dl[4], 27.490000000000002)\n self.assertAlmostEqual(dl[5], -0.25425347222222222)\n\n # test multiple output model dimensions of sensitivities\n d = 20\n model = pints.toy.ConstantModel(d)\n parameters = [0 for i in range(d)]\n times = [1, 2, 3, 4]\n values = model.simulate(parameters, times)\n org_values = np.ones((len(times), d))\n extra_params = np.ones(d).tolist()\n problem = pints.MultiOutputProblem(model, times, org_values)\n log_likelihood = pints.GaussianLogLikelihood(problem)\n l = log_likelihood(parameters + extra_params)\n l1, dl = log_likelihood.evaluateS1(parameters + extra_params)\n self.assertTrue(np.array_equal(len(dl),\n len(parameters + extra_params)))\n self.assertEqual(l, l1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test check posterior is all zero.
def test_posterior_all_zero(self):
    posterior = self.Result.Posterior.nd_pdf
    self.assertTrue(np.all(posterior == 0))
[ "def test_positive_pred(self,y):\n self.assertTrue((y>0).all())", "def test_likelihood_all_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.all(likelihood == 0))", "def test_likelihood_mostly_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.sum(likelihood != 0) < 65)", "def test_parameter_estimates(self):\n DF_est = self.Result.Posterior.DF_estimates # DataFrame\n p0_est = DF_est.loc[\"p0\", \"Estimate\"]\n self.assertTrue(np.isclose(p0_est, self.expected_p0, atol=1))", "def test_post_mean():\n return abs(fit.mu.mean - mu_post)/fit.mu.se_mean < 3.", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def test_multiscale_zero_parallel(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x,\n self.x,\n njobs=6))", "def perplexity(model, test_data):\n return np.exp(-model.mean_log_likelihood(test_data))", "def test_prediction(self): \n \n N = 100\n theta = np.random.normal(size = (N,))\n X = np.eye(N)\n dt = 0.1\n \n theta_0 = np.zeros(theta.shape)\n model = PPModel(X,coef = theta,dt = dt)\n Y = model.sampleEvents(theta)\n theta_MLE = model.fit(Y,theta_0).x\n Y_predicted = model.sampleEvents(theta_MLE)\n total = sum(Y+Y_predicted)\n if total != 0:\n error_rate = sum(np.abs(Y - Y_predicted)).astype('float64')/total\n else:\n error_rate = 0\n warnings.warn('No events observed.')\n \n tol = 1\n self.assertTrue(error_rate < tol)", "def test_prediction_with_negative_values(model_obj):\n X_test = [-2, -4, -6, -8]\n y_test = \"setosa\"\n y_preds = model_obj.classify(X_test)\n assert y_test == y_preds", "def zero_test():\n x, y , theta, t = simulate(Theta=0)\n if abs(x.max()) > 0 or abs(y.max()) > 0:\n\t\t print \"Error in the numerical scheme!\"\n else:\n\t\t print \"Theta = 0 and epsilon = 0 gives x = y = 0 for all times, as intended.\"", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def _value_test(y, y_hat):\n \n zeros_test_array = y_hat >= 0\n ones_test_array = y_hat <= 1\n values_test_array = np.logical_or(zeros_test_array, ones_test_array)\n values_test = np.all(values_test_array)\n if not values_test:\n print(\"Values error for \\hat{y}: \", y_hat[~values_test])\n return False\n \n return True", "def test_predict(self):\n assert 2 == 2", "def result_has_ones(test):\n evecs = test.transform(test.trajs)\n if evecs[0][0, 0] > 0.0:\n sign = 1.0\n else:\n sign = -1.0\n for evec in evecs:\n assert np.allclose(sign * evec[:, 0], 1.0)", "def _normality_checking(series):\r\n\r\n JB_stat, p, _, __ = jarque_bera(series)\r\n\r\n print(\"\\n--------------------------------------------\\n\")\r\n print(\"Checking Normality of {}\".format(series.name))\r\n print(\"Test Statistic : %.2f, p value : %.5f\" % (JB_stat, p))\r\n\r\n alpha = 0.05\r\n\r\n if p > alpha:\r\n\r\n print(\"Data looks Gaussian: fail to reject the Null Hypothesis\")\r\n return False\r\n\r\n else:\r\n\r\n print(\"Data does not look Gaussian: we reject the Null Hypothesis\")\r\n return True", "def test_2D_fully_missing():\n\n # Define data\n N = 20\n D = 2\n U = np.random.uniform(size=(N, D))\n theta = np.vstack(np.array([1., 2.]))\n 
sigma = 0.1\n Y = U @ theta + sigma * np.vstack(np.random.randn(N))\n\n # Prior\n mu_z = np.array([0.])\n sigma_z = np.array([1.])\n\n # Initial guess\n theta_init = np.array([0.5, 0.5])\n\n # Create some missing data\n z_true = [U[2][0], U[2][1]]\n U[2][0] = np.nan\n U[2][1] = np.nan\n\n # Run linear regression\n lr = LinearReg_MissingData(U, Y, N, D, theta_init, mu_z, sigma_z, sigma)\n lr.train(Nitt=20)\n\n # Define the model\n def f(u, theta):\n return u @ theta\n\n # Define likelihood\n def likelihood(u, y, theta):\n p = norm(loc=y, scale=sigma)\n return p.pdf(f(u, theta))\n\n # Generate samples from the prior\n Ns = 10000\n prior_z = mvn(mean=np.repeat(mu_z, 2),\n cov=np.diag(np.repeat(sigma_z**2, 2)))\n prior_Z_samples = prior_z.rvs(Ns)\n\n # Importance sample from the z posterior\n w = np.zeros(Ns)\n for i in range(Ns):\n w[i] = likelihood(u=prior_Z_samples[i, :], y=Y[2], theta=lr.theta)\n wn = w / np.sum(w)\n\n # IS estimate of E[z] posterior mean\n EZ_IS = wn @ prior_Z_samples\n\n # IS estimate of E[z z^T]\n EZZ_IS = np.zeros([2, 2])\n for i in range(Ns):\n EZZ_IS += (np.array([prior_Z_samples[i, :]]).T @\n np.array([prior_Z_samples[i, :]]) * wn[i])\n\n # IS estimate of posterior covariance matrix\n prior_Z_samples -= EZ_IS # Remove mean\n COV_Z_IS = np.zeros([2, 2])\n for i in range(Ns):\n COV_Z_IS += (np.array([prior_Z_samples[i, :]]).T @\n np.array([prior_Z_samples[i, :]]) * wn[i])\n\n # Posterior over z from code\n posterior_z = lr.posterior_z[2]\n\n # Check that the importance sampled mean is close to closed-form\n # expression for the mean\n assert np.allclose(EZ_IS, posterior_z.mean, atol=0.1)\n\n # Check that the importance sampled estimate of z z^T is close to\n # closed-form expression\n assert np.allclose(EZZ_IS, lr.EUU[2], atol=0.2)\n\n # Check that the importance sampled covariance matrix is close to\n # closed-form expression for the covariance matrix\n assert np.allclose(COV_Z_IS, posterior_z.cov, atol=0.2)", "def test(self , regressor , df_train , df_test):\n if self.summary_ is None : \n self.summary_ = self.selections_.apply( _score_test \n , args = (regressor , df_train , df_test) \n , axis = 1)\n else : \n self.summary_ = pd.concat([self.summary_ , self.selections_.apply( _score_test , args = (regressor , df_train , df_test) , axis = 1)]\n , keys = ['Validation' , 'Test'] , axis = 1)\n return", "def test_zero_diff(self):\n diff = np.zeros_like(self.dm.D)\n self.assertEqual(0, self.dm._error(diff))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test check likelihood is mostly zero.
def test_likelihood_mostly_zero(self): likelihood = self.Result.Likelihood.nd_pdf self.assertTrue(np.sum(likelihood != 0) < 65)
[ "def test_likelihood_all_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.all(likelihood == 0))", "def perplexity(model, test_data):\n return np.exp(-model.mean_log_likelihood(test_data))", "def test_single_linear_regression_rmse(reg_model):\n assert(pytest.approx(reg_model.root_mean_squared_error(), 0.02) == 0.31)", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def test_basics(self):\n self.report('Testing adding data, evaluation and marginal likelihood.' +\n ' Probabilistic test, might fail.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n num_tests = 0\n num_successes = 0\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n # Predictions & Marginal likelihood\n curr_preds, _ = curr_gp.eval(dataset[2], 'std')\n curr_gp_err = compute_average_prediction_error(dataset, curr_preds)\n const_err = compute_average_prediction_error(dataset, dataset[1].mean())\n lml = curr_gp.compute_log_marginal_likelihood()\n is_success = curr_gp_err < const_err\n num_tests += 1\n num_successes += is_success\n self.report(('(%s, ntr=%d, nte=%d):: GP-lml=%0.4f, GP-err=%0.4f, ' +\n 'Const-err=%0.4f. 
succ=%d')%(dataset[-1][:5], len(dataset[0]),\n len(dataset[2]), lml, curr_gp_err, const_err, is_success),\n 'test_result')\n succ_frac = num_successes / float(num_tests)\n self.report('Summary: num_successes / num_floats = %d/%d = %0.4f'%(num_successes,\n num_tests, succ_frac), 'test_result')\n assert succ_frac > 0.5", "def test_multiscale_zero_parallel(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x,\n self.x,\n njobs=6))", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def likelihood(self, x: np.ndarray) -> np.ndarray:", "def test_positive_pred(self,y):\n self.assertTrue((y>0).all())", "def test_model_performance(self):\n\t\tself.load_data()\n\t\tself.load_model()\n\t\tthreshold = 0.78 #0.78 to pass - change to 0.90 to deliberate fail test and therefore faild cloud build\n\t\tscore = self.model.score(self.X_test, self.y_test)\n\t\tis_above_threshold = True if score >= threshold else False\n\t\tassert is_above_threshold is True", "def likelihoods(self, step):", "def test_student_t_log_likelihood_single(self):\n model = pints.toy.ConstantModel(1)\n parameters = [0]\n times = np.asarray([1, 2, 3])\n model.simulate(parameters, times)\n values = np.asarray([1.0, -10.7, 15.5])\n problem = pints.SingleOutputProblem(model, times, values)\n log_likelihood = pints.StudentTLogLikelihood(problem)\n # Test Student-t_logpdf(values|mean=0, df = 3, scale = 10) = -11.74..\n self.assertAlmostEqual(log_likelihood([0, 3, 10]), -11.74010919785115)", "def test_lm(snps,pheno, covs=None, test='lrt',verbose=None):\n\tlm = test_lmm(snps=snps,pheno=pheno,K=None,covs=covs, test=test,verbose=verbose, NumIntervalsDelta0=100,NumIntervalsDeltaAlt=100,searchDelta=False)\n\treturn lm", "def test_ll_nom(self):\n pars = list(self.spec.central)\n nominal = self.spec(pars)\n self.spec.set_data(nominal) # nominal data\n stats = np.array(self.spec.stats)\n # event with nominal, ll penalty from poisson normalization\n ll = 0 # log likelihood\n ll += np.sum(logPoisson(nominal, nominal, stats))\n self.assertAlmostEqual(ll, self.spec.ll(pars))", "def evaluate(self, params: np.ndarray) -> float:\n kl = 0\n for sample in self.data:\n kl += np.log(self.vgbs.prob_sample(params, sample))\n return -kl / self.nr_samples", "def test_nLLeval_2(self):\n \n model = getLMM()\n model.setG(G0=self._G0, G1=self._G1 ,a2=self._a2)\n model.setX(self._X)\n model.sety(self._y)\n result = model.nLLeval(REML=True, delta=1.0)\n\n target_result = {'scale': 1.0, 'h2': 0.0, 'beta': NP.array([ 0.05863443]), 'a2': 0.4, 'REML': True, 'nLL': 90.940636012858121, 'sigma2': 0.96761436076968987}\n # make sure results are the same\n for key in result.keys():\n self.assertAlmostEqual(result[key], target_result[key])", "def dataLikelihood(self, step):", "def test_LinearRegression_err():\n np.random.seed(0)\n X = np.random.random((10, 1))\n y = np.random.random(10) + 1\n dy = 0.1\n\n y = np.random.normal(y, dy)\n\n clf1 = LinearRegression().fit(X, y, dy)\n clf2 = skLinearRegression().fit(X / dy, y / dy)\n\n assert_allclose(clf1.coef_[1:], clf2.coef_)\n assert_allclose(clf1.coef_[0], clf2.intercept_ * dy)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test check posterior is all zero.
def test_posterior_all_zero(self): posterior = self.Result.Posterior.nd_pdf self.assertTrue(np.all(posterior == 0))
[ "def test_positive_pred(self,y):\n self.assertTrue((y>0).all())", "def test_likelihood_all_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.all(likelihood == 0))", "def test_likelihood_mostly_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.sum(likelihood != 0) < 65)", "def test_parameter_estimates(self):\n DF_est = self.Result.Posterior.DF_estimates # DataFrame\n p0_est = DF_est.loc[\"p0\", \"Estimate\"]\n self.assertTrue(np.isclose(p0_est, self.expected_p0, atol=1))", "def test_post_mean():\n return abs(fit.mu.mean - mu_post)/fit.mu.se_mean < 3.", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def test_multiscale_zero_parallel(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x,\n self.x,\n njobs=6))", "def perplexity(model, test_data):\n return np.exp(-model.mean_log_likelihood(test_data))", "def test_prediction(self): \n \n N = 100\n theta = np.random.normal(size = (N,))\n X = np.eye(N)\n dt = 0.1\n \n theta_0 = np.zeros(theta.shape)\n model = PPModel(X,coef = theta,dt = dt)\n Y = model.sampleEvents(theta)\n theta_MLE = model.fit(Y,theta_0).x\n Y_predicted = model.sampleEvents(theta_MLE)\n total = sum(Y+Y_predicted)\n if total != 0:\n error_rate = sum(np.abs(Y - Y_predicted)).astype('float64')/total\n else:\n error_rate = 0\n warnings.warn('No events observed.')\n \n tol = 1\n self.assertTrue(error_rate < tol)", "def test_prediction_with_negative_values(model_obj):\n X_test = [-2, -4, -6, -8]\n y_test = \"setosa\"\n y_preds = model_obj.classify(X_test)\n assert y_test == y_preds", "def zero_test():\n x, y , theta, t = simulate(Theta=0)\n if abs(x.max()) > 0 or abs(y.max()) > 0:\n\t\t print \"Error in the numerical scheme!\"\n else:\n\t\t print \"Theta = 0 and epsilon = 0 gives x = y = 0 for all times, as intended.\"", "def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n synthetic_test.fit(0.001, n_iters = 10**3)\n sys.stdout.close()\n sys.stdout = save_stdout\n # Discard burn in\n loss_storage = np.array( synthetic_test.lr.training_loss[1:] )\n loss_var = np.var( loss_storage )\n self.assertTrue( loss_var < 1 / float( synthetic_test.lr.N ) )", "def _value_test(y, y_hat):\n \n zeros_test_array = y_hat >= 0\n ones_test_array = y_hat <= 1\n values_test_array = np.logical_or(zeros_test_array, ones_test_array)\n values_test = np.all(values_test_array)\n if not values_test:\n print(\"Values error for \\hat{y}: \", y_hat[~values_test])\n return False\n \n return True", "def test_predict(self):\n assert 2 == 2", "def result_has_ones(test):\n evecs = test.transform(test.trajs)\n if evecs[0][0, 0] > 0.0:\n sign = 1.0\n else:\n sign = -1.0\n for evec in evecs:\n assert np.allclose(sign * evec[:, 0], 1.0)", "def _normality_checking(series):\r\n\r\n JB_stat, p, _, __ = jarque_bera(series)\r\n\r\n print(\"\\n--------------------------------------------\\n\")\r\n print(\"Checking Normality of {}\".format(series.name))\r\n print(\"Test Statistic : %.2f, p value : %.5f\" % (JB_stat, p))\r\n\r\n alpha = 0.05\r\n\r\n if p > alpha:\r\n\r\n print(\"Data looks Gaussian: fail to reject the Null Hypothesis\")\r\n return False\r\n\r\n else:\r\n\r\n print(\"Data does not look Gaussian: we reject the Null Hypothesis\")\r\n return True", "def test_2D_fully_missing():\n\n # Define data\n N = 20\n D = 2\n U = np.random.uniform(size=(N, D))\n theta = np.vstack(np.array([1., 2.]))\n 
sigma = 0.1\n Y = U @ theta + sigma * np.vstack(np.random.randn(N))\n\n # Prior\n mu_z = np.array([0.])\n sigma_z = np.array([1.])\n\n # Initial guess\n theta_init = np.array([0.5, 0.5])\n\n # Create some missing data\n z_true = [U[2][0], U[2][1]]\n U[2][0] = np.nan\n U[2][1] = np.nan\n\n # Run linear regression\n lr = LinearReg_MissingData(U, Y, N, D, theta_init, mu_z, sigma_z, sigma)\n lr.train(Nitt=20)\n\n # Define the model\n def f(u, theta):\n return u @ theta\n\n # Define likelihood\n def likelihood(u, y, theta):\n p = norm(loc=y, scale=sigma)\n return p.pdf(f(u, theta))\n\n # Generate samples from the prior\n Ns = 10000\n prior_z = mvn(mean=np.repeat(mu_z, 2),\n cov=np.diag(np.repeat(sigma_z**2, 2)))\n prior_Z_samples = prior_z.rvs(Ns)\n\n # Importance sample from the z posterior\n w = np.zeros(Ns)\n for i in range(Ns):\n w[i] = likelihood(u=prior_Z_samples[i, :], y=Y[2], theta=lr.theta)\n wn = w / np.sum(w)\n\n # IS estimate of E[z] posterior mean\n EZ_IS = wn @ prior_Z_samples\n\n # IS estimate of E[z z^T]\n EZZ_IS = np.zeros([2, 2])\n for i in range(Ns):\n EZZ_IS += (np.array([prior_Z_samples[i, :]]).T @\n np.array([prior_Z_samples[i, :]]) * wn[i])\n\n # IS estimate of posterior covariance matrix\n prior_Z_samples -= EZ_IS # Remove mean\n COV_Z_IS = np.zeros([2, 2])\n for i in range(Ns):\n COV_Z_IS += (np.array([prior_Z_samples[i, :]]).T @\n np.array([prior_Z_samples[i, :]]) * wn[i])\n\n # Posterior over z from code\n posterior_z = lr.posterior_z[2]\n\n # Check that the importance sampled mean is close to closed-form\n # expression for the mean\n assert np.allclose(EZ_IS, posterior_z.mean, atol=0.1)\n\n # Check that the importance sampled estimate of z z^T is close to\n # closed-form expression\n assert np.allclose(EZZ_IS, lr.EUU[2], atol=0.2)\n\n # Check that the importance sampled covariance matrix is close to\n # closed-form expression for the covariance matrix\n assert np.allclose(COV_Z_IS, posterior_z.cov, atol=0.2)", "def test(self , regressor , df_train , df_test):\n if self.summary_ is None : \n self.summary_ = self.selections_.apply( _score_test \n , args = (regressor , df_train , df_test) \n , axis = 1)\n else : \n self.summary_ = pd.concat([self.summary_ , self.selections_.apply( _score_test , args = (regressor , df_train , df_test) , axis = 1)]\n , keys = ['Validation' , 'Test'] , axis = 1)\n return", "def test_zero_diff(self):\n diff = np.zeros_like(self.dm.D)\n self.assertEqual(0, self.dm._error(diff))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the marginalised 1D pdfs are as expected
def test_marginalised_1D_pdf(self): m_1D = self.NB_nd_pdf_1.marginalised_1D self.assertEqual(len(m_1D), 2) # Scale the pdfs to compare despite the m_1D PDFs being normalised m_1D["log U"] /= m_1D["log U"].max() m_1D["12 + log O/H"] /= m_1D["12 + log O/H"].max() expected_x_pdf = self.marginalised_x / self.marginalised_x.max() expected_y_pdf = self.marginalised_y / self.marginalised_y.max() self.assertTrue(np.allclose(m_1D["log U"], expected_x_pdf, atol=1e-12, rtol=0)) self.assertTrue(np.allclose(m_1D["12 + log O/H"], expected_y_pdf, atol=1e-12, rtol=0)) # May have swapped x and y, but it's all symmetric anyway...
[ "def test_compute_pdf_matrix(self):\n pdf_matrix = self.cluster_obj_2.compute_pdf_matrix()\n self.assertEqual(round(pdf_matrix[0,0], 3), 0.044)\n self.assertEqual(round(pdf_matrix[0,1], 3), 0.038)", "def test_posterior_all_zero(self):\n posterior = self.Result.Posterior.nd_pdf\n self.assertTrue(np.all(posterior == 0))", "def test_compute_pdf(self):\n pdf = self.cluster_obj_1.compute_pdf(self.X[0], 1)\n self.assertEqual(round(pdf, 3), 0.038)", "def test_nd_pdf(self):\n pdf = self.NB_nd_pdf_1.nd_pdf\n scaled_raw_nd_pdf = self.raw_pdf / self.raw_pdf.max()\n self.assertTrue(np.array_equal(pdf / pdf.max(), scaled_raw_nd_pdf))", "def test_equal_apportionment_zero_children(self):\r\n\r\n self.assertTrue(self.DUT.equal_apportionment(0, 0.95))", "def test_norm_log_pdf_id_cov_sanity():\n x = np.array([4., 5.])\n mean = np.array([3., 2.])\n cov_scale = 5.0\n nose.tools.assert_almost_equals(\n norm_log_pdf(x, mean, cov_scale * np.eye(2)),\n norm_log_pdf_id_cov(x, mean, cov_scale), 10)", "def tests_compute_posterior(self):\n pdf_matrix = self.cluster_obj_3.compute_pdf_matrix()\n posterior_matrix = self.cluster_obj_3.compute_posterior(pdf_matrix)\n self.assertEqual(round(posterior_matrix[0,0], 2), 0.37)\n self.assertEqual(round(posterior_matrix[0,1], 2), 0.63)", "def test_memory_level_1(self):\n memory = np.array([[1.0j, 1.0, 0.5 + 0.5j], [0.5 + 0.5j, 1.0, 1.0j]], dtype=complex)\n result = marginal_memory(memory, [0, 2])\n expected = np.array([[1.0j, 0.5 + 0.5j], [0.5 + 0.5j, 1.0j]], dtype=complex)\n np.testing.assert_array_equal(result, expected)", "def test_likelihood_all_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.all(likelihood == 0))", "def test_likelihood_mostly_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.sum(likelihood != 0) < 65)", "def test_norm_pdf():\n # Note: Neither my code nor theirs check that cov is symmetric, and they\n # will give different results if cov is asymmetric.\n x = np.array([4., 5.])\n mean = np.array([3., 2.])\n cov = np.array([[3., 2.], [2., 4.]])\n ref_p = 0.017161310176083477\n # nose.tools.assert_almost_equals(\n # scipy.stats.multivariate_normal.pdf(x, mean, cov), ref_p, 10)\n nose.tools.assert_almost_equals(\n norm_pdf(x, mean, cov), ref_p, 10)", "def test_marginalize_memory(self):\n memory = [hex(ii) for ii in range(8)]\n res = marginal_memory(memory, indices=[0])\n self.assertEqual(res, [bin(ii % 2)[2:] for ii in range(8)])", "def test_equal_apportionment_zero_goal(self):\r\n\r\n self.assertTrue(self.DUT.equal_apportionment(5, 0.0))", "def test_Bordoloi_pdf():\n\n #make a normalised pdf\n h = np.histogram(np.random.normal(size=5e6)*0.1 + 2, bins=800)\n dist = h[0]\n bn = h[1][1:] - (h[1][1] - h[1][0]) / 2.0\n pdf = pval.normalisepdfs(dist, bn)\n\n ngals = 1e5\n pdfs = np.tile(pdf, [ngals, 1])\n\n specz = np.random.normal(size=ngals)*0.1 + 2\n\n gini = pval.Bordoloi_pdf_test(pdfs, bn, specz)\n\n print gini\n np.testing.assert_almost_equal(gini, 0, decimal=1)", "def marginal_stdevs(self):\n pass", "def test_margin_boxes():\r\n _ = as_pixel(b'\\xff\\xff\\xff\\xff') # white\r\n R = as_pixel(b'\\xff\\x00\\x00\\xff') # red\r\n G = as_pixel(b'\\x00\\xff\\x00\\xff') # green\r\n B = as_pixel(b'\\x00\\x00\\xff\\xff') # blue\r\n g = as_pixel(b'\\x00\\x80\\x00\\xff') # half green\r\n b = as_pixel(b'\\x00\\x00\\x80\\xff') # half blue\r\n assert_pixels('margin_boxes', 15, 15, [\r\n _+_+_+_+_+_+_+_+_+_+_+_+_+_+_,\r\n _+G+G+G+_+_+_+_+_+_+B+B+B+B+_,\r\n _+G+G+G+_+_+_+_+_+_+B+B+B+B+_,\r\n 
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,\r\n _+_+_+_+_+R+R+R+R+_+_+_+_+_+_,\r\n _+_+_+_+_+R+R+R+R+_+_+_+_+_+_,\r\n _+_+_+_+_+R+R+R+R+_+_+_+_+_+_,\r\n _+_+_+_+_+R+R+R+R+_+_+_+_+_+_,\r\n _+_+_+_+_+_+_+_+_+_+_+_+_+_+_,\r\n _+b+b+b+_+_+_+_+_+_+g+g+g+g+_,\r\n _+b+b+b+_+_+_+_+_+_+g+g+g+g+_,\r\n _+b+b+b+_+_+_+_+_+_+g+g+g+g+_,\r\n _+b+b+b+_+_+_+_+_+_+g+g+g+g+_,\r\n _+b+b+b+_+_+_+_+_+_+g+g+g+g+_,\r\n _+_+_+_+_+_+_+_+_+_+_+_+_+_+_,\r\n ], '''\r\n <style>\r\n html { height: 100% }\r\n body { background: #f00; height: 100% }\r\n @page {\r\n size: 15px;\r\n margin: 4px 6px 7px 5px;\r\n background: white;\r\n\r\n @top-left-corner {\r\n margin: 1px;\r\n content: \" \";\r\n background: #0f0;\r\n }\r\n @top-right-corner {\r\n margin: 1px;\r\n content: \" \";\r\n background: #00f;\r\n }\r\n @bottom-right-corner {\r\n margin: 1px;\r\n content: \" \";\r\n background: #008000;\r\n }\r\n @bottom-left-corner {\r\n margin: 1px;\r\n content: \" \";\r\n background: #000080;\r\n }\r\n }\r\n </style>\r\n <body>\r\n ''')", "def save_to_pdf(data1, sources1, data2, sources2, dirname=None):\n\n ncolors = np.max(data2) + 1\n prng = np.random.RandomState(1234)\n h = prng.uniform(low=0.0, high=1.0, size=ncolors)\n s = prng.uniform(low=0.2, high=0.7, size=ncolors)\n v = prng.uniform(low=0.5, high=1.0, size=ncolors)\n hsv = np.dstack((h, s, v))\n\n rgb = np.squeeze(colors.hsv_to_rgb(hsv))\n rgb[0] = (0, 0, 0)\n cmap = colors.ListedColormap(rgb)\n\n mk_patch = lambda xy, r, c, lw, fill: patch.Circle(xy=xy,\n radius=r,\n color=c,\n fill=fill,\n lw=lw)\n norm = ImageNormalize(data1,\n stretch=LogStretch(),\n vmin=0,\n vmax=1000)\n # interval=ZScaleInterval())\n\n outname = 'centroid_comparison.pdf'\n print('Total number of sources {}'.format(len(sources1)))\n with PdfPages(outname) as pdf:\n num_pages = int(np.ceil(len(sources1) / 16))\n # num_pages = 20\n start_idx = 0\n for i in range(num_pages):\n start_idx += 8\n sources1_to_plot = sources1[start_idx: start_idx + 8]\n sources2_to_plot = sources2[start_idx: start_idx + 8]\n # Initalize the plot, each axes list will contain 8 plots\n # The plots should be organize by columns, i.e. two plots in\n # the same column correspond to the same image\n fig, axes00, axes10 = mk_grid()\n\n\n\n # i will run from 0 to 15 (i.e. 
16 elements)\n for j in range(len(sources1_to_plot)):\n cr1 = sources1_to_plot[j]\n cr2 = sources2_to_plot[j]\n # limits for the first star\n # print(j)\n if j < 4:\n # print(j%4)\n ax1 = axes00[j]\n else:\n # print(j%4)\n ax1 = axes10[j % 4]\n\n cutout_size = 40\n # Limits for the first plot\n xlimits1 = cr1['xcenter'] - cutout_size / 2, \\\n cr1['xcenter'] + cutout_size / 2\n ylimits1 = cr1['ycenter'] - cutout_size / 2, \\\n cr1['ycenter'] + cutout_size / 2\n\n if j < 4:\n # print(j%4 + 4)\n ax2 = axes00[j % 4 + 4]\n else:\n # print(j%4 + 4)\n ax2 = axes10[j % 4 + 4]\n\n # Limits for the second plot\n xlimits2 = cr2['xcenter'] - cutout_size/2, \\\n cr2['xcenter'] + cutout_size/2\n ylimits2 = cr2['ycenter'] - cutout_size/2, \\\n cr2['ycenter'] + cutout_size/2\n\n\n\n im1 = ax1.imshow(data1, norm=norm, cmap='gray', origin='lower')\n im2 = ax2.imshow(data2, cmap=cmap, origin='lower')\n\n # Add axes for color bar to show scale\n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"8%\", pad=0.05)\n\n\n cbar = fig.colorbar(im1, cax=cax1)\n n = len(cbar.ax.get_yticklabels())\n\n labels_to_hide = [n-3, n - 2]\n loop = zip(cbar.ax.get_yticklabels(),\n cbar.ax.yaxis.get_major_ticks())\n for i, (label, tick) in enumerate(loop):\n if i in labels_to_hide:\n label.set_visible(False)\n tick.set_visible(False)\n\n cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(),\n rotation=10,\n horizontalalignment='left',\n verticalalignment='center',\n fontsize=5\n )\n cbar.update_ticks()\n\n\n\n flux_max_patch = mk_patch((cr1['xmax'], cr1['ymax']),\n r=0.5,\n c='magenta',\n lw=1,\n fill=True)\n ax1.add_patch(flux_max_patch)\n\n flux_center_patch = mk_patch((cr1['xcenter'], cr1['ycenter']),\n r=0.5,\n c='red',\n lw=1.,\n fill=True)\n ax1.add_patch(flux_center_patch)\n geo_center_patch = mk_patch((cr2['xcenter'], cr2['ycenter']),\n r=0.5,\n c='blue',\n lw=1.,\n fill=True)\n\n ax1.add_patch(geo_center_patch)\n\n current_label_patch = mk_patch((cr2['xcenter'], cr2['ycenter']),\n r=4,\n c='white',\n lw=1.5,\n fill=False)\n\n\n ax2.add_patch(current_label_patch)\n\n ax1.set_title('Red: flux-weight\\n '\n 'Blue: uniform-weight \\n'\n 'Magneta: max value',\n fontsize='medium')\n ax2.set_title('Label', fontsize='medium')\n\n # Set the plot limits for star 1\n ax1.set_xlim(xlimits1[0], xlimits1[1])\n ax1.set_ylim(ylimits1[0], ylimits1[1])\n\n # Set the plot limits for star 2\n ax2.set_xlim(xlimits2[0], xlimits2[1])\n ax2.set_ylim(ylimits2[0], ylimits2[1])\n\n ax1.grid(False)\n ax2.grid(False)\n # add colorbar to outer grid\n\n pdf.savefig(fig)\n plt.close()", "def test_memory_level_0(self):\n memory = np.asarray(\n [\n # qubit 0 qubit 1 qubit 2\n [\n [-12974255.0, -28106672.0],\n [15848939.0, -53271096.0],\n [-18731048.0, -56490604.0],\n ], # shot 1\n [\n [-18346508.0, -26587824.0],\n [-12065728.0, -44948360.0],\n [14035275.0, -65373000.0],\n ], # shot 2\n [\n [12802274.0, -20436864.0],\n [-15967512.0, -37575556.0],\n [15201290.0, -65182832.0],\n ], # ...\n [[-9187660.0, -22197716.0], [-17028016.0, -49578552.0], [13526576.0, -61017756.0]],\n [[7006214.0, -32555228.0], [16144743.0, -33563124.0], [-23524160.0, -66919196.0]],\n ],\n dtype=complex,\n )\n result = marginal_memory(memory, [0, 2])\n expected = np.asarray(\n [\n [[-12974255.0, -28106672.0], [-18731048.0, -56490604.0]], # shot 1\n [[-18346508.0, -26587824.0], [14035275.0, -65373000.0]], # shot 2\n [[12802274.0, -20436864.0], [15201290.0, -65182832.0]], # ...\n [[-9187660.0, -22197716.0], [13526576.0, -61017756.0]],\n [[7006214.0, 
-32555228.0], [-23524160.0, -66919196.0]],\n ],\n dtype=complex,\n )\n np.testing.assert_array_equal(result, expected)", "def test_get_proportions_data():\n test_data = {'YEAR': [2012, 2012, 2012, 2012, 2013, 2013, 2013, 2013,\n 2014, 2014, 2014, 2014, 2015, 2015, 2015, 2015,\n 2016, 2016, 2016, 2016, 2017, 2017, 2017, 2017,\n 2018, 2018, 2018, 2018],\n 'TREAT_EARLY': [1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1,\n 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],\n 'TREAT_LATE': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0,\n 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],\n 'GOOD_GENHLTH': [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1],\n 'PHYS_DISTRESS': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1],\n 'MENT_DISTRESS': [1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1,\n 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],\n 'POOR_OVR_HLTH': [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0,\n 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1],\n 'HLTHPLN': [0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n 'HAS_PERSDOC': [1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n 'MEDCOST': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1,\n 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],\n 'ANNUAL_CHECKUP': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1,\n 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0]}\n\n expected_data = {'YEAR': [2012, 2012, 2012, 2012, 2013, 2013, 2013, 2013,\n 2014, 2014, 2014, 2014, 2015, 2015, 2015, 2015,\n 2016, 2016, 2016, 2016, 2017, 2017, 2017, 2017,\n 2018, 2018, 2018, 2018],\n 'TREAT_EARLY': [1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],\n 'TREAT_LATE': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1,\n 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],\n 'GOOD_GENHLTH': [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,\n 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0,\n 0, 1],\n 'PHYS_DISTRESS': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n 0, 1],\n 'MENT_DISTRESS': [1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,\n 0, 0],\n 'POOR_OVR_HLTH': [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1,\n 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n 0, 1],\n 'HLTHPLN': [0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1,\n 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n 'HAS_PERSDOC': [1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n 'MEDCOST': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],\n 'ANNUAL_CHECKUP': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0,\n 1, 0],\n 'PROP_GOOD_GENHLTH': [1/3, 1/1, 1/3, 1/3, 0/2, 1/1, 1/1,\n 0/2, 1/2, 1/2, 0/1, 1/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 1/1, 1/2,\n 1/2, 1/2, 1/2, 1/2, 1/2, 0/1, 1/1],\n 'PROP_PHYS_DISTRESS': [1/3, 0/1, 1/3, 1/3, 1/2, 0/1, 0/1,\n 1/2, 1/2, 1/2, 0/1, 1/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 0/1, 2/2,\n 1/2, 1/2, 2/2, 0/2, 0/2, 0/1, 1/1],\n 'PROP_MENT_DISTRESS': [2/3, 0/1, 2/3, 2/3, 0/2, 1/1, 1/1,\n 0/2, 0/2, 0/2, 1/1, 0/1, 2/2, 2/2,\n 2/2, 2/2, 0/1, 0/2, 0/2, 1/1, 2/2,\n 0/2, 0/2, 2/2, 2/2, 2/2, 0/1, 0/1],\n 'PROP_POOR_OVR_HLTH': [2/3, 1/1, 2/3, 2/3, 1/2, 0/1, 0/1,\n 1/2, 1/2, 1/2, 0/1, 0/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 0/1, 1/2,\n 1/2, 1/2, 1/2, 1/2, 1/2, 0/1, 1/1],\n 'PROP_HLTHPLN': [2/3, 0/1, 2/3, 2/3, 2/2, 1/1, 0/1, 2/2,\n 1/2, 1/2, 1/1, 1/1, 1/2, 1/2, 1/2, 1/2,\n 1/1, 1/2, 1/2, 0/1, 0/2, 2/2, 2/2, 0/2,\n 2/2, 2/2, 0/1, 0/1],\n 'PROP_HAS_PERSDOC': [3/3, 
0/1, 3/3, 3/3, 2/2, 1/1, 0/1,\n 2/2, 1/2, 1/2, 1/1, 1/1, 2/2, 1/2,\n 2/2, 1/2, 0/1, 1/2, 1/2, 1/1, 0/2,\n 2/2, 2/2, 0/2, 0/2, 0/2, 1/1, 1/1],\n 'PROP_MEDCOST': [2/3, 1/1, 2/3, 2/3, 1/2, 0/1, 1/1, 1/2,\n 1/2, 1/2, 0/1, 0/1, 1/2, 2/2, 1/2, 2/2,\n 1/1, 0/2, 0/2, 1/1, 1/2, 1/2, 1/2, 1/2,\n 2/2, 2/2, 0/1, 0/1],\n 'PROP_ANNUAL_CHECKUP': [1/3, 0/1, 1/3, 1/3, 1/2, 1/1,\n 0/1, 1/2, 1/2, 1/2, 1/1, 0/1,\n 0/2, 2/2, 0/2, 2/2, 1/1, 0/2,\n 0/2, 2/2, 1/2, 1/2, 1/2, 1/2,\n 1/2, 1/2, 1/1, 0/1]}\n\n test_df = pd.DataFrame(test_data)\n expected_df = pd.DataFrame(expected_data)\n actual_df = get_proportions_data(test_df)\n actual_df = actual_df.drop(columns=['TREAT', 'ALL_PROP_GOOD_GENHLTH',\n 'ALL_PROP_PHYS_DISTRESS',\n 'ALL_PROP_MENT_DISTRESS',\n 'ALL_PROP_POOR_OVR_HLTH',\n 'ALL_PROP_HLTHPLN',\n 'ALL_PROP_HAS_PERSDOC',\n 'ALL_PROP_MEDCOST',\n 'ALL_PROP_ANNUAL_CHECKUP'])\n assert_frame_equal(expected_df, actual_df)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the normalised nd_pdf matches the input raw nd_pdf. We avoid doing a proper normalisation by comparing with a simple scaling.
def test_nd_pdf(self): pdf = self.NB_nd_pdf_1.nd_pdf scaled_raw_nd_pdf = self.raw_pdf / self.raw_pdf.max() self.assertTrue(np.array_equal(pdf / pdf.max(), scaled_raw_nd_pdf))
[ "def test_norm_pdf():\n # Note: Neither my code nor theirs check that cov is symmetric, and they\n # will give different results if cov is asymmetric.\n x = np.array([4., 5.])\n mean = np.array([3., 2.])\n cov = np.array([[3., 2.], [2., 4.]])\n ref_p = 0.017161310176083477\n # nose.tools.assert_almost_equals(\n # scipy.stats.multivariate_normal.pdf(x, mean, cov), ref_p, 10)\n nose.tools.assert_almost_equals(\n norm_pdf(x, mean, cov), ref_p, 10)", "def test_norm_log_pdf_sanity():\n # Note: Neither my code nor theirs check that cov is symmetric, and they\n # will give different results if cov is asymmetric.\n x = np.array([4., 5.])\n mean = np.array([3., 2.])\n cov = np.array([[3., 2.], [2., 4.]])\n ref_logp = -4.0650978372492634\n # nose.tools.assert_almost_equals(\n # scipy.stats.multivariate_normal.logpdf(x, mean, cov), ref_logp, 10)\n nose.tools.assert_almost_equals(\n norm_log_pdf(x, mean, cov), ref_logp, 10)", "def test_marginalised_1D_pdf(self):\n m_1D = self.NB_nd_pdf_1.marginalised_1D\n self.assertEqual(len(m_1D), 2)\n # Scale the pdfs to compare despite the m_1D PDFs being normalised\n m_1D[\"log U\"] /= m_1D[\"log U\"].max()\n m_1D[\"12 + log O/H\"] /= m_1D[\"12 + log O/H\"].max()\n expected_x_pdf = self.marginalised_x / self.marginalised_x.max()\n expected_y_pdf = self.marginalised_y / self.marginalised_y.max()\n self.assertTrue(np.allclose(m_1D[\"log U\"], expected_x_pdf,\n atol=1e-12, rtol=0))\n self.assertTrue(np.allclose(m_1D[\"12 + log O/H\"], expected_y_pdf,\n atol=1e-12, rtol=0))\n # May have swapped x and y, but it's all symmetric anyway...", "def test_norm_log_pdf_id_cov_sanity():\n x = np.array([4., 5.])\n mean = np.array([3., 2.])\n cov_scale = 5.0\n nose.tools.assert_almost_equals(\n norm_log_pdf(x, mean, cov_scale * np.eye(2)),\n norm_log_pdf_id_cov(x, mean, cov_scale), 10)", "def normalize(self):\n self._pdf / self.norm", "def test_compute_pdf(self):\n pdf = self.cluster_obj_1.compute_pdf(self.X[0], 1)\n self.assertEqual(round(pdf, 3), 0.038)", "def test_compute_pdf_matrix(self):\n pdf_matrix = self.cluster_obj_2.compute_pdf_matrix()\n self.assertEqual(round(pdf_matrix[0,0], 3), 0.044)\n self.assertEqual(round(pdf_matrix[0,1], 3), 0.038)", "def test(x_norm, x_unnorm):\n # NOTE: closes over x_np & x_norm_correct_np\n assert x_norm.dtype == x_norm_correct_np.dtype\n assert x_unnorm.dtype == x_np.dtype\n assert np.allclose(x_norm, x_norm_correct_np)\n assert not np.allclose(x_norm, x_np)\n assert np.all(np.max(x_norm, axis=(0,1)) > 1)\n assert np.all(np.max(x_norm, axis=(0,1)) < 255 - means)\n assert np.all(np.min(x_norm, axis=(0,1)) < 0)\n assert np.all(np.min(x_norm, axis=(0,1)) > 0 - means)\n assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)", "def test_Bordoloi_pdf():\n\n #make a normalised pdf\n h = np.histogram(np.random.normal(size=5e6)*0.1 + 2, bins=800)\n dist = h[0]\n bn = h[1][1:] - (h[1][1] - h[1][0]) / 2.0\n pdf = pval.normalisepdfs(dist, bn)\n\n ngals = 1e5\n pdfs = np.tile(pdf, [ngals, 1])\n\n specz = np.random.normal(size=ngals)*0.1 + 2\n\n gini = pval.Bordoloi_pdf_test(pdfs, bn, specz)\n\n print gini\n np.testing.assert_almost_equal(gini, 0, decimal=1)", "def test_posterior_all_zero(self):\n posterior = self.Result.Posterior.nd_pdf\n self.assertTrue(np.all(posterior == 0))", "def ensure_normalization(dist):\n for key in dist.keys():\n prob_sum = sum(map(lambda x : np.power(2.0, x), dist[key]._data.tolist()))\n residual = abs(prob_sum - 1.0)\n if (residual > RENORMALIZATION_THRESH):\n pdb.set_trace()\n if (residual > ZERO_THRESH):\n 
log2_norm_factor = -np.log2(prob_sum)\n for samp in dist[key]._samples:\n dist[key].update(samp, dist[key].logprob(samp) + log2_norm_factor, True)", "def _normalize_distribution(x_plot, distribution, kde):\n pdf = distribution.prob(x_plot)\n highest_pdf = np.max(pdf)\n mean = distribution.mean().numpy().reshape(1, 1)\n kde_height = np.exp(kde.score_samples(mean))\n normalized_pdf = pdf * kde_height / highest_pdf\n return normalized_pdf", "def reset_pdf(self, nbin, normalize=False):\n self.__dist_func.reset_pdf(nbin, normalize)\n self.pdf = self.__dist_func.pdf", "def _alpha_two_nlls_match_a_normal_distribution(self, float_dtype):\n x = np.linspace(-10., 10, 1000, dtype=float_dtype)\n scale = float_dtype(1.7)\n with tf.Session():\n nll = distribution.nllfun(x, float_dtype(2.), scale).eval()\n nll_true = -scipy.stats.norm(0., scale).logpdf(x)\n self.assertAllClose(nll, nll_true)", "def test(x_norm, x_unnorm):\n # NOTE: closes over x_np & x_norm_correct_np\n assert x_norm.dtype == x_norm_correct_np.dtype\n assert x_unnorm.dtype == x_np.dtype\n assert np.allclose(x_norm, x_norm_correct_np)\n assert not np.allclose(x_norm, x_np)\n assert np.all(np.max(x_norm, axis=(0,1)) <= 1)\n assert np.all(np.max(x_norm, axis=(0,1)) > 0)\n assert np.all(np.min(x_norm, axis=(0,1)) >= -1)\n assert np.all(np.min(x_norm, axis=(0,1)) < 0)\n assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)", "def evaluate_pdf(self, value, norm=True):\n if norm:\n out = (self._numeric_pdf(value, *self.params.values()) /\n self.norm_constant)\n else:\n out = self._numeric_pdf(value, *self.params.values())\n return out", "def _alpha_two_samples_match_a_normal_distribution(self, float_dtype):\n num_samples = 16384\n scale = float_dtype(1.7)\n with tf.Session():\n samples = distribution.draw_samples(\n 2. * np.ones(num_samples, dtype=float_dtype),\n scale * np.ones(num_samples, dtype=float_dtype)).eval()\n # Perform the Kolmogorov-Smirnov test against a normal distribution.\n ks_statistic = scipy.stats.kstest(samples, 'norm', (0., scale)).statistic\n self.assertLess(ks_statistic, 0.01)", "def GetNormalizeAcrossScale(self) -> \"bool\":\n return _itkDiscreteGaussianDerivativeImageFilterPython.itkDiscreteGaussianDerivativeImageFilterISS2ISS2_GetNormalizeAcrossScale(self)", "def test_likelihood_all_zero(self):\n likelihood = self.Result.Likelihood.nd_pdf\n self.assertTrue(np.all(likelihood == 0))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check propagate_dered_errors values on Result object
def test_propagate_dered_errors(self): # Checks default value of False self.assertFalse(self.Result_dered1.propagate_dered_errors) self.assertTrue(self.Result_dered2.propagate_dered_errors)
[ "def test_dereddening_result_attributes(self):\n self.assertTrue(self.Result.deredden)\n self.assertTrue(self.Result.propagate_dered_errors)", "def _check_reproduced_expected_errors(self):\n if self.__reproduced_expected_errors:\n pytest.xfail(reason=self._get_reproduced_expected_errors_msg())", "def test_toResultError(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResultError('the err')\n self.assertEqual(r, ResultError('bob', 'a', '1', 'xxxx', 'the err', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "def test_no_flow_receivers():\n\n # instantiate a model grid, do not run flow accumulation on it\n\n mg = RasterModelGrid((30, 70))\n\n # test that the flow distance utility will fail because of a ValueError\n\n with pytest.raises(FieldError):\n calculate_flow__distance(mg)", "def test_error(self):\n simple = SillyResolverSimple()\n complex = SimpleResolverComplexifier(simple)\n receiver = ResultHolder(self)\n self.assertEqual(receiver._started, False)\n complex.resolveHostName(receiver, u\"example.com\")\n self.assertEqual(receiver._started, True)\n self.assertEqual(receiver._ended, False)\n self.assertEqual(receiver._addresses, [])\n simple._requests[0].errback(ZeroDivisionError(\"zow\"))\n self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)\n self.assertEqual(receiver._ended, True)\n self.assertEqual(receiver._addresses, [])", "def test__get_transformers_raise_valueerror(self):\n # Run\n dtypes = {\n 'void': 'void'\n }\n with pytest.raises(ValueError):\n Metadata._get_transformers(dtypes, None)", "def test_perform_error():\n intent = Error(ValueError(\"foo\"))\n with raises(ValueError):\n sync_perform(TypeDispatcher({Error: perform_error}), Effect(intent))", "def test_failure(self):\n simple = SillyResolverSimple()\n complex = SimpleResolverComplexifier(simple)\n receiver = ResultHolder(self)\n self.assertEqual(receiver._started, False)\n complex.resolveHostName(receiver, u\"example.com\")\n self.assertEqual(receiver._started, True)\n self.assertEqual(receiver._ended, False)\n self.assertEqual(receiver._addresses, [])\n simple._requests[0].errback(DNSLookupError(\"nope\"))\n self.assertEqual(receiver._ended, True)\n self.assertEqual(receiver._addresses, [])", "def _parse_result(self, result):\n if result is not True:\n for section, errors in result.iteritems():\n for key, value in errors.iteritems():\n if value is not True:\n message = (\n '\"{0}\" option in [{1}] is invalid value. {2}'\n ''.format(key, section, value)\n )\n print(message)\n\n err_message = (\n 'Some options are invalid!!! 
Please see the log!!!'\n )\n raise validate.ValidateError(err_message)\n\n else:\n return True", "def _check_not_reproduced_expected_errors(self):\n if self.__not_reproduced_expected_errors:\n pytest.fail(msg=self._get_not_reproduced_expected_errors_msg() +\n self._get_reproduced_expected_errors_msg())", "def _get_query_edge_rx_errors(self):\n return self.__query_edge_rx_errors", "def any_build_failures(self):", "def check_result(res, msg=None):\n if not res.status:\n return\n\n # If there was an error, it should be the last operation.\n if res.resarray:\n resop = res.resarray[-1].resop\n else:\n resop = None\n raise BadCompoundRes(resop, res.status, msg)", "def report_and_learn_from_error(self):\n self._n.report_error(self._err)", "def __call__(self, *args, **kwargs):\n return self.error(*args, **kwargs)", "def pass_fail_check(self, test_result_dict):\n avg_throughput = test_result_dict['iperf_results']['avg_throughput']\n min_throughput = test_result_dict['iperf_results']['min_throughput']\n std_dev_percent = (\n test_result_dict['iperf_results']['std_deviation'] /\n test_result_dict['iperf_results']['avg_throughput']) * 100\n # Set blackbox metrics\n if self.publish_testcase_metrics:\n self.testcase_metric_logger.add_metric('avg_throughput',\n avg_throughput)\n self.testcase_metric_logger.add_metric('min_throughput',\n min_throughput)\n self.testcase_metric_logger.add_metric('std_dev_percent',\n std_dev_percent)\n # Evaluate pass/fail\n min_throughput_check = (\n (min_throughput / avg_throughput) *\n 100) > self.testclass_params['min_throughput_threshold']\n std_deviation_check = std_dev_percent < self.testclass_params[\n 'std_deviation_threshold']\n\n test_message = (\n 'Atten: {0:.2f}dB, RSSI: {1:.2f}dB. '\n 'Throughput (Mean: {2:.2f}, Std. Dev:{3:.2f}%, Min: {4:.2f} Mbps).'\n 'LLStats : {5}'.format(test_result_dict['attenuation'],\n test_result_dict['rssi'], avg_throughput,\n std_dev_percent, min_throughput,\n test_result_dict['llstats']))\n if min_throughput_check and std_deviation_check:\n asserts.explicit_pass('Test Passed.' + test_message)\n asserts.fail('Test Failed. ' + test_message)", "def failed(self):\n return not self.succeeded", "def validate_result(result, request_id):\n\n if VER_MODE[0] == config[VER_STRATEGY]:\n if result == result_map[request_id][0]:\n logging.info(\"PASS: Result from replica matches self \" +\n \"computed result- { RequestId: %s Result: %s }\",\n request_id, result, extra=logger.NODE_INFO)\n return True\n else:\n logging.info(\"FAIL: Result from replica matches self \" +\n \"computed result- { RequestId: %s Result: %s }\",\n request_id, result, extra=logger.NODE_INFO)\n return False", "def errors(self):\n return numpy.abs(self.expected - self.result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test lines not included in likelihood calculation should still appear in the "best model" table.
def test_non_likelihood_lines_in_best_model_table(self): self.assertTrue(all(l in self.DF_best.index for l in self.exclude_lines))
[ "def test_In_lhood_field_in_best_model_table(self):\n correct = [(\"N\" if l in self.exclude_lines else \"Y\") for l in self.lines]\n self.assertTrue(self.DF_best[\"In_lhood?\"].values.tolist() == correct)", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def test_single_linear_regression_rmse(reg_model):\n assert(pytest.approx(reg_model.root_mean_squared_error(), 0.02) == 0.31)", "def test_regression(self):\r\n #print(sys.path)\r\n stock_master = read_csv(\"regression_test_stock_master.csv\")\r\n rows, fieldnames = commodity_matcher.add_commodities_to_stocks(stock_master[:100])\r\n all_matched = True\r\n for i, row in enumerate(rows):\r\n if not row['Commodity'] == stock_master[i]['Commodity']:\r\n print(\"Did not match, row \"+str(i)+\", \"+row['text'])\r\n print(\"New: '\"+row['Commodity']+\"' vs. Original: '\"+stock_master[i]['Commodity']+\"'\")\r\n all_matched = False\r\n assert(len(rows) == 100)\r\n assert(sorted(rows[0].keys()) == sorted(fieldnames))\r\n assert(sorted(fieldnames) == sorted(stock_master[0].keys()))\r\n if not all_matched:\r\n print(\"WARNING: all of the rows did not match. If the above results look fine, you may want to update regression_test_stock_master.csv.\")", "def test_LinearRegression_err():\n np.random.seed(0)\n X = np.random.random((10, 1))\n y = np.random.random(10) + 1\n dy = 0.1\n\n y = np.random.normal(y, dy)\n\n clf1 = LinearRegression().fit(X, y, dy)\n clf2 = skLinearRegression().fit(X / dy, y / dy)\n\n assert_allclose(clf1.coef_[1:], clf2.coef_)\n assert_allclose(clf1.coef_[0], clf2.intercept_ * dy)", "def test_single_linear_regression_r_squared(reg_model):\n assert(pytest.approx(reg_model.r_squared(), 0.01) == 0.52)", "def train_test_rmse(feature_cols):\r\n # create X and y\r\n x_input = ENERGY[feature_cols]\r\n y_output = ENERGY.Global_active_power\r\n x_train, x_test, y_train, y_test = train_test_split(x_input, y_output, random_state=123)\r\n linreg = LinearRegression()\r\n linreg.fit(x_train, y_train)\r\n y_pred = linreg.predict(x_test)\r\n\r\n return (linreg, np.sqrt(metrics.mean_squared_error(y_test, y_pred)))", "def test_model_performance(self):\n\t\tself.load_data()\n\t\tself.load_model()\n\t\tthreshold = 0.78 #0.78 to pass - change to 0.90 to deliberate fail test and therefore faild cloud build\n\t\tscore = self.model.score(self.X_test, self.y_test)\n\t\tis_above_threshold = True if score >= threshold else False\n\t\tassert is_above_threshold is True", "def evaluate_models_on_testing(x, y, models):\n\tfor i in range(len(models)): # iterate each model\n\t\tplt.figure() # create a new plot\n\t\testyvals = np.polyval(models[i], x) # get the array of estimated y values\n\t\tplt.plot(x, y, 'bo', label='Data Points') # plot points that show original points\n\t\tplt.plot(x, estyvals, 'r-', label='Regression Curve') # plot the regression curve\n\t\tplt.xlabel('Years')\n\t\tplt.ylabel('Temperature (degrees Celsius)')\n\t\tdeg = len(models[i]) - 1 # get the degree by the number of coefficients in a model\n\t\tr = rmse(y, estyvals) # get the RSME\n\t\tplt.title('#{0} Model \\nwhen RMSE is {1:.6f} and degree is {2}'.format(i+1, r, deg))\n\t\tplt.legend()\n\t\tplt.show()", "def test_best_model_table_fields(self):\n correct_fields = [\"In_lhood?\", \"Obs\", \"Model\", \"Resid_Stds\", \"Obs_S/N\"]\n t_fields = self.DF_best.columns.tolist()\n self.assertTrue(t_fields == correct_fields, t_fields)", "def baseline_linear_model(train_input, 
train_target, test_input, test_target):\n\n def add_squared(data):\n \"\"\"\n append the sum of the coordinates squared to the test_input\n :input: numpy array containing the coordinates of each point\n :output: input appended with the sum of the square of the coordinates\n \"\"\"\n square = np.power(data,2)[:,0]+np.power(data,2)[:,1].reshape(1,-1)\n square = np.transpose(square)\n return np.append(data,square,axis=1)\n\n # train the model\n X = train_input.numpy()\n X = add_squared(X)\n Y = train_target.numpy()\n clf = linear_model.SGDClassifier(max_iter=5000)\n clf.fit(X, Y.ravel())\n\n # test the function\n test = test_input.numpy()\n test = add_squared(test)\n Y_test = test_target.numpy()\n nb_errors = 0\n for i,x in enumerate(clf.predict(test)):\n if x!= Y_test[i]:\n nb_errors += 1\n print('Baseline accuracy:',100-nb_errors/test.shape[0]*100,'%')", "def BaselineModel(df, y):\n X_train = df.drop(labels=y, axis=1)\n y_train = df[y]\n dummy = DummyRegressor() # by default this will use the mean\n\n dummy.fit(X_train, y_train)\n \n y_pred = dummy.predict(X_train)\n\n score = dummy.score(X_train, y_train) # the score of a regression model is the r-squared value\n \n dummy_rmse = mean_squared_error(y_train, y_pred, squared=False)\n \n return print(f'R-squared = {score}',\n '\\n',\n f'RMSE = {dummy_rmse}')", "def test(self , regressor , df_train , df_test):\n if self.summary_ is None : \n self.summary_ = self.selections_.apply( _score_test \n , args = (regressor , df_train , df_test) \n , axis = 1)\n else : \n self.summary_ = pd.concat([self.summary_ , self.selections_.apply( _score_test , args = (regressor , df_train , df_test) , axis = 1)]\n , keys = ['Validation' , 'Test'] , axis = 1)\n return", "def test_residual_data_no_model(self):\n dst = \"ngc5921.split.residualdatawoutmodel.ms\"\n ref = 'ngc5921_statwt_ref_test_residual_data_no_model.ms'\n data = \"residual_data\"\n # row_to_rows = []\n # for i in range(60):\n # row_to_rows.append([i, i+1])\n shutil.copytree(src, dst)\n self.assertTrue(mytb.open(\n dst, nomodify=False), \"unable to open table \" + dst\n )\n self.assertTrue(\n mytb.removecols(\"MODEL_DATA\"), \"unable to remove MODEL_DATA column\"\n )\n mytb.done()\n myms.open(dst, nomodify=False)\n myms.statwt(datacolumn=data)\n myms.done()\n # self._check_weights(\n # dst, row_to_rows, data, None, False, None, None\n # )\n self.compare(dst, ref)\n shutil.rmtree(dst)", "def evaluate_models_on_training(x, y, models):\n\tfor i in range(len(models)): # iterate each model\n\t\tplt.figure() # create a new plot\n\t\testyvals = np.polyval(models[i], x) # get the array of estimated y values\n\t\tplt.plot(x, y, 'bo', label='Data Points') # plot points that show original points\n\t\tplt.plot(x, estyvals, 'r-', label='Regression Curve') # plot the regression curve\n\t\tplt.xlabel('Years')\n\t\tplt.ylabel('Temperature (degrees Celsius)')\n\t\tdeg = len(models[i]) - 1 # get the degree by the number of coefficients in a model\n\t\tr2 = r2_score(y, estyvals) # get R2 value\n\t\tif deg > 1:\n\t\t\tplt.title('#{0} Model \\nwhen R2 value is {1:.6f} and degree is {2}'.format(i+1, r2, deg))\n\t\telse: # se_over_slope(x, y, estimated, model)\n\t\t\tplt.title('#{0} Model \\nwhen R2 value is {1:.6f} and degree is {2} \\nand the ratio of the standard error is {3:.6f}'.format(\n\t\t\t\ti+1, r2, deg, se_over_slope(x, y, estyvals, models[i])))\n\t\tplt.legend()\n\t\tplt.show()", "def testSummary(self, clf):\n summary1 = clf.summary()\n self.failUnless('not yet trained' in summary1)\n 
clf.train(datasets['uni2small'])\n summary = clf.summary()\n # It should get bigger ;)\n self.failUnless(len(summary) > len(summary1))\n self.failUnless(not 'not yet trained' in summary)", "def eval_lin_model(beta, X_test, y_test):\n w = beta.T[:, 1:]\n y = beta.T[:, 0] + np.dot(X_test, w.T)\n MSE = np.sum((y - y_test)**2)/y_test.shape[0]\n\n return MSE, y", "def test_multi_reg_plot_line_fails(pt_multiple_reg):\n with pytest.raises(\n AssertionError, match=\"linear-regression line not displayed properly\"\n ):\n pt_multiple_reg.assert_lines_of_type(\"linear-regression\")", "def test_reg_plot_slope_fails(pd_df_reg_data, pt_reg_data):\n _, intercept_exp, _, _, _ = stats.linregress(\n pd_df_reg_data.A, pd_df_reg_data.B\n )\n with pytest.raises(AssertionError, match=\"Expected line not displayed\"):\n pt_reg_data.assert_line(1, intercept_exp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test check fields of best model table (we test for the case of no dereddening; field names are different with dereddening).
def test_best_model_table_fields(self): correct_fields = ["In_lhood?", "Obs", "Model", "Resid_Stds", "Obs_S/N"] t_fields = self.DF_best.columns.tolist() self.assertTrue(t_fields == correct_fields, t_fields)
[ "def test_get(self):\n correct_fields = {\n \"features\": self.features,\n \"num_features\": self.num_features,\n \"target\": self.target,\n \"method\": self.method,\n \"num_examples\": self.num_examples,\n }\n\n print(self.model)\n for field, ans in correct_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))", "def test_In_lhood_field_in_best_model_table(self):\n correct = [(\"N\" if l in self.exclude_lines else \"Y\") for l in self.lines]\n self.assertTrue(self.DF_best[\"In_lhood?\"].values.tolist() == correct)", "def test__list_fields(self):\n correct_fields = [\n \"classifier\",\n \"features\",\n \"num_features\",\n \"method\",\n \"num_examples\",\n \"target\",\n ]\n\n self.assertItemsEqual(self.model._list_fields(), correct_fields)", "def test_get(self):\n model = self.model\n for field in self.exposed_fields_ans:\n ans = model._get(field)\n self.assertTrue(\n self.get_ans[field](ans),\n \"\"\"Get failed in field {}. Output was {}.\"\"\".format(field, ans),\n )", "def test_no_extra_fields(self):\n fields = list(set(chain.from_iterable(\n (field.name, field.attname) if hasattr(field, 'attname') else\n (field.name,) for field in Macroinvertebrates._meta.get_fields()\n if not (field.many_to_one and field.related_model is None)\n )))\n self.assertEqual(sorted(fields), sorted(self.expected_fields.keys()))", "def test_all_field_opts_model(self, all_field_opts):\n for field in all_field_opts:\n api_keys = field.keys()\n # Tests if API and model have same number of keys\n assert len(self.model_keys) == len(api_keys)\n # Tests if the API and model keys and value types are equal\n for key in api_keys:\n assert key in self.model_keys\n assert type(field[key]) in field_opt_model[key]", "def test_best_model_dict_keys(self):\n expected_keys = sorted([\"table\", \"chi2\", \"extinction_Av_mag\",\n \"grid_location\"])\n key_list = sorted(list(self.Result.Posterior.best_model.keys()))\n self.assertEqual(key_list, expected_keys)", "def test_model(self):\n\n retrieved = Project.objects.get(pk=self.project.pk)\n self.assertEqual(\n retrieved.accounting_code.agency.name,\n 'General Services Administration'\n )\n self.assertEqual(retrieved.accounting_code.office, '18F')\n self.assertEqual(retrieved.start_date, datetime.date(2016, 1, 1))\n self.assertEqual(retrieved.end_date, datetime.date(2016, 2, 1))\n self.assertTrue(retrieved.accounting_code.billable)\n self.assertEqual(retrieved.profit_loss_account.name, 'PIF')\n self.assertEqual(\n str(retrieved.profit_loss_account),\n 'PIF - Revenue (10/2016 - 9/2017)'\n )", "def _check_model_params(self):", "def test_non_likelihood_lines_in_best_model_table(self):\n self.assertTrue(all(l in self.DF_best.index for l in self.exclude_lines))", "def test_compare_table_sanity_check(model, logger):\n description = description_for(model)\n assert compare_tables(model, description)\n assert not logger.caplog.record_tuples", "def test_split_data():\n\tlogging.info('Testing model training step...')\n\t#load features\n\tfinal_df = pd.read_csv('data/features.csv')\n\tY = np.log10(final_df['price'])\n\tX = final_df.drop(['price'], axis = 'columns', inplace = False)\n\t#Split into train and validation\n\tX_train, X_val, y_train, y_val = train_test_split(X, Y, test_size=0.33, random_state = 3)\n\t#split data using split function\n\t#raise AssertError if training set has smaller length than testing set\n\tassert len(X_train) > 2 * len(X_val)\n\tassert len(y_train) > 2 * len(y_val)\n\n\t\"\"\"Test if X has dropped price 
column\"\"\"\n\tassert X_train.columns[0] != 'price'\n\tassert X_val.columns[0] != 'price'\n\tassert X_train.columns[0] == 'yearOfRegistration'\n\tassert X_val.columns[0] == 'yearOfRegistration'", "def test_column_excluded(self):\n\n for case in self.CASE:\n\n if (\n (case['min_dt_version'] and case['min_dt_version'] > dt_version) or\n (case['max_dt_version'] and case['max_dt_version'] < dt_version)\n ):\n continue\n\n class Tab(case['table_clsss']):\n column_excluded = [\"first_name\", \"last_name\"]\n\n class Meta:\n model = Author\n\n table = Tab(Author.objects.all())\n request = RequestFactory().get('/fake/url')\n template = Template(\"\"\"\n {% load django_tables2 %}\n {% render_table table %}\n \"\"\")\n ctx = Context({\"table\": table, \"request\": request})\n render = template.render(ctx)\n\n self.assertTrue('data-td-class=\"id\"' in render)\n self.assertTrue('data-td-class=\"age\"' in render)\n self.assertFalse('data-td-class=\"first_name\"' in render)\n self.assertFalse('data-td-class=\"last_name\"' in render)", "def test_table_validation():\n with pytest.raises(MissingTableAttribute):\n\n class Model(DynaModel):\n class Table:\n name = \"table\"\n\n class Schema:\n foo = String(required=True)", "def validate_fields(meta):\n\tdef check_illegal_characters(fieldname):\n\t\tvalidate_column_name(fieldname)\n\n\tdef check_unique_fieldname(fieldname):\n\t\tduplicates = list(filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields)))\n\t\tif len(duplicates) > 1:\n\t\t\tfrappe.throw(_(\"Fieldname {0} appears multiple times in rows {1}\").format(fieldname, \", \".join(duplicates)))\n\n\tdef check_fieldname_length(fieldname):\n\t\tvalidate_column_length(fieldname)\n\n\tdef check_illegal_mandatory(d):\n\t\tif (d.fieldtype in no_value_fields) and d.fieldtype!=\"Table\" and d.reqd:\n\t\t\tfrappe.throw(_(\"Field {0} of type {1} cannot be mandatory\").format(d.label, d.fieldtype))\n\n\tdef check_link_table_options(d):\n\t\tif d.fieldtype in (\"Link\", \"Table\"):\n\t\t\tif not d.options:\n\t\t\t\tfrappe.throw(_(\"Options required for Link or Table type field {0} in row {1}\").format(d.label, d.idx))\n\t\t\tif d.options==\"[Select]\" or d.options==d.parent:\n\t\t\t\treturn\n\t\t\tif d.options != d.parent:\n\t\t\t\toptions = frappe.db.get_value(\"DocType\", d.options, \"name\")\n\t\t\t\tif not options:\n\t\t\t\t\tfrappe.throw(_(\"Options must be a valid DocType for field {0} in row {1}\").format(d.label, d.idx))\n\t\t\t\telif not (options == d.options):\n\t\t\t\t\tfrappe.throw(_(\"Options {0} must be the same as doctype name {1} for the field {2}\")\n\t\t\t\t\t\t.format(d.options, options, d.label))\n\t\t\t\telse:\n\t\t\t\t\t# fix case\n\t\t\t\t\td.options = options\n\n\tdef check_hidden_and_mandatory(d):\n\t\tif d.hidden and d.reqd and not d.default:\n\t\t\tfrappe.throw(_(\"Field {0} in row {1} cannot be hidden and mandatory without default\").format(d.label, d.idx))\n\n\tdef check_width(d):\n\t\tif d.fieldtype == \"Currency\" and cint(d.width) < 100:\n\t\t\tfrappe.throw(_(\"Max width for type Currency is 100px in row {0}\").format(d.idx))\n\n\tdef check_in_list_view(d):\n\t\tif d.in_list_view and (d.fieldtype in not_allowed_in_list_view):\n\t\t\tfrappe.throw(_(\"'In List View' not allowed for type {0} in row {1}\").format(d.fieldtype, d.idx))\n\n\tdef check_in_global_search(d):\n\t\tif d.in_global_search and d.fieldtype in no_value_fields:\n\t\t\tfrappe.throw(_(\"'In Global Search' not allowed for type {0} in row {1}\")\n\t\t\t\t.format(d.fieldtype, d.idx))\n\n\tdef 
check_dynamic_link_options(d):\n\t\tif d.fieldtype==\"Dynamic Link\":\n\t\t\tdoctype_pointer = list(filter(lambda df: df.fieldname==d.options, fields))\n\t\t\tif not doctype_pointer or (doctype_pointer[0].fieldtype not in (\"Link\", \"Select\")) \\\n\t\t\t\tor (doctype_pointer[0].fieldtype==\"Link\" and doctype_pointer[0].options!=\"DocType\"):\n\t\t\t\tfrappe.throw(_(\"Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'\"))\n\n\tdef check_illegal_default(d):\n\t\tif d.fieldtype == \"Check\" and d.default and d.default not in ('0', '1'):\n\t\t\tfrappe.throw(_(\"Default for 'Check' type of field must be either '0' or '1'\"))\n\t\tif d.fieldtype == \"Select\" and d.default and (d.default not in d.options.split(\"\\n\")):\n\t\t\tfrappe.throw(_(\"Default for {0} must be an option\").format(d.fieldname))\n\n\tdef check_precision(d):\n\t\tif d.fieldtype in (\"Currency\", \"Float\", \"Percent\") and d.precision is not None and not (1 <= cint(d.precision) <= 6):\n\t\t\tfrappe.throw(_(\"Precision should be between 1 and 6\"))\n\n\tdef check_unique_and_text(d):\n\t\tif meta.issingle:\n\t\t\td.unique = 0\n\t\t\td.search_index = 0\n\n\t\tif getattr(d, \"unique\", False):\n\t\t\tif d.fieldtype not in (\"Data\", \"Link\", \"Read Only\"):\n\t\t\t\tfrappe.throw(_(\"Fieldtype {0} for {1} cannot be unique\").format(d.fieldtype, d.label))\n\n\t\t\tif not d.get(\"__islocal\"):\n\t\t\t\ttry:\n\t\t\t\t\thas_non_unique_values = frappe.db.sql(\"\"\"select `{fieldname}`, count(*)\n\t\t\t\t\t\tfrom `tab{doctype}` where ifnull({fieldname}, '') != ''\n\t\t\t\t\t\tgroup by `{fieldname}` having count(*) > 1 limit 1\"\"\".format(\n\t\t\t\t\t\tdoctype=d.parent, fieldname=d.fieldname))\n\n\t\t\t\texcept (InternalError, OperationalError) as e:\n\t\t\t\t\tif e.args and e.args[0] == ER.BAD_FIELD_ERROR:\n\t\t\t\t\t\t# ignore if missing column, else raise\n\t\t\t\t\t\t# this happens in case of Custom Field\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\n\t\t\t\telse:\n\t\t\t\t\t# else of try block\n\t\t\t\t\tif has_non_unique_values and has_non_unique_values[0][0]:\n\t\t\t\t\t\tfrappe.throw(_(\"Field '{0}' cannot be set as Unique as it has non-unique values\").format(d.label))\n\n\t\tif d.search_index and d.fieldtype in (\"Text\", \"Long Text\", \"Small Text\", \"Code\", \"Text Editor\"):\n\t\t\tfrappe.throw(_(\"Fieldtype {0} for {1} cannot be indexed\").format(d.fieldtype, d.label))\n\n\tdef check_fold(fields):\n\t\tfold_exists = False\n\t\tfor i, f in enumerate(fields):\n\t\t\tif f.fieldtype==\"Fold\":\n\t\t\t\tif fold_exists:\n\t\t\t\t\tfrappe.throw(_(\"There can be only one Fold in a form\"))\n\t\t\t\tfold_exists = True\n\t\t\t\tif i < len(fields)-1:\n\t\t\t\t\tnxt = fields[i+1]\n\t\t\t\t\tif nxt.fieldtype != \"Section Break\":\n\t\t\t\t\t\tfrappe.throw(_(\"Fold must come before a Section Break\"))\n\t\t\t\telse:\n\t\t\t\t\tfrappe.throw(_(\"Fold can not be at the end of the form\"))\n\n\tdef check_search_fields(meta, fields):\n\t\t\"\"\"Throw exception if `search_fields` don't contain valid fields.\"\"\"\n\t\tif not meta.search_fields:\n\t\t\treturn\n\n\t\t# No value fields should not be included in search field\n\t\tsearch_fields = [field.strip() for field in (meta.search_fields or \"\").split(\",\")]\n\t\tfieldtype_mapper = { field.fieldname: field.fieldtype \\\n\t\t\tfor field in filter(lambda field: field.fieldname in search_fields, fields) }\n\n\t\tfor fieldname in search_fields:\n\t\t\tfieldname = fieldname.strip()\n\t\t\tif (fieldtype_mapper.get(fieldname) in 
no_value_fields) or \\\n\t\t\t\t(fieldname not in fieldname_list):\n\t\t\t\tfrappe.throw(_(\"Search field {0} is not valid\").format(fieldname))\n\n\tdef check_title_field(meta):\n\t\t\"\"\"Throw exception if `title_field` isn't a valid fieldname.\"\"\"\n\t\tif not meta.get(\"title_field\"):\n\t\t\treturn\n\n\t\tif meta.title_field not in fieldname_list:\n\t\t\tfrappe.throw(_(\"Title field must be a valid fieldname\"), InvalidFieldNameError)\n\n\t\tdef _validate_title_field_pattern(pattern):\n\t\t\tif not pattern:\n\t\t\t\treturn\n\n\t\t\tfor fieldname in re.findall(\"{(.*?)}\", pattern, re.UNICODE):\n\t\t\t\tif fieldname.startswith(\"{\"):\n\t\t\t\t\t# edge case when double curlies are used for escape\n\t\t\t\t\tcontinue\n\n\t\t\t\tif fieldname not in fieldname_list:\n\t\t\t\t\tfrappe.throw(_(\"{{{0}}} is not a valid fieldname pattern. It should be {{field_name}}.\").format(fieldname),\n\t\t\t\t\t\tInvalidFieldNameError)\n\n\t\tdf = meta.get(\"fields\", filters={\"fieldname\": meta.title_field})[0]\n\t\tif df:\n\t\t\t_validate_title_field_pattern(df.options)\n\t\t\t_validate_title_field_pattern(df.default)\n\n\tdef check_image_field(meta):\n\t\t'''check image_field exists and is of type \"Attach Image\"'''\n\t\tif not meta.image_field:\n\t\t\treturn\n\n\t\tdf = meta.get(\"fields\", {\"fieldname\": meta.image_field})\n\t\tif not df:\n\t\t\tfrappe.throw(_(\"Image field must be a valid fieldname\"), InvalidFieldNameError)\n\t\tif df[0].fieldtype != 'Attach Image':\n\t\t\tfrappe.throw(_(\"Image field must be of type Attach Image\"), InvalidFieldNameError)\n\n\tdef check_is_published_field(meta):\n\t\tif not meta.is_published_field:\n\t\t\treturn\n\n\t\tif meta.is_published_field not in fieldname_list:\n\t\t\tfrappe.throw(_(\"Is Published Field must be a valid fieldname\"), InvalidFieldNameError)\n\n\tdef check_timeline_field(meta):\n\t\tif not meta.timeline_field:\n\t\t\treturn\n\n\t\tif meta.timeline_field not in fieldname_list:\n\t\t\tfrappe.throw(_(\"Timeline field must be a valid fieldname\"), InvalidFieldNameError)\n\n\t\tdf = meta.get(\"fields\", {\"fieldname\": meta.timeline_field})[0]\n\t\tif df.fieldtype not in (\"Link\", \"Dynamic Link\"):\n\t\t\tfrappe.throw(_(\"Timeline field must be a Link or Dynamic Link\"), InvalidFieldNameError)\n\n\tdef check_sort_field(meta):\n\t\t'''Validate that sort_field(s) is a valid field'''\n\t\tif meta.sort_field:\n\t\t\tsort_fields = [meta.sort_field]\n\t\t\tif ',' in meta.sort_field:\n\t\t\t\tsort_fields = [d.split()[0] for d in meta.sort_field.split(',')]\n\n\t\t\tfor fieldname in sort_fields:\n\t\t\t\tif not fieldname in fieldname_list + list(default_fields):\n\t\t\t\t\tfrappe.throw(_(\"Sort field {0} must be a valid fieldname\").format(fieldname),\n\t\t\t\t\t\tInvalidFieldNameError)\n\n\tdef check_illegal_depends_on_conditions(docfield):\n\t\t''' assignment operation should not be allowed in the depends on condition.'''\n\t\tdepends_on_fields = [\"depends_on\", \"collapsible_depends_on\"]\n\t\tfor field in depends_on_fields:\n\t\t\tdepends_on = docfield.get(field, None)\n\t\t\tif depends_on and (\"=\" in depends_on) and \\\n\t\t\t\tre.match(\"\"\"[\\w\\.:_]+\\s*={1}\\s*[\\w\\.@'\"]+\"\"\", depends_on):\n\t\t\t\tfrappe.throw(_(\"Invalid {0} condition\").format(frappe.unscrub(field)), frappe.ValidationError)\n\n\tdef scrub_options_in_select(field):\n\t\t\"\"\"Strip options for whitespaces\"\"\"\n\n\t\tif field.fieldtype == \"Select\" and field.options is not None:\n\t\t\toptions_list = []\n\t\t\tfor i, option in 
enumerate(field.options.split(\"\\n\")):\n\t\t\t\t_option = option.strip()\n\t\t\t\tif i==0 or _option:\n\t\t\t\t\toptions_list.append(_option)\n\t\t\tfield.options = '\\n'.join(options_list)\n\n\tdef scrub_fetch_from(field):\n\t\tif hasattr(field, 'fetch_from') and getattr(field, 'fetch_from'):\n\t\t\tfield.fetch_from = field.fetch_from.strip('\\n').strip()\n\n\tfields = meta.get(\"fields\")\n\tfieldname_list = [d.fieldname for d in fields]\n\n\tnot_allowed_in_list_view = list(copy.copy(no_value_fields))\n\tnot_allowed_in_list_view.append(\"Attach Image\")\n\tif meta.istable:\n\t\tnot_allowed_in_list_view.remove('Button')\n\n\tfor d in fields:\n\t\tif not d.permlevel: d.permlevel = 0\n\t\tif d.fieldtype != \"Table\": d.allow_bulk_edit = 0\n\t\tif d.fieldtype == \"Barcode\": d.ignore_xss_filter = 1\n\t\tif not d.fieldname:\n\t\t\td.fieldname = d.fieldname.lower()\n\n\t\tcheck_illegal_characters(d.fieldname)\n\t\tcheck_unique_fieldname(d.fieldname)\n\t\tcheck_fieldname_length(d.fieldname)\n\t\tcheck_illegal_mandatory(d)\n\t\tcheck_link_table_options(d)\n\t\tcheck_dynamic_link_options(d)\n\t\tcheck_hidden_and_mandatory(d)\n\t\tcheck_in_list_view(d)\n\t\tcheck_in_global_search(d)\n\t\tcheck_illegal_default(d)\n\t\tcheck_unique_and_text(d)\n\t\tcheck_illegal_depends_on_conditions(d)\n\t\tscrub_options_in_select(d)\n\t\tscrub_fetch_from(d)\n\n\tcheck_fold(fields)\n\tcheck_search_fields(meta, fields)\n\tcheck_title_field(meta)\n\tcheck_timeline_field(meta)\n\tcheck_is_published_field(meta)\n\tcheck_sort_field(meta)\n\tcheck_image_field(meta)", "def test_unused_fields(self):\n self.wrapper.field1\n self.wrapper.field3\n self.assertEqual(set(self.wrapper.eraserhead_unused_fields), {'field2', 'field4'})", "def test_age_table(age, baraffe_model, age_interp):\n model_table, cols, __ = age_table(age, model=baraffe_model, age_interp=age_interp)\n\n assert isinstance(model_table, dict)\n assert isinstance(cols, list)\n\n for key in model_table:\n print(\"type of value\", type(model_table[key]))\n assert isinstance(model_table[key], np.ndarray)\n\n # check common columns are present\n common_cols = [\n \"M/Ms\",\n \"Teff\",\n \"L/Ls\",\n \"g\",\n \"Mv\",\n \"Mr\",\n \"Mi\",\n \"Mj\",\n \"Mh\",\n \"Mk\",\n \"Mll\",\n \"Mm\",\n ]\n for header in common_cols:\n assert header in cols", "def test_fields_should_not_be_blank(self):\n\n try:\n self.product.full_clean()\n\n except ValidationError as e:\n self.assertTrue('name' in e.message_dict)\n self.assertTrue('price' in e.message_dict)\n self.assertTrue('minimum' in e.message_dict)\n self.assertTrue('amount_per_package' in e.message_dict)\n self.assertTrue('max_availability' in e.message_dict)", "def test_fields(\n snowflake_deserializer: SnowflakeDeserializer,\n fields_metadata: List[Dict[str, str]],\n fields_metadata_sql: str,\n target_tables: List[str],\n fields: Dict[str, List[Field]],\n):\n # Mock `SnowflakeCursor` object and manipulate its results to match the model\n # metadata stored in `model_metadata.json`.\n cursor = snowflake_deserializer.database_connection.cursor\n cursor.return_value = MagicMock(SnowflakeCursor)\n cursor.return_value.__enter__().__iter__.return_value = iter(fields_metadata)\n calculated_fields = snowflake_deserializer._fields\n\n # Check if metadata query was called.\n cursor.return_value.__enter__().execute.assert_called_once_with(fields_metadata_sql)\n\n # Check that all tables have fields.\n assert len(calculated_fields.keys()) == len(target_tables)\n\n for table_name, table_fields in calculated_fields.items():\n # Check if 
the fields are the same as the expected result for the table.\n assert table_fields == fields[table_name]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test the "In_lhood?" field in the best model table should correctly identify if a line was used in the likelihood.
def test_In_lhood_field_in_best_model_table(self): correct = [("N" if l in self.exclude_lines else "Y") for l in self.lines] self.assertTrue(self.DF_best["In_lhood?"].values.tolist() == correct)
[ "def test_non_likelihood_lines_in_best_model_table(self):\n self.assertTrue(all(l in self.DF_best.index for l in self.exclude_lines))", "def test_ll_nom(self):\n pars = list(self.spec.central)\n nominal = self.spec(pars)\n self.spec.set_data(nominal) # nominal data\n stats = np.array(self.spec.stats)\n # event with nominal, ll penalty from poisson normalization\n ll = 0 # log likelihood\n ll += np.sum(logPoisson(nominal, nominal, stats))\n self.assertAlmostEqual(ll, self.spec.ll(pars))", "def test_single_linear_regression_fit(reg_model):\n assert(pytest.approx(reg_model.b1, 0.01) == 1.14)\n assert(pytest.approx(reg_model.b0, 0.01) == 0.43)", "def test_lm(snps,pheno, covs=None, test='lrt',verbose=None):\n\tlm = test_lmm(snps=snps,pheno=pheno,K=None,covs=covs, test=test,verbose=verbose, NumIntervalsDelta0=100,NumIntervalsDeltaAlt=100,searchDelta=False)\n\treturn lm", "def test_best_model_table_fields(self):\n correct_fields = [\"In_lhood?\", \"Obs\", \"Model\", \"Resid_Stds\", \"Obs_S/N\"]\n t_fields = self.DF_best.columns.tolist()\n self.assertTrue(t_fields == correct_fields, t_fields)", "def test_linear_regression(single_ts):\n results = linear_regression(single_ts, dim='time')\n for v in results.data_vars:\n assert results[v]", "def Linreg(df):\n X = df.drop(labels='price', axis=1)\n y = df.price\n \n X = sm.tools.tools.add_constant(X)\n king_model = sm.OLS(y, X).fit()\n \n return king_model.summary()", "def fit_followup_binary_regression(combined_df):\n assert combined_df['col_of_interest_t0'].map(lambda x:x in [0, 1]).all()\n assert combined_df['col_of_interest_t1'].map(lambda x:x in [0, 1]).all()\n assert not combined_df['koos_pain_subscore_t0'].map(lambda x:x in [0, 1]).all()\n assert not combined_df['koos_pain_subscore_t1'].map(lambda x:x in [0, 1]).all()\n assert not (combined_df['col_of_interest_t1'] == combined_df['col_of_interest_t0']).all()\n\n # predict binary pain at followup without any controls. \n yhat_model = sm.Logit.from_formula('col_of_interest_t1 ~ yhat_t0', data=combined_df).fit(\n cov_type='cluster', cov_kwds={'groups':combined_df['id'].astype(int).values})\n\n # predict binary pain at followup controlling for binary pain at t0. \n combined_model_binary_control = sm.Logit.from_formula('col_of_interest_t1 ~ yhat_t0 + col_of_interest_t0', data=combined_df).fit(\n cov_type='cluster', cov_kwds={'groups':combined_df['id'].astype(int).values})\n\n # predict binary pain at followup controlling for CONTINUOUS pain at t0. 
\n combined_model_continuous_control = sm.Logit.from_formula('col_of_interest_t1 ~ yhat_t0 + koos_pain_subscore_t0', data=combined_df).fit(\n cov_type='cluster', cov_kwds={'groups':combined_df['id'].astype(int).values})\n\n get_OR_and_CI = lambda m:'%2.3f (%2.3f, %2.3f)' % (np.exp(m.params['yhat_t0']), np.exp(m.conf_int().loc['yhat_t0', 0]), np.exp(m.conf_int().loc['yhat_t0', 1]))\n\n return {'OR (no control)':get_OR_and_CI(yhat_model), \n 'OR (binary control)':get_OR_and_CI(combined_model_binary_control),\n 'OR (continuous control)':get_OR_and_CI(combined_model_continuous_control)}", "def test_logistic_regression_importances(self):\n # Setting up classifier\n clf = LogisticRegression(C=1., solver='lbfgs')\n clf.fit(X, y)\n\n # Setting up lorax\n lrx = TheLorax(clf, data, id_col='entity_id')\n lrx_out = lrx.explain_example(idx=1, pred_class=1, graph=False)\n\n feature1_contrib = lrx_out.contribution.loc['feature1']\n feature5_contrib = lrx_out.contribution.loc['feature5']\n\n # Test cases for correct feature importances\n self.assertEqual(feature1_contrib, 2.186415806126551)\n self.assertEqual(feature5_contrib, -3.228614405467005)\n\n # Test case if we can recover lr prediction\n # Can't use all of sample because it now contains intercept as last element\n sample = lrx.X_test.loc[1, ].values[:-1]\n lr_pred = clf.predict_proba(sample.reshape(1, -1))[0][1]\n lrx_pred = 1 / (1 + np.exp(-lrx_out.contribution.sum()))\n\n self.assertEqual(lrx_pred, lr_pred)", "def train_regression_model_for_bmi(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, x1, y1, y1label, feature_headers, mrns, agex_low, agex_high, months_from, months_to, modelType='lasso', percentile=False, filterSTR=['Gender:1'], filterSTRThresh=[0.5], variablesubset=['Vital'],variable_exclude=['Trend'], num_clusters=16, num_iters=100, dist_type='euclidean', corr_vars_exclude=['Vital'], return_data_for_error_analysis=False, return_data=False, return_data_transformed=False, do_impute=True, mrnForFilter=[], add_time=False, bin_ix=[], do_normalize=True, binarize_diagnosis=True, subset=np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False])): #filterSTR='Gender:0 male'\n\n if any([len(x)==0 for x in (x1,y1,y1label,feature_headers,mrns)]):\n print('At least one required data not provided out of x1, y1, y1label, feature_headers, or mrns.')\n print('Creating data from data dictionaries')\n x1, y1, y1label, feature_headers, mrns = build_features.call_build_function(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, agex_low, agex_high, months_from, months_to, percentile, mrnsForFilter=mrnForFilter)\n original_data = (x1, y1, y1label, feature_headers, mrns)\n else:\n print('Using prepared raw data')\n\n if binarize_diagnosis:\n bin_ix = np.array([(h.startswith('Diagnosis:') or h.startswith('Maternal Diagnosis:') or h.startswith('Newborn Diagnosis:')) for h in feature_headers])\n print(bin_ix.sum(), 'features are binary')\n x1[:,bin_ix] = (x1[:,bin_ix] > 0) * 1.0\n\n ix, x2, y2, y2label, mrns = filter_training_set_forLinear(x1, y1, y1label, feature_headers, filterSTR, percentile, mrns, filterSTRThresh)\n\n if do_impute or do_normalize or add_time:\n x2, mux, stdx, bin_ix, unobserved = normalize(x2, bin_ix=bin_ix)\n\n if do_impute:\n x2 = autoencoder_impute(x2, bin_ix)\n\n if add_time:\n x2, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = add_temporal_features(x2, feature_headers, num_clusters, num_iters, y2, 
y2label, dist_type, True, mux, stdx, do_impute, subset)\n else:\n centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = ['NaN']*7\n\n corr_headers = np.array(feature_headers)\n corr_matrix = np.corrcoef(x2.transpose())\n corr_headers_filtered, corr_matrix_filtered, ix_corr_headers = filter_correlations_via(corr_headers, corr_matrix, corr_vars_exclude)\n print('corr matrix is filtered to size', corr_matrix_filtered.shape)\n\n if len(variablesubset) != 0:\n x2, feature_headers = variable_subset(x2, variablesubset, feature_headers)\n\n print ('output is: average:{0:4.3f}'.format(y2.mean()), ' min:', y2.min(), ' max:', y2.max())\n print ('normalizing output.'); y2 = (y2-y2.mean())/y2.std()\n\n print ('Predicting BMI at age:'+str(agex_low)+ ' to '+str(agex_high)+ 'years, from data in ages:'+ str(months_from)+'-'+str(months_to) + ' months')\n if filterSTR != '':\n print ('filtering patients with: ' , filterSTR)\n\n print ('total size',ix.sum())\n if (ix.sum() < 50):\n print('Not enough subjects. Next.')\n return (filterSTR, [])\n\n if modelType == 'lasso' or modelType == 'randomforest' or modelType == 'gradientboost' or modelType == 'lars':\n iters = 10\n model_weights_array = np.zeros((iters, x2.shape[1]), dtype=float)\n auc_test_list=np.zeros((iters), dtype=float); r2testlist = np.zeros((iters), dtype=float);\n for iteration in range(0, iters):\n randix = list(range(0, x2.shape[0]))\n random.shuffle(randix)\n randix = randix[0:int(len(randix)*0.9)]\n datax = x2[randix,:]; datay=y2[randix]; dataylabel = y2label[randix]; mrnx = mrns[randix]\n (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, mrnstrain, mrnstest) = train_regression(datax, datay, dataylabel, percentile, modelType, feature_headers, mrnx)\n model_weights_array[iteration, :] = model.coef_ if ((modelType == 'lasso') or (modelType == 'lars')) else model.feature_importances_\n auc_test_list[iteration] = auc_test; r2testlist[iteration] = r2test\n\n model_weights = model_weights_array.mean(axis=0)\n model_weights_std = model_weights_array.std(axis=0)\n model_weights_conf_term = (1.96/np.sqrt(iters)) * model_weights_std\n test_auc_mean = auc_test_list.mean()\n test_auc_mean_ste = (1.96/np.sqrt(iters)) * auc_test_list.std()\n r2test_mean = r2testlist.mean()\n r2test_ste = (1.96/np.sqrt(iters)) * r2testlist.std()\n\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n\n if return_data_for_error_analysis == True:\n print('lets analyse this')\n return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)\n else:\n (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, mrnstrain, mrnstest) = train_regression(x2, y2, y2label, percentile, modelType, feature_headers, mrnx)\n model_weights_conf_term = np.zeros((x2.shape[1]), dtype=float)\n test_auc_mean = auc_test; r2test_mean= r2test;\n test_auc_mean_ste = 0; r2test_ste=0\n\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , 
{2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n if return_data_for_error_analysis == True:\n print('lets analyse this')\n return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)\n\n if modelType == 'mlp':\n print ('you need to implement gradient to get top weights. ')\n return (filterSTR, [])\n\n sorted_ix = np.argsort(-1* abs(model_weights))\n weights = model_weights[sorted_ix]\n terms_sorted = model_weights_conf_term[sorted_ix]\n\n factors = np.array(feature_headers)[sorted_ix]\n x2_reordered = x2[:,sorted_ix]\n xtest_reordered = xtest[:, sorted_ix]\n\n ytestpred = model.predict(xtest)\n fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, ytestpred)\n operating_Thresholds = []\n operating_levels = [0, 0.0001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n ix_level = 0\n\n for ix, thr in enumerate(thresholds):\n if fpr[ix] >= operating_levels[ix_level]:\n operating_Thresholds.append(thr)\n ix_level += 1\n if ix_level == len(operating_levels):\n break\n\n operating_Thresholds = thresholds\n report_metrics = 'Test set metrics:\\n'\n prec_list = []\n recall_list = []\n spec_list = []\n for t in operating_Thresholds:\n tp = ((ytestlabel > 0) & (ytestpred.ravel() > t)).sum()*1.0\n tn = ((ytestlabel == 0) & (ytestpred.ravel() <= t)).sum()*1.0\n fn = ((ytestlabel > 0) & (ytestpred.ravel() <= t)).sum()*1.0\n fp = ((ytestlabel == 0) & (ytestpred.ravel() > t)).sum()*1.0\n\n sens = tp / (tp + fn)\n spec = tn / (tn + fp)\n ppv = tp / (tp + fp)\n acc = (tp + tn) / (tp + tn + fp + fn)\n f1 = 2*tp / (2*tp + fp + fn)\n\n report_metrics += '@threshold:{0:4.3f}, sens:{1:4.3f}, spec:{2:4.3f}, ppv:{3:4.3f}, acc:{4:4.3f}, f1:{5:4.3f} total+:{6:4.3f}\\n'.format(t, sens, spec, ppv, acc, f1, tp+fp)\n prec_list.append(ppv)\n recall_list.append(sens)\n spec_list.append(spec)\n\n print('total variables', x2.sum(axis=0).shape, ' and total subjects:', x2.shape[0])\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n print(report_metrics)\n\n occurances = (x2 != 0).sum(axis=0)[sorted_ix]\n zip_weights = {}\n sig_headers = []\n feature_categories = {}\n for i in range(0, (abs(model_weights)>0).sum()):\n fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, xtest_reordered[:,i].ravel())\n feature_auc_indiv = metrics.auc(fpr, tpr)\n corrs = corr_matrix_filtered[sorted_ix[i],:].ravel()\n top_corr_ix = np.argsort(-1*abs(corrs))\n corr_string = 'Correlated most with:\\n'+' '.join( [str(corr_headers_filtered[top_corr_ix[j]])+ ':' + \"{0:4.3f}\\n\".format(corrs[top_corr_ix[j]]) for j in range(0,10)] )\n\n tp = ((y2label > 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0\n tn = ((y2label == 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0\n fn = ((y2label > 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0\n fp = ((y2label == 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0\n\n if fp*fn*tp*tn == 0:\n oratio = np.nan\n low_OR = np.nan\n high_OR = np.nan\n else:\n oratio = tp*tn/(fp*fn)\n se = np.sqrt(1/tp + 1/fp + 1/tn + 1/fn)\n low_OR = np.exp(np.log(oratio) - 1.96 * se)\n high_OR = np.exp(np.log(oratio) + 1.96 * se)\n try:\n 
feature_categories[factors[i].split(':')[0]] += weights[i]\n except:\n feature_categories[factors[i].split(':')[0]] = weights[i]\n\n star = ' '\n if (low_OR > 1 or high_OR < 1): #or (weights[i]+terms_sorted[i]) < 0 or (weights[i]-terms_sorted[i]) > 0\n sig_headers.append(factors[i])\n star = '*'\n print(\"{8} {3} | coef {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}] | OR_adj {9:4.3f} [{10:4.3f} {11:4.3f}] | occ: {4} | OR_unadj: {5:4.3f} [{6:4.3f} {7:4.3f}] | indivs AUC:{12:4.3f}\".format(weights[i], weights[i]-terms_sorted[i], weights[i]+terms_sorted[i], factors[i], occurances[i], oratio, low_OR, high_OR, star, np.exp(weights[i]), np.exp(weights[i]-terms_sorted[i]), np.exp(weights[i]+terms_sorted[i]), feature_auc_indiv))\n print(corr_string)\n\n for k in feature_categories:\n print (k, \":\", feature_categories[k])\n\n if return_data and return_data_transformed:\n return (x2, y1, y1label, feature_headers, mrns, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)\n elif return_data and not return_data_transformed:\n return (original_data[0], original_data[1], original_data[2], original_data[3], original_data[4], filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)\n else:\n return (feature_headers, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)", "def lnlike(modelpatch,data,sig_smooth,sig_L2,sig_one,w_L2):\n \n # Likelihood given current psf model\n lnlike = 0.0\n for ii in range(data.npatches):\n patch = np.ravel(data.patches[ii])\n flux = np.dot(modelpatch.T,patch)/np.dot(modelpatch.T,modelpatch)\n model = modelpatch*flux\n lnlike += np.sum(0.5*(patch-model) ** 2\n / data.bkg_sigmas[ii]**2. + \\\n 0.5 * np.log(data.bkg_sigmas[ii]**2.))\n\n # Smoothness constraint\n if sig_smooth!=0:\n filt = np.array([[False,True,False],\n [True,True,True],\n [False,True,False]])\n nearest = ndimage.generic_filter(np.reshape(modelpatch,data.patchshape),\n sq_nearest, footprint=filt)\n lnlike += np.sum(nearest) * sig_smooth\n\n # L2 norm\n if sig_L2!=0:\n lnlike += np.sum((modelpatch*w_L2)**2.) * sig_L2\n\n # PSF total ~ 1\n if sig_one!=0:\n lnlike += (np.sum(modelpatch)-1)**2. 
* sig_one\n\n return lnlike", "def dataLikelihood(self, step):", "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n \"save predictions\"\n pred_df.to_csv(self.cfg.output_directory + \"hiclstm_%s_predictions_chr%s.csv\" % (self.cell, str(self.chr)),\n sep=\"\\t\")\n return pred_df", "def test_line_search_should_stop(self): # noqa: E501\n p1 = torch.tensor([0.1])\n p2 = torch.tensor([0.1])\n params = [p1, p2]\n optimizer = ConjugateGradientOptimizer(params, 0.01)\n expected_num_steps = 1\n\n loss_calls = 0\n first_time = True\n\n def f_loss():\n nonlocal loss_calls, first_time\n if first_time:\n first_time = False\n else:\n loss_calls += 1\n\n return -torch.tensor(loss_calls)\n\n kl_calls = 0\n\n def f_constrint():\n nonlocal kl_calls\n kl_calls += 1\n return -torch.tensor(kl_calls)\n\n descent_step = torch.tensor([0.05, 0.05])\n optimizer._backtracking_line_search(params, descent_step, f_loss,\n f_constrint)\n\n assert loss_calls == expected_num_steps\n assert kl_calls == expected_num_steps", "def test_single_linear_regression_r_squared(reg_model):\n assert(pytest.approx(reg_model.r_squared(), 0.01) == 0.52)", "def test_ridge_warning_in_fit_mle(self):\n # Bundle the arguments used to construct the nested logit model\n constructor_args = [self.fake_df,\n self.alt_id_col,\n self.obs_id_col,\n self.choice_col,\n self.fake_specification,\n self.fake_names]\n # Bundle the kwargs for constructing the nested_logit_model\n constructor_kwargs = {\"nest_spec\": self.fake_nest_spec}\n\n # Create the mnl model object whose coefficients will be estimated.\n base_nl = nl.NestedLogit(*constructor_args, **constructor_kwargs)\n\n # Create a variable for the fit_mle function's kwargs.\n # The print_res = False arguments are to make sure strings aren't\n # printed to the console unnecessarily.\n fit_kwargs = {\"constrained_pos\": [1],\n \"ridge\": 0.5,\n \"print_res\": False}\n\n # Test to make sure that the ridge warning message is printed when\n # using the ridge keyword argument\n with warnings.catch_warnings(record=True) as w:\n # Use this filter to always trigger the UserWarnings\n warnings.simplefilter('always', UserWarning)\n\n base_nl.fit_mle(self.fake_all_params, **fit_kwargs)\n self.assertGreaterEqual(len(w), 1)\n self.assertIsInstance(w[0].category, type(UserWarning))\n self.assertIn(nl._ridge_warning_msg, str(w[0].message))\n\n return None", "def bestfit_line(ptrm, nrm):\n stat = sufficient_statistics(ptrm, nrm)\n\n w = .5*(stat['S2xx'] - stat['S2yy'])/stat['S2xy']\n m = -w-np.sqrt(w**2+1)\n b = stat['ybar']-m*stat['xbar']\n return {'slope': m, 'intercept': b }", "def check_sgd_lr_fit(X, y):\n from sklearn.linear_model import LogisticRegression\n alpha = 0.01\n start = time.time()\n model = LogisticRegression(C=0.01, fit_intercept=True)\n model.fit(X, y)\n end = time.time()\n print \"params for lr with sgd: %s\" % np.hstack((model.intercept_, model.coef_[0]))\n print \"cost time %f seconds\" % (end - start)\n \n probs = model.predict_proba(X)\n logprob = 0\n for i in xrange(len(y)):\n logprob += np.log(probs[i][y[i]])\n print \"logprob=%f\" % logprob", "def bayesian_information_criteria(self):\n self.max_likelihood('diff_evo')\n l_hat = optimize.ln_likelihood(self.variable_medians, self.function, self.abscissa, self.ordinate)\n return np.log(self.x.size) * self.len_parameters - 2 * 
l_hat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regression test: the order of the input lines should not affect the results. There was a real bug introduced with the "likelihood_lines" feature; this test fails on NB versions 0.9.6 and 0.9.7!
def test_permuting_input_line_order(self): n = len(self.lines) for i, ind_tuple in enumerate(itertools.permutations(range(n))): # There are 5! = 120 permutations, so only check one in five: if i % 5 != 2: continue obs_fluxes = [self.obs_fluxes[j] for j in ind_tuple] obs_errs = [self.obs_errs[j] for j in ind_tuple] lines = [self.lines[j] for j in ind_tuple] Result_i = self.NB_Model_1(obs_fluxes, obs_errs, lines, likelihood_lines=self.likelihood_lines, **self.kwargs) P_i = Result_i.Posterior estimate_Z_i = P_i.DF_estimates.loc["12 + log O/H", "Estimate"] self.assertEqual(estimate_Z_i, self.estimate_Z)
[ "def test_non_likelihood_lines_in_best_model_table(self):\n self.assertTrue(all(l in self.DF_best.index for l in self.exclude_lines))", "def dataLikelihood(self, step):", "def likelihoods(self, step):", "def priorLikelihood(self, step):", "def likelihood(self, x: np.ndarray) -> np.ndarray:", "def test_line_search_should_stop(self): # noqa: E501\n p1 = torch.tensor([0.1])\n p2 = torch.tensor([0.1])\n params = [p1, p2]\n optimizer = ConjugateGradientOptimizer(params, 0.01)\n expected_num_steps = 1\n\n loss_calls = 0\n first_time = True\n\n def f_loss():\n nonlocal loss_calls, first_time\n if first_time:\n first_time = False\n else:\n loss_calls += 1\n\n return -torch.tensor(loss_calls)\n\n kl_calls = 0\n\n def f_constrint():\n nonlocal kl_calls\n kl_calls += 1\n return -torch.tensor(kl_calls)\n\n descent_step = torch.tensor([0.05, 0.05])\n optimizer._backtracking_line_search(params, descent_step, f_loss,\n f_constrint)\n\n assert loss_calls == expected_num_steps\n assert kl_calls == expected_num_steps", "def line_model(line_profile,*args):\n\t\tif (line_profile=='Gaussian'):\n\t\t\tline = gaussian(*args)\n\t\t\treturn line\n\t\telif (line_profile=='Lorentzian'):\n\t\t\tline = lorentzian(*args)\n\t\t\treturn line", "def test_ll_nom(self):\n pars = list(self.spec.central)\n nominal = self.spec(pars)\n self.spec.set_data(nominal) # nominal data\n stats = np.array(self.spec.stats)\n # event with nominal, ll penalty from poisson normalization\n ll = 0 # log likelihood\n ll += np.sum(logPoisson(nominal, nominal, stats))\n self.assertAlmostEqual(ll, self.spec.ll(pars))", "def same_mnb_loglin(eval_metrics, tx, ymean_true, num_train, trial, prop, model_type):\n model= load_model_trials(model_type, num_train, trial, prop=prop)\n log_post_probs = infer.mnb_mll_curve(tx, model)\n return after_log_post_probs(eval_metrics, log_post_probs)", "def insignificant(inp):\n for i in range(inp.no_grains):\n if inp.nrefl[i] < inp.fit['min_refl'] and i+1 not in inp.fit['skip']:\n inp.fit['skip'].append(i+1)\n inp.fit['skip'].sort()", "def test_LinearRegression_err():\n np.random.seed(0)\n X = np.random.random((10, 1))\n y = np.random.random(10) + 1\n dy = 0.1\n\n y = np.random.normal(y, dy)\n\n clf1 = LinearRegression().fit(X, y, dy)\n clf2 = skLinearRegression().fit(X / dy, y / dy)\n\n assert_allclose(clf1.coef_[1:], clf2.coef_)\n assert_allclose(clf1.coef_[0], clf2.intercept_ * dy)", "def test_In_lhood_field_in_best_model_table(self):\n correct = [(\"N\" if l in self.exclude_lines else \"Y\") for l in self.lines]\n self.assertTrue(self.DF_best[\"In_lhood?\"].values.tolist() == correct)", "def baseline_linear_model(train_input, train_target, test_input, test_target):\n\n def add_squared(data):\n \"\"\"\n append the sum of the coordinates squared to the test_input\n :input: numpy array containing the coordinates of each point\n :output: input appended with the sum of the square of the coordinates\n \"\"\"\n square = np.power(data,2)[:,0]+np.power(data,2)[:,1].reshape(1,-1)\n square = np.transpose(square)\n return np.append(data,square,axis=1)\n\n # train the model\n X = train_input.numpy()\n X = add_squared(X)\n Y = train_target.numpy()\n clf = linear_model.SGDClassifier(max_iter=5000)\n clf.fit(X, Y.ravel())\n\n # test the function\n test = test_input.numpy()\n test = add_squared(test)\n Y_test = test_target.numpy()\n nb_errors = 0\n for i,x in enumerate(clf.predict(test)):\n if x!= Y_test[i]:\n nb_errors += 1\n print('Baseline accuracy:',100-nb_errors/test.shape[0]*100,'%')", "def 
test_multi_reg_plot_line_fails(pt_multiple_reg):\n with pytest.raises(\n AssertionError, match=\"linear-regression line not displayed properly\"\n ):\n pt_multiple_reg.assert_lines_of_type(\"linear-regression\")", "def test_line_type_reg(pt_reg_data):\n pt_reg_data.assert_lines_of_type(\"linear-regression\")", "def test6(self):\n assert self.obj.doesLinesIntersect([1,1], [1,1]) == False, \"Co-Ordinates with dot is not a line\"", "def test_splines(t=[0.3, 0.5, 0.6], verbose=True):\n import numpy as np\n import matplotlib.pyplot as mp\n n = 41 #101\n x = np.arange(n)/(n-1.0)\n I, M = isplines(x, t, both=True, verbose=verbose)\n if len(t) == 3:\n a = np.array([1.2, 2.0, 1.2, 1.2, 3.0, 0.0])\n else:\n a = np.ones(len(t) + 3)\n a[-1] = 0.0\n ym = np.sum(a*M.T, axis=1)\n yi = np.sum(a*I.T, axis=1)/6.0\n mp.clf()\n mp.subplot(2,1,1)\n for i in range(len(I)):\n mp.plot(x, I[i], color='k')\n for v in t:\n mp.axvline(v, linestyle='dotted', color='k')\n mp.plot(x, yi, '.', color='k')\n mp.subplot(2,1,2)\n for i in range(len(M)):\n mp.plot(x, M[i], color='k')\n for v in t:\n mp.axvline(v, linestyle='dotted', color='k')\n mp.plot(x, ym, '.', color='k')", "def distance_to_line(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def test_line_ending_nva_evaluator(self):\n pos_evaluator = NounVerbAdjectiveLineEndingEvaluator()\n \n #comment with 2 lines that end in noun/verbs\n text = HaikuText(text=\"An old silent pond... A frog jumps into the pond. Splash! Silence again.\")\n haiku = text.get_haiku()\n #should score 66\n self.assertEqual(pos_evaluator(haiku), 66)\n\n # 1 verb, 1 noun, 1 pronoun\n text.set_text(\"Application is the most wonderful artist that man can show us\")\n haiku = text.get_haiku()\n #should score 66\n self.assertEqual(pos_evaluator(haiku), 2*100/3) \n \n #No verbs/nouns at line ends,\n text.set_text(\"They jumped ship on us the boat is very never that man can show us\")\n haiku = text.get_haiku()\n \n self.assertEqual(pos_evaluator(haiku), 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test correct error is raised if there are too few unique values for a grid parameter.
def test_bad_grid_parameter_with_too_few_unique_values(self): DF = pd.DataFrame({"p1": [4, 4, 4, 4], "p2": [1, 2, 3, 4], "l2": [5, 6, 7, 8]}) self.assertRaisesRE(ValueError, "3 unique values are required", NB_Model, DF, ["p1", "p2"])
[ "def test_is_grid_row_invalid():\n assert not sudoku.is_grid_valid(BAD_ROW_GRID)", "def test_grid_list_cell_outside_range_invalid():\n assert not sudoku.no_wrong_integers(BAD_INTEGER_OUTSIDE_RANGE)", "def test_check_unique_header(self):\n header = [\"GENE\", \"foo\", \"foo2\", \"foo3\"]\n self.assertTrue(DenseIngestor.check_unique_header(header))\n\n invalid_header = [\"GENE\", \"foo\", \"foo\", \"foo3\"]\n self.assertRaises(ValueError, DenseIngestor.check_unique_header, invalid_header)", "def test_param_invalid_input_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.dataempty, self.dataout, 100.0)", "def _check_legal_index(self, row, col):\n return 0 <= row and row < self._size and\\\n 0 <= col and col < self._size", "def _assert_format_values_are_unique() -> None:\n values = [entry.value for entry in Format]\n if len(values) != len(set(values)):\n raise AssertionError(f\"Format values must be unique, but got: {values}\")", "def validate_grid(self) -> bool:\n if not self.grid:\n return False\n\n # grid type and length\n if type(self.grid) is not list or len(self.grid) != 9:\n return False\n\n for row in self.grid:\n # rows type and length\n if type(row) is not list or len(row) != 9:\n return False\n\n # numbers type and range\n for number in row:\n if type(number) is not int or number not in range(0, 10):\n return False\n\n # duplicates in row, column, box\n for i in range(1, 10):\n row = self.get_row(i)\n column = self.get_column(i)\n box = self.get_box(i)\n\n for number in range(1, 10):\n if row.count(number) > 1 or column.count(number) > 1 or \\\n box.count(number) > 1:\n return False\n\n return True", "def check_unique_results(db_results):\n result_count = len(db_results)\n unique_key_column_values = set([row[0] for row in db_results])\n key_count = len(unique_key_column_values)\n\n if result_count > key_count:\n raise ValueError('Values in the first column are not unique. 
Try \"add_list_info\" function instead.')\n\n return", "def test_param_invalid_output_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.data, self.dataempty, 100.0)", "def test_check_num_elements_wrong_ndomains(self):\n component_grids = _ComponentGrids(self._GRID_LONGNAME)\n # In the following, there should be 3 elements, but we only specify 2\n gridinfo = {\"GLC_DOMAIN_MESH\": \"foo:bar\"}\n\n self.assertRaisesRegex(\n CIMEError,\n \"Unexpected number of colon-delimited elements\",\n component_grids.check_num_elements,\n gridinfo,\n )", "def test_init_params_tuple_too_long(self):\n with self.assertRaises(ValueError):\n insightiq_api.Parameters([(2,3,4,5)])", "def check(data):\n tmp_len = len(data[0])\n row_num = 0\n for row in data:\n row_num += 1\n if len(row) != tmp_len:\n raise ImageError(\n \"row number {0} has different length from first row\".format(\n row_num),\n 'bad_row_length'\n )\n for pixel in row:\n if type(pixel) == tuple:\n if len(pixel) < 3 or len(pixel) > 4:\n raise ImageError(\n \"'{0}' is not a valid pixel tuple\".format(pixel),\n 'bad_pixel_length'\n )\n elif type(pixel) != int and not re.match('.*numpy\\.[u]?int(8|16|32|64)',\n str(type(pixel))):\n raise ImageError(\n \"'{0}' is not a valid pixel value\".format(pixel),\n 'bad_pixel_value'\n )", "def test_error_ignored_args(coordinates_small, data_small, region):\n # Define sample equivalent sources and fit against synthetic data\n eqs = EquivalentSourcesGB(window_size=500).fit(coordinates_small, data_small)\n # Build a target grid\n grid_coords = vd.grid_coordinates(region=region, shape=(4, 4), extra_coords=2e3)\n # Try to grid passing kwarg arguments that will be ignored\n msg = \"The 'bla' arguments are being ignored.\"\n with pytest.warns(FutureWarning, match=msg):\n eqs.grid(coordinates=grid_coords, bla=\"bla\")", "def test_error_invalid_probability_units(probability_above_cube, interpreter):\n probability_above_cube.units = \"no_unit\"\n with pytest.raises(ValueError, match=\"Expected units of 1 on probability data\"):\n interpreter.run(probability_above_cube)", "def test_input_dim_exceptions(sample_ds_1d):\n with pytest.raises(ValueError) as e:\n BatchGenerator(sample_ds_1d, input_dims={\"x\": 110})\n assert len(e) == 1", "def checkGridN(n, grid):\n\n numbers = []\n for a in range((n%3)*3, (n%3 + 1)*3):\n for b in range((n//3)*3, (n//3 + 1)*3):\n numbers.append(grid[b][a])\n\n for num in range(1,10):\n if numbers.count(num) != 1:\n return False\n return True", "def check_param_values(model, **user_param):\n model_param = model.param\n # Check that the appropriate number of params are provided\n if len(user_param) != len(model_param):\n raise ValueError(f\"Invalid model parameters, expected {len(model_param)} \"\n f\"but {len(user_param)} were given\")\n\n # Check that user-requested params have valid units and values\n for (key, allowed_params), user_param in zip(model_param.items(), user_param.values()):\n # If both have units, check that the user param value is valid. If valid, continue. 
Else, error\n if type(user_param) == Quantity and type(allowed_params) == Quantity:\n if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):\n raise UnitTypeError(f\"Incorrect units {user_param.unit} provided for parameter {key}, \"\n f\"expected {allowed_params.unit}\")\n elif user_param.to(allowed_params.unit).value in allowed_params.value:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # If one only one has units, then error\n elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):\n # User param has units, model param is unitless\n if type(user_param) == Quantity:\n raise ValueError(f\"Invalid units {user_param.unit} for parameter {key} provided, expected None\")\n else:\n raise ValueError(f\"Missing units for parameter {key}, expected {allowed_params.unit}\")\n\n # Check that unitless user param value is valid. If valid, continue. Else, Error\n elif user_param in allowed_params:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")", "def check_var_values_num(self):\n color_error = \"red\"\n color_valid = \"black\"\n valid = True\n error_message = \"\"\n message_sp_flux = \"The unit is already provided in the Sources (Fixed Parameter Configuration).\"\n\n for key in self.var_param_entries_num.keys():\n entries = self.var_param_entries_num[key]\n entry_min = entries[1]\n entry_max = entries[2]\n entry_steps = entries[3]\n entry_units = entries[4]\n\n entry_min.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n entry_max.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n entry_steps.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n entry_units.config(highlightbackground=color_valid, highlightcolor=color_valid, highlightthickness=0)\n\n min_value = entry_min.get()\n max_value = entry_max.get()\n steps_value = entry_steps.get()\n units_value = entry_units.get()\n\n if not min_value.replace('.', '', 1).isdigit():\n if \"- Min must be numeric\" not in error_message:\n error_message += \"- Min must be numeric\" + \"\\n\"\n valid = False\n entry_min.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n if not max_value.replace('.', '', 1).isdigit():\n if \"- Max must be numeric\" not in error_message:\n error_message += \"- Max must be numeric\" + \"\\n\"\n valid = False\n entry_max.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n if not steps_value.isdigit():\n if \"- Steps must be a positive integer\" not in error_message:\n error_message += \"- Steps must be a positive integer\" + \"\\n\"\n valid = False\n entry_steps.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n elif int(steps_value) < 2:\n if \"- Steps must be positive and greater than 1\" not in error_message:\n error_message += \"- Steps must be positive and greater than 1\" + \"\\n\"\n valid = False\n entry_steps.config(highlightbackground=color_error, highlightcolor=color_error, highlightthickness=2)\n if \"sp_flux\" in self.var_param_entries_num.keys():\n if units_value:\n if \"- Units for sp_flux must be empty. \" + message_sp_flux not in error_message:\n error_message += \"- Units for sp_flux must be empty. 
\" + message_sp_flux\n valid = False\n entry_units.config(highlightbackground=color_error, highlightcolor=color_error,\n highlightthickness=2)\n\n if not valid:\n self.valid_num = False\n tkMessageBox.showerror(\"Invalid Input\", error_message)\n else:\n self.valid_num = True", "def test_out_of_range(self):\n self.assert_initialize_driver()\n self.assert_set_exception(Parameter.CYCLE_TIME, 14)\n self.assert_set_exception(Parameter.CYCLE_TIME, 3601)\n\n # verify we can set read/write parameters\n self.assert_set(Parameter.CYCLE_TIME, 30)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function needs to be called manually to test the interactive plotting: from test_NB import interactive_plot_tests; interactive_plot_tests()
def interactive_plot_tests(): lines = ["OII3726_29", "Hgamma", "OIII4363", "Hbeta", "OIII5007", "NI5200", "OI6300", "Halpha", "NII6583", "SII6716", "SII6731"] obs_fluxes = [1.22496, 0.3991, 0.00298, 1.0, 0.44942, 0.00766, 0.02923, 4.25103, 1.65312, 0.45598, 0.41482] obs_errs = [0.00303, 0.00142, 0.00078, 0.0017, 0.0012, 0.00059, 0.00052, 0.00268, 0.00173, 0.00102, 0.00099] obs_wavelengths = [3727.3, 4340.5, 4363.2, 4861.3, 5006.8, 5200.3, 6300.3, 6562.8, 6583.2, 6716.4, 6730.8] NB_Model_1 = NB_Model("HII", grid_params=None, line_list=lines, interpd_grid_shape=[50, 70, 50], grid_error=0.35) kwargs = {"deredden": True, "propagate_dered_errors": True, "obs_wavelengths": obs_wavelengths, "prior":[("SII6716","SII6731")], "plot_configs": [{"table_on_plot": True, "legend_fontsize": 5}]*4, } Result = NB_Model_1(obs_fluxes, obs_errs, lines, **kwargs) # Test both ways to make an interactive plot Result.Plotter.interactive(Result.Posterior) Result.Prior.show(Result.Plotter)
[ "def test_plot_instance_components(browser_backend):\n raw = _get_raw()\n picks = _get_picks(raw)\n ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)\n with pytest.warns(RuntimeWarning, match=\"projection\"):\n ica.fit(raw, picks=picks)\n ica.exclude = [0]\n fig = ica.plot_sources(raw, title=\"Components\")\n keys = (\n \"home\",\n \"home\",\n \"end\",\n \"down\",\n \"up\",\n \"right\",\n \"left\",\n \"-\",\n \"+\",\n \"=\",\n \"d\",\n \"d\",\n \"pageup\",\n \"pagedown\",\n \"z\",\n \"z\",\n \"s\",\n \"s\",\n \"b\",\n )\n for key in keys:\n fig._fake_keypress(key)\n x = fig.mne.traces[0].get_xdata()[0]\n y = fig.mne.traces[0].get_ydata()[0]\n fig._fake_click((x, y), xform=\"data\")\n fig._click_ch_name(ch_index=0, button=1)\n fig._fake_keypress(\"escape\")\n browser_backend._close_all()\n\n epochs = _get_epochs()\n fig = ica.plot_sources(epochs, title=\"Components\")\n for key in keys:\n fig._fake_keypress(key)\n # Test a click\n x = fig.mne.traces[0].get_xdata()[0]\n y = fig.mne.traces[0].get_ydata()[0]\n fig._fake_click((x, y), xform=\"data\")\n fig._click_ch_name(ch_index=0, button=1)\n fig._fake_keypress(\"escape\")", "def main_simple_test():\n fig, axes, axesImage = generate_test_image()\n\n list_of_objs = generate_list_of_objects_for_axes(axes, axesImage)\n\n #Add one more object\n obj = DragLine() # call W/O parameters => object will be initialized at first mouse click\n add_obj_to_axes(obj, axes, list_of_objs)\n\n plt.get_current_fig_manager().window.move(50, 10)\n plt.show()", "def main_full_test():\n fig, axes, axesImage = generate_test_image()\n\n list_of_objs = generate_list_of_objects_for_axes(axes, axesImage)\n\n t = TestDragLine(fig, axes)\n t .set_list_of_objs(list_of_objs)\n\n plt.get_current_fig_manager().window.move(50, 10)\n plt.show()", "def turn_on_interactive_and_show():\n plt.ion()\n plt.show()", "def set_ipynb():\n ### TICKS\n tick_maj_size = 10\n tick_maj_pad = 5\n tick_min_size = 5\n tick_min_pad = 5\n tick_labelsize = 14\n tick_dict = {'major.size':tick_maj_size, 'major.pad':tick_maj_pad,\n 'minor.size':tick_min_size, 'minor.pad':tick_min_pad,\n 'labelsize':tick_labelsize}\n pl.rc('xtick', **tick_dict)\n pl.rc('ytick', **tick_dict)\n linewidth = 1\n axes_labelsize = 16\n ### AXES\n pl.rc('axes', lw=linewidth, labelsize=axes_labelsize)\n ### LINES\n pl.rc('lines', lw=linewidth, color='k', mew=linewidth) \n pl.rc('legend', numpoints=1, scatterpoints=1, frameon=False)\n pl.rc('patch', edgecolor='None')\n ### FIGURE\n pl.rc('figure', figsize=(8,6))\n pl.rc('figure.subplot', left=0.15, bottom=0.15, top=0.95, right=0.95)\n\n pl.rc('mathtext', default='regular')", "def testPlotWidgetWithItems(self):\n self.plot.addImage(((0, 1), (2, 3)), legend='image')\n self.plot.addScatter((3, 2, 1), (0, 1, 2), (0, 1, 2), legend='scatter')\n self.plot.addCurve((0, 1, 2), (0, 1, 2), legend='curve')\n self.plot.setActiveCurve('curve')\n\n selection = self.plot.selection()\n self.assertIsNotNone(selection.getCurrentItem())\n selected = selection.getSelectedItems()\n self.assertEqual(len(selected), 3)\n self.assertIn(self.plot.getActiveCurve(), selected)\n self.assertIn(self.plot.getActiveImage(), selected)\n self.assertIn(self.plot.getActiveScatter(), selected)", "def test_plotting():\n # To test that the plotting works\n current_test_parameters_1 = dict(temperature_degC=450, force_N=5, pressure_MPa=0.34, speed_mmpersecond=50, lubricant_thickness_micrometres=25, lubricant_volume_gperm2=23.2,\n mu0_lubricated = 1.69073, Q_lubricated = 9141.50683,\n mu0_dry = 10.94225, Q_dry = 
9368.85126, eta_0 = 0.12,\n Q_eta = 11930, lambda_1 = 40, lambda_2 = 1.5, c = 0.00847,\n k_1 = 1.52, k_2 = 2.77, k_3 = 4.8)\n\n plotting_range = np.linspace(0, 100, 420)\n ans = solve_all(plotting_range, current_test_parameters_1['lubricant_thickness'], current_test_parameters_1, time_input=False)\n plt.plot(plotting_range, ans)\n plt.xlabel(\"Sliding Distance / mm\")\n plt.ylabel(\"Coefficient of Friction\")\n plt.xlim(0)\n plt.ylim(0)\n plt.grid()\n plt.show()", "def outflow_test_plot_nii(comp_dict_outflow,comp_dict_no_outflow,run_dir):\n\n\t# Creat plot window and axes\n\tfig = plt.figure(figsize=(14,11)) \n\tgs = gridspec.GridSpec(9,1)\n\tax1 = fig.add_subplot(gs[0:3,0]) # No outflow\n\tax2 = fig.add_subplot(gs[3:4,0]) # No outflow residuals\n\tax3 = fig.add_subplot(gs[5:8,0]) # Outflow\n\tax4 = fig.add_subplot(gs[8:9,0]) # Outflow residuals\n\tgs.update(wspace=0.0, hspace=0.0) # set the spacing between axes. \n\t# No outflow model (ax1,ax2)\n\tnorm = np.median(comp_dict_no_outflow['data']['comp'])\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['data']['comp'] , color='xkcd:white' , linewidth=0.5, linestyle='-' , label='Data' ) \n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['model']['comp'] , color='xkcd:red' , linewidth=1.0, linestyle='-' , label='Model' ) \n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['host_galaxy']['comp'] , color='xkcd:lime green' , linewidth=1.0, linestyle='-' , label='Galaxy' )\n\tif ('power' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['power']['comp'] , color='xkcd:orange red' , linewidth=1.0, linestyle='--', label='AGN Cont.' )\n\tif ('na_feii_template' in comp_dict_no_outflow) and ('br_feii_template' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='Na. FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['br_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='Br. FeII' )\n\telif ('F_feii_template' in comp_dict_no_outflow) and ('S_feii_template' in comp_dict_no_outflow) and ('G_feii_template' in comp_dict_no_outflow) and ('Z_feii_template' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['F_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='F-transition FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['S_feii_template']['comp'], color='xkcd:mustard' , linewidth=1.0, linestyle='-' , label='S_transition FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['G_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='G_transition FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['Z_feii_template']['comp'], color='xkcd:rust' , linewidth=1.0, linestyle='-' , label='Z_transition FeII' )\n\tif ('br_Ha' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['br_Ha']['comp'] , color='xkcd:turquoise' , linewidth=1.0, linestyle='-' , label='Br. H-alpha' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_Ha_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' , label='Core comp.' 
)\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_nii6549_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_nii6585_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_sii6718_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_sii6732_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax1.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax1.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax1.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax1.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\t# ax1.plot(comp_dict_no_outflow['wave']['comp'], 1*comp_dict_no_outflow['noise']['comp'], color='xkcd:dodger blue' , linewidth=0.5, linestyle='--')\n\t# ax1.plot(comp_dict_no_outflow['wave']['comp'], 2*comp_dict_no_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# ax1.plot(comp_dict_no_outflow['wave']['comp'], 3*comp_dict_no_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax1.set_ylabel(r'$f_\\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\\rm{\\AA}^{-1}$)')\n\tax1.set_xticklabels([])\n\tax1.legend(loc='upper left',fontsize=6)\n\tax1.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax1.set_ylim(0.0,np.max(comp_dict_no_outflow['model']['comp'])+3*np.median(comp_dict_no_outflow['noise']['comp']))\n\tax1.set_title('No Outflow Model')\n\t# No Outflow Residuals\n\tax2.plot(comp_dict_no_outflow['wave']['comp'],3*(comp_dict_no_outflow['data']['comp']-comp_dict_no_outflow['model']['comp']), color='xkcd:white' , linewidth=0.5, linestyle='-')\n\tax2.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax2.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax2.axhline(0.0, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.plot(comp_dict_no_outflow['wave']['comp'], 3*1*comp_dict_no_outflow['noise']['comp'], color='xkcd:bright aqua' , linewidth=0.5, linestyle='-')\n\t# ax2.plot(comp_dict_no_outflow['wave']['comp'], 3*2*comp_dict_no_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# ax2.plot(comp_dict_no_outflow['wave']['comp'], 3*3*comp_dict_no_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax2.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)')\n\tax2.set_ylabel(r'$\\Delta f_\\lambda$')\n\tax2.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax2.set_ylim(0.0-9*np.std(comp_dict_no_outflow['resid']['comp']),ax1.get_ylim()[1])\n # Outlfow models (ax3,ax4)\n\tnorm = np.median(comp_dict_outflow['data']['comp'])\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['data']['comp'] , color='xkcd:white' , linewidth=0.5, linestyle='-' , label='Data' ) 
\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['model']['comp'] , color='xkcd:red' , linewidth=1.0, linestyle='-' , label='Model' ) \n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['host_galaxy']['comp'] , color='xkcd:lime green' , linewidth=1.0, linestyle='-' , label='Galaxy' )\n\tif ('power' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['power']['comp'] , color='xkcd:orange red' , linewidth=1.0, linestyle='--', label='AGN Cont.' )\n\tif ('na_feii_template' in comp_dict_outflow) and ('br_feii_template' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='Na. FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['br_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='Br. FeII' )\n\telif ('F_feii_template' in comp_dict_outflow) and ('S_feii_template' in comp_dict_outflow) and ('G_feii_template' in comp_dict_outflow) and ('Z_feii_template' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['F_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='F-transition FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['S_feii_template']['comp'], color='xkcd:mustard' , linewidth=1.0, linestyle='-' , label='S_transition FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['G_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='G_transition FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['Z_feii_template']['comp'], color='xkcd:rust' , linewidth=1.0, linestyle='-' , label='Z_transition FeII' )\n\tif ('br_Ha' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['br_Ha']['comp'] , color='xkcd:turquoise' , linewidth=1.0, linestyle='-' , label='Br. H-alpha' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_Ha_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' , label='Core comp.' 
)\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6549_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6585_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6718_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6732_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_Ha_outflow']['comp'] , color='xkcd:magenta' , linewidth=1.0, linestyle='-', label='Outflow comp.')\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6549_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6585_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6718_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6732_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax3.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax3.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax3.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax3.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\t# ax3.plot(comp_dict_outflow['wave']['comp'], 1*comp_dict_outflow['noise']['comp'], color='xkcd:dodger blue' , linewidth=0.5, linestyle='--')\n\t# ax3.plot(comp_dict_outflow['wave']['comp'], 2*comp_dict_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# ax3.plot(comp_dict_outflow['wave']['comp'], 3*comp_dict_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax3.set_ylabel(r'$f_\\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\\rm{\\AA}^{-1}$)')\n\tax3.set_xticklabels([])\n\tax3.legend(loc='upper left',fontsize=6)\n\tax3.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax3.set_ylim(0.0,np.max(comp_dict_outflow['model']['comp'])+3*np.median(comp_dict_outflow['noise']['comp']))\n\tax3.set_title('Outflow Model')\n\t# Outflow Residuals\n\tax4.plot(comp_dict_outflow['wave']['comp'],3*(comp_dict_outflow['data']['comp']-comp_dict_outflow['model']['comp']), color='xkcd:white' , linewidth=0.5, linestyle='-')\n\tax4.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axhline(0.0, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.plot(comp_dict_outflow['wave']['comp'], 3*1*comp_dict_outflow['noise']['comp'], color='xkcd:bright aqua' , linewidth=0.5, linestyle='-')\n\t# ax4.plot(comp_dict_outflow['wave']['comp'], 3*2*comp_dict_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# 
ax4.plot(comp_dict_outflow['wave']['comp'], 3*3*comp_dict_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax4.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)')\n\tax4.set_ylabel(r'$\\Delta f_\\lambda$')\n\tax4.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax4.set_ylim(0.0-9*np.std(comp_dict_outflow['resid']['comp']),ax3.get_ylim()[1])\n \n\tfig.tight_layout()\n\tplt.savefig(run_dir+'outflow_test.pdf',fmt='pdf',dpi=150)\n\n\tplt.close()\n\t# Collect garbage\n\tdel ax1\n\tdel ax2\n\tdel ax3\n\tdel ax4\n\tdel fig \n\tdel comp_dict_outflow\n\tdel comp_dict_no_outflow\n\tgc.collect()\n\n\treturn None", "def test_plotexample(self):\n for directed in (True, False):\n net = example_network(directed=directed, bigger=True)\n for full in (True, False):\n if full:\n target = \"example_plot_{0}.png\".format(\n \"directed\" if directed else \"undirected\"\n )\n net.plot(target)\n else:\n net.cxneighborhood(\"donald_duck\", plot=\"pdf\")", "def test_plot_fit_not_implemented():\n plot_fit(display=False, fittype='not implemented')", "def test_learning_curve_plots(self):\n # Test to validate learning curve output\n self.make_learning_curve_data()\n\n config_template_path = config_dir / \"test_learning_curve.template.cfg\"\n config_path = fill_in_config_paths(config_template_path)\n\n # run the learning curve experiment\n run_configuration(config_path, quiet=True, local=True)\n outprefix = \"test_learning_curve\"\n\n # make sure that the four PNG files (two per featureset) are created\n for featureset_name in [\"test_learning_curve1\", \"test_learning_curve2\"]:\n path_score = output_dir / f\"{outprefix}_{featureset_name}.png\"\n path_time = output_dir / f\"{outprefix}_{featureset_name}_times.png\"\n self.assertTrue(path_score.exists())\n self.assertTrue(path_time.exists())", "def experiment(self, x_train, x_test, y_train, y_test, **kwargs):\n\n print('\\n--------------------------')\n self.plot_model_complexity(x_train, y_train, **kwargs)\n self.plot_learning_curve(x_train, y_train, **kwargs)\n self.fit(x_train, y_train)\n self.evaluate(x_test, y_test)", "def showPlot2():\n title(\"Clean time for 25x25 vs Number of Robots\")\n xlabel(\"Number of Robots\")\n ylabel(\"Average time\")\n means = []\n for i in range(1, 11):\n means.append(runSimulation(i, 1.0, 25, 25, 0.75, 30, Robot, False))\n num_robots = []\n for i in range(1,11):\n num_robots.append(i)\n plot(num_robots, means)", "def test_plotting():\n # To test that the plotting works\n current_test_parameters_1 = dict(T=250, F=5, P=0.34, v=50, h0=25, V=23.2,\n mu0_lubricated = 1.69073, Q_lubricated = 9141.50683,\n mu0_dry = 10.94225, Q_dry = 9368.85126, eta_0 = 0.12,\n Q_eta = 11930, lambda_1 = 20, lambda_2 = 1.1, c = 0.012,\n k_1 = 2.05, k_2 = 0.7, k_3 = 5.3)\n\n plotting_range = np.linspace(0, 1000000, 420)\n ans = solve_all(plotting_range, current_test_parameters_1['h0'], current_test_parameters_1, time_input=False)\n plt.plot(plotting_range, ans)\n plt.xlabel(\"Sliding Distance / mm\")\n plt.ylabel(\"Coefficient of Friction\")\n plt.xlim(0)\n plt.ylim(0)\n plt.grid()\n plt.show()", "def performAnalysis(self) :\n self.lowIncomePlot()\n self.boxPlot()", "def test_notebook(path):\n import nbconvert\n print('Running ' + path + ' ... 
', end='')\n sys.stdout.flush()\n\n # Load notebook, convert to python\n e = nbconvert.exporters.PythonExporter()\n code, __ = e.from_filename(path)\n\n # Remove coding statement, if present\n ipylines = ['ipython', 'show(']\n code = '\\n'.join([x for x in code.splitlines() if not 'ipython' in x])\n for x in code.splitlines():\n if not any(s in ipylines for s in x):\n code += '\\n'.join([x])\n # print(code)\n\n # Tell matplotlib not to produce any figures\n env = os.environ.copy()\n env['MPLBACKEND'] = 'Template'\n\n # Run in subprocess\n start = time.time()\n cmd = [sys.executable, '-c', code]\n try:\n p = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n )\n stdout, stderr = p.communicate()\n # TODO: Use p.communicate(timeout=3600) if Python3 only\n if p.returncode != 0:\n # Show failing code, output and errors before returning\n print('ERROR')\n # print('-- script ' + '-' * (79 - 10))\n # for i, line in enumerate(code.splitlines()):\n # j = str(1 + i)\n # print(j + ' ' * (5 - len(j)) + line)\n print('-- stdout ' + '-' * (79 - 10))\n print(stdout)\n print('-- stderr ' + '-' * (79 - 10))\n print(stderr)\n print('-' * 79)\n return False\n except KeyboardInterrupt:\n p.terminate()\n stop = time.time()\n print('ABORTED after', round(stop-start,4), \"s\")\n sys.exit(1)\n\n # Successfully run\n stop = time.time()\n print('ok. Run took ', round(stop-start,4), \"s\")\n return True", "def test_learning_curve_plots_with_objectives(self):\n # Test to validate learning curve output\n self.make_learning_curve_data()\n\n config_template_path = config_dir / \"test_learning_curve.template.cfg\"\n config_path = fill_in_config_paths(config_template_path)\n\n # run the learning curve experiment\n run_configuration(config_path, quiet=True, local=True)\n outprefix = \"test_learning_curve\"\n\n # make sure that the four PNG files (two per featureset) are created\n for featureset_name in [\"test_learning_curve1\", \"test_learning_curve2\"]:\n path_score = output_dir / f\"{outprefix}_{featureset_name}.png\"\n path_time = output_dir / f\"{outprefix}_{featureset_name}_times.png\"\n self.assertTrue(path_score.exists())\n self.assertTrue(path_time.exists())", "def plot_iteration(line: str):\n args = parse_argstring(plot_iteration, line)\n\n items = []\n\n from ipywidgets.widgets.interaction import show_inline_matplotlib_plots\n\n global __instances, __instantiated_experiments\n for exp_instance, exp_config in zip(__instances, __instantiated_experiments):\n out = Output()\n items.append(out)\n with out:\n # clear_output(wait=True)\n figures = __iteration_plot_functions[args.plotter_name](exp_instance, args.args)\n show_inline_matplotlib_plots()\n if args.save_figures:\n if args.format is None:\n args.format = plt.rcParams['savefig.format']\n os.makedirs('plots/{}'.format(exp_config['name']), exist_ok=True)\n for i, f in enumerate(figures):\n filename = 'plots/{}/{}figure_{}.{}'.format(exp_config['name'], args.prefix, i, args.format)\n if args.format == 'tikz':\n try:\n from matplotlib2tikz import save as tikz_save\n with Output():\n tikz_save(filename, figureheight='\\\\figureheight', figurewidth='\\\\figurewidth')\n except ModuleNotFoundError:\n warnings.warn('Saving figure as tikz requires the module matplotlib2tikz.')\n else:\n f.savefig(filename, format=args.format)\n\n if len(items) > 1:\n tabs = Tab(children=items)\n for i, exp in enumerate(__instantiated_experiments):\n if args.tab_title:\n if (args.tab_title[0] == args.tab_title[-1]) and args.tab_title.startswith((\"'\", 
'\"')):\n selectors = args.tab_title[1:-1]\n else:\n selectors = args.tab_title\n selectors = selectors.split(' ')\n values = [reduce(lambda a, b: a[b], [exp['params'], *selector.split('.')]) for selector in\n selectors]\n tabs.set_title(i, ' '.join(map(str, values)))\n else:\n tabs.set_title(i, '...' + exp['name'][-15:])\n display(tabs)\n elif len(items) == 1:\n return items[0]\n else:\n warnings.warn('No plots available for {} with args {}'.format(args.plotter_name, args.args))", "def test_plots(self):\n # Make CLEAR obs\n clear = specialsoss.SossExposure(self.uncal)\n\n # No results\n clear.plot_results()\n\n # Extract some results\n clear.extract('sum', 'uncal')\n\n # Test plot_frames\n fig = clear.plot_frames(draw=False)\n\n # Test result plot\n fig = clear.plot_results(draw=False)\n\n # Bad name\n self.assertRaises(ValueError, clear.plot_results, name='FOO', draw=True)\n\n # Bad dtype\n self.assertRaises(ValueError, clear.plot_results, dtype='FOO', draw=True)\n\n # Test comparison plot\n fig = clear.compare_results(dtype='counts', draw=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Commands to migrate records from legacy.
def migrate():
[ "def Migrate(self):\n\n # TODO(amoser): This doesn't do anything yet.\n pass", "def record(recid):\n click.echo(f\"Migrating record {recid} from INSPIRE legacy\")\n migrate_record_from_legacy(recid)", "def migrate() -> None:\n run_migration()", "def migrate(params='', do_backup=False):\n # if do_backup:\n # dump_db()\n return django_cmd('migrate --noinput {}'.format(params))", "def migrate():\n print(\"database\")", "def migrate(self, source):\n raise NotImplementedError", "def migrate_tasks(source, dest, migrate=..., app=..., queues=..., **kwargs):\n ...", "def upgrade_legacy(self):\n # Transform to version 1\n if not hasattr(self, 'version'):\n self._models = [\n self.conv1,\n self.conv2,\n self.conv3,\n self.conv4\n ]\n\n self.version = '1'", "def migration(*args):\n return func(*args)", "def syncdb_and_migrate():\n output = manage('syncdb --noinput')\n # check for south message in output\n if \"use ./manage.py to migrate these\" in output:\n output = manage('migrate')", "def migrate(self, epoch: int):\n ordered_players = self.migration_order_policy(self.players)\n\n for p in ordered_players:\n p.migrate()", "def migrate(djangoenv='develop'):\n with shell_env(DJANGOENV=djangoenv):\n _manage('migrate --noinput')", "def migrate_db(db_in, db_out):\n # first, create a staging database\n if not call_process('createdb {}'.format(db_out)):\n print('Error creating database {}, exiting...'.format(db_out))\n sys.exit(1)\n\n # next, dump the production database schemas\n if not call_process('pg_dump --schema-only --format tar -f ../data/schemas.db {}'.format(db_in)):\n print('Error dumping schemas for {}, exiting...'.format(db_in))\n sys.exit(1)\n\n # next, load the schemas into the staging db\n if not call_process('pg_restore -d {} --schema-only ../data/schemas.db'.format(db_out)):\n print('Error loading schemas for {}, exiting...'.format(db_out))\n sys.exit(1)\n\n # establish connection with staging db\n staging_conn = connect_to_psql(db_out)\n staging_cursor = staging_conn.cursor()\n\n # Next, connect to the production database\n prod_conn = connect_to_psql(db_in)\n prod_cursor = prod_conn.cursor()\n\n # generic add data string\n sql = 'INSERT INTO {} VALUES ({})'\n\n # clean and migrate the data\n for db in ['account', 'address', 'statement']:\n prod_cursor.execute('SELECT * FROM {}'.format(db))\n \n for row in prod_cursor.fetchall():\n # de-identify the data\n if db == 'account':\n # remove the full name\n new_row = redact(row, 1)\n if db == 'address':\n # remove the street address\n new_row = redact(row, 2)\n elif db == 'statement':\n new_row = row\n \n # format the generic sql command for this row and table\n cmd = sql.format(db, ', '.join(['%s'] * len(new_row)))\n staging_cursor.execute(cmd, new_row)\n\n staging_conn.commit()\n\n # close the connection to the databases\n staging_conn.close()\n prod_conn.close()", "def migrate_defined_models(self):\n # find any models referred to in old models.yaml\n conf = OmegaConf.load(self.root_directory / \"configs/models.yaml\")\n\n for model_name, stanza in conf.items():\n try:\n passthru_args = {}\n\n if vae := stanza.get(\"vae\"):\n try:\n passthru_args[\"vae\"] = str(self._vae_path(vae))\n except Exception as e:\n logger.warning(f'Could not find a VAE matching \"{vae}\" for model \"{model_name}\"')\n logger.warning(str(e))\n\n if config := stanza.get(\"config\"):\n passthru_args[\"config\"] = config\n\n if description := stanza.get(\"description\"):\n passthru_args[\"description\"] = description\n\n if repo_id := stanza.get(\"repo_id\"):\n 
logger.info(f\"Migrating diffusers model {model_name}\")\n self.migrate_repo_id(repo_id, model_name, **passthru_args)\n\n elif location := stanza.get(\"weights\"):\n logger.info(f\"Migrating checkpoint model {model_name}\")\n self.migrate_path(Path(location), model_name, **passthru_args)\n\n elif location := stanza.get(\"path\"):\n logger.info(f\"Migrating diffusers model {model_name}\")\n self.migrate_path(Path(location), model_name, **passthru_args)\n\n except KeyboardInterrupt:\n raise\n except Exception as e:\n logger.error(str(e))", "def migrate_1to2(store):\n\n # migrate metadata\n from zarr import meta_v1\n meta = meta_v1.decode_metadata(store['meta'])\n del store['meta']\n\n # add empty filters\n meta['filters'] = None\n\n # migration compression metadata\n compression = meta['compression']\n if compression is None or compression == 'none':\n compressor_config = None\n else:\n compression_opts = meta['compression_opts']\n codec_cls = codec_registry[compression]\n if isinstance(compression_opts, dict):\n compressor = codec_cls(**compression_opts)\n else:\n compressor = codec_cls(compression_opts)\n compressor_config = compressor.get_config()\n meta['compressor'] = compressor_config\n del meta['compression']\n del meta['compression_opts']\n\n # store migrated metadata\n store[array_meta_key] = encode_array_metadata(meta)\n\n # migrate user attributes\n store[attrs_key] = store['attrs']\n del store['attrs']", "def migrate(self, irc, msg, args, vmname, hostname):\n username = self.user\n password = self.password\n vcenter = self.vcenter\n\n try:\n si = SmartConnect(host=vcenter, user=username, pwd=password, port=443)\n except:\n err_text = 'Error connecting to {0}'.format(vcenter)\n log.info(err_text)\n irc.reply(err_text)\n return\n\n if hostname:\n try:\n host = vmutils.get_host_by_name(si, hostname)\n hostname = host.name\n except:\n irc.reply('{0} not found'.format(hostname))\n return\n else:\n # hostname was not passed\n all_hosts = vmutils.get_hosts(si)\n host = vmutils.get_host_by_name(si, random.choice(all_hosts.values()))\n hostname = host.name\n\n # Finding source VM\n try:\n vm = vmutils.get_vm_by_name(si, vmname)\n except:\n irc.reply('{0} not found.'.format(vmname))\n return\n\n # relocate spec, to migrate to another host\n # this can do other things, like storage and resource pool\n # migrations\n relocate_spec = vim.vm.RelocateSpec(host=host)\n\n # does the actual migration to host\n vm.Relocate(relocate_spec)\n irc.reply('Migrating {0} to {1}'.format(vmname, hostname))\n\n Disconnect(si)", "def diff(self,old_db_col_names, new_db_col_names, batch_size=100000, steps=[\"content\",\"mapping\",\"reduce\",\"post\"], mode=None, exclude=[]):\n job = asyncio.ensure_future(self.diff_cols(old_db_col_names, new_db_col_names, batch_size, steps, mode, exclude))\n return job", "def migrateBulk(project, from_type, to_type):\n # Get field names from both types.\n from_fields = [str(field).rsplit('.', 1)[1] for field in from_type._meta.fields]\n to_fields = [str(field).rsplit('.', 1)[1] for field in to_type._meta.fields]\n\n # Find intersection between fields and remove id.\n fields = list(set(from_fields) & set(to_fields))\n fields.remove('id')\n\n # Get reverse lookup.\n if to_type == Media:\n reverse_lookup = {'media_polymorphic__isnull': True}\n elif to_type == Localization:\n reverse_lookup = {'localization_polymorphic__isnull': True}\n elif to_type == State:\n reverse_lookup = {'state_polymorphic__isnull': True}\n elif to_type == Leaf:\n reverse_lookup = 
{'leaf_polymorphic__isnull': True}\n elif to_type == Analysis:\n reverse_lookup = {'analysis_polymorphic__isnull': True}\n\n # Migrate objects in chunks.\n flat = []\n total = 0\n for obj in from_type.objects.filter(project=project).iterator():\n # Convert the objects and bulk create.\n if to_type.objects.filter(polymorphic=obj.id).exists():\n continue\n total+=1\n\n flat.append(convertObject(obj))\n if len(flat) == 1000:\n to_type.objects.bulk_create(flat)\n logger.info(f\"Migrated {total} records of {from_type.__name__} to {to_type.__name__}...\")\n flat = []\n\n if len(flat) > 0:\n to_type.objects.bulk_create(flat)\n logger.info(f\"Migrated {total} records of {from_type.__name__} to {to_type.__name__}...\")\n flat = []", "def handle_upgrade_1_1_to_1_2(self):\n try:\n logging.info(\"handle_upgrade_1_1_to_1_2: Start\")\n self.conn.create_function('name_from_uuid', 1, vmdk_utils.get_vm_name_by_uuid)\n # Alter vms table to add a new column name vm_name to store vm name\n # update all the existing records with the vm_name.\n # If vm_name is not resolved, it is populated as None and handled appropriately later.\n # Finally update the db schema version\n script = \"\"\"ALTER TABLE vms ADD COLUMN vm_name TEXT;\n UPDATE vms SET vm_name=name_from_uuid(vm_id);\n UPDATE versions SET major_ver = {}, minor_ver = {};\n \"\"\"\n sql_script = script.format(DB_MAJOR_VER, DB_MINOR_VER)\n self.conn.executescript(sql_script)\n\n logging.info(\"handle_upgrade_1_1_to_1_2: update vms table Done\")\n\n # update the tenants table to set \"default_datastore\" to \"__VM_DS\" if \"default_datastore\" is \"\"\n self.conn.execute(\"UPDATE OR IGNORE tenants SET default_datastore_url = ? where default_datastore_url = \\\"\\\"\",\n (auth_data_const.VM_DS_URL,))\n logging.info(\"handle_upgrade_1_1_to_1_2: update default_datastore in tenants table\")\n\n cur = self.conn.execute(\"SELECT * FROM tenants\")\n result = cur.fetchall()\n\n self.conn.execute(\"\"\"INSERT OR IGNORE INTO privileges(tenant_id, datastore_url, allow_create, max_volume_size, usage_quota)\n SELECT tenants.id, tenants.default_datastore_url, 1, 0, 0 FROM tenants\n \"\"\")\n logging.info(\"handle_upgrade_1_1_to_1_2: Insert privilege to default_datastore in privileges table\")\n\n cur = self.conn.execute(\"SELECT * FROM tenants WHERE id = ?\",\n (auth_data_const.DEFAULT_TENANT_UUID,)\n )\n\n result = cur.fetchall()\n logging.debug(\"handle_upgrade_1_1_to_1_2: Check DEFAULT tenant exist\")\n if result:\n # _DEFAULT tenant exists\n # insert full access privilege to \"__ALL_DS\" for \"_DEFAULT\" tenant\n all_ds_privilege = (auth_data_const.DEFAULT_TENANT_UUID, auth_data_const.ALL_DS_URL, 1, 0, 0)\n self.conn.execute(\"INSERT INTO privileges(tenant_id, datastore_url, allow_create, max_volume_size, usage_quota) VALUES (?, ?, ?, ?, ?)\",\n all_ds_privilege)\n logging.info(\"handle_upgrade_1_1_to_1_2: Insert privilege to __ALL_DS for _DEFAULT tenant in privileges table\")\n # remove access privilege to \"DEFAULT_DS\"\n self.conn.execute(\"DELETE FROM privileges WHERE tenant_id = ? AND datastore_url = ?\",\n [auth_data_const.DEFAULT_TENANT_UUID, auth_data_const.DEFAULT_DS_URL])\n logging.info(\"handle_upgrade_1_1_to_1_2: Remove privilege to _DEFAULT_DS for _DEFAULT tenant in privileges table\")\n self.conn.commit()\n return None\n except sqlite3.Error as e:\n error_msg = \"Error when upgrading auth DB table({})\".format(str(e))\n logging.error(\"handle_upgrade_1_1_to_1_2. 
%s\", error_msg)\n raise DbUpgradeError(self.db_path, error_msg)", "def success(self, migration):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Migrate the records in the provided file. The file can be an (optionally gzipped) XML file containing MARCXML, or a prodsync tarball.
def migrate_file(file_name, mirror_only=False, force=False, wait=False): halt_if_debug_mode(force=force) click.echo(f"Migrating records from file: {file_name}") populate_mirror_from_file(file_name) if not mirror_only: task = migrate_from_mirror() if wait: wait_for_all_tasks(task)
[ "def migrate(self,f):\n fs = f.status()\n if ( fs == FILE_CLOSED ):\n ret = self.reqDb.migrateFile(f.name(),f.path())\n if ( ret ):\n return self._setFileState(f, FILE_MIGRATING)\n return fail('Cannot migrate file:',f.name(),' to longterm storage.')\n elif ( fs == FILE_MIGRATING or fs == FILE_MIGRATED ):\n return (SUCCESS,)\n return fail('Cannot migrate file:',f.name(),' from state:',fs)", "def _convert_raw_file(self, raw_path, gzip_file):\n self.logger.debug('Beginning conversion of {0}'.format(raw_path))\n\n with self._zip.open(raw_path) as raw_data:\n without_carriage_returns = self._remove_crs(raw_data)\n csv_data = self._make_csv(without_carriage_returns)\n self._generate_rows(csv_data, gzip_file)\n\n self.logger.debug('Completed conversion of {0}'.format(raw_path))", "def __call__(self, infile, dbfile):\n\n print(f\"Converting {infile} to {dbfile}\")\n\n # Delete existing file\n if os.path.exists(dbfile):\n os.remove(dbfile)\n\n # Create new database\n db = sqlite3.connect(dbfile)\n\n # Create database tables if necessary\n self.create(db, XML2DB.QUESTIONS, \"questions\")\n self.create(db, XML2DB.ANSWERS, \"answers\")\n\n count = 0\n with open(infile, encoding=\"utf-8\") as xml:\n context, root = self.xmlstream(xml)\n\n for event, row in context:\n if event == \"end\":\n # Execute insert statement\n self.insert(db, row)\n\n count += 1\n if count % 10000 == 0:\n print(f\"Inserted {count} rows\")\n\n # Free memory\n root.clear()\n\n print(f\"Total rows inserted: {count}\")\n\n # Commit changes\n db.commit()", "def fromfiletorecord(self, *args, **kwargs):\n return _regionmanager.regionmanager_fromfiletorecord(self, *args, **kwargs)", "def migrate_data(filename):\n am = AcManager()\n am.build_db()\n tz = pytz.timezone(\"America/Los_Angeles\")\n\n with open(filename, 'rb') as ac_file:\n ac_data = pickle.load(ac_file)\n\n # create the users\n for user in ac_data['users']:\n userblob = ac_data['users'][user]\n new_user = am.add_user(user)\n new_user.update_fruit(userblob['fruit'])\n new_user.update_friend_code(userblob['friend_code'].replace('SW-', ''))\n\n # add the users turnip prices\n for entry in ac_data['turnips']:\n user = am.user_exists(entry['discord_id'])\n price = entry['price']\n old_time = entry['time']\n\n if user and price and old_time:\n time = tz.normalize(tz.localize(old_time)).astimezone(pytz.utc)\n user.add_price(price, time)", "def postprocess_file(filename):\n\n with open(filename, 'r') as source:\n contents = source.read()\n with open(filename, 'w') as destination:\n destination.writelines(postprocess_contents(contents))", "def parse_table_to_madx_sequence_file(self, filename: str) -> None:\n parse_table_to_madx_sequence_file(self.name, self.len, self.table, filename)", "def fromXmlFile(self, file='case.xml'):\n f = open(file, 'r')\n try:\n xml = f.read()\n finally:\n f.close()\n\n # Populate the entity data variables from the retrieved XML\n self.load(xml)", "def convert_file(in_file, out_file):\n sequences = SeqIO.parse(in_file, \"genbank\")\n g = open(out_file, \"w\")\n SeqIO.write(sequences, out_file, \"fasta\")", "def prepare_contacts(gds_file_path):\n\n now = datetime.now().isoformat()\n gds_table = etl.fromcsv(gds_file_path)\n gds_header = gds_table.fieldnames()\n\n gds_table \\\n .addfield('gds_import_data', partial(serialize_row, keys=gds_header)) \\\n .addfield('created_at', now) \\\n .addfield('updated_at', now) \\\n .addfield('address', concat_address) \\\n .rename({'NHSNumber': 'nhs_number',\n 'FirstName': 'first_name',\n 'MiddleName': 'middle_names',\n 
'LastName': 'surname',\n 'Postcode': 'postcode',\n 'DOB': 'date_of_birth',\n 'Phone': 'telephone',\n 'Mobile': 'mobile'}) \\\n .convert('date_of_birth', parse_date) \\\n .cut('nhs_number',\n 'first_name',\n 'middle_names',\n 'surname',\n 'address',\n 'postcode',\n 'telephone',\n 'mobile',\n 'date_of_birth',\n 'created_at',\n 'updated_at',\n 'gds_import_data') \\\n .tocsv()", "def record(recid):\n click.echo(f\"Migrating record {recid} from INSPIRE legacy\")\n migrate_record_from_legacy(recid)", "def import_daily_shipment_report(file: TransactionFileUpload) -> None:\n for line in read_csv(file.original_csv.name, delimiter=\",\"):\n if sorted([k for k in line])[0] == \"Article Name\":\n keys = FieldsV0\n elif sorted([k for k in line])[0] == \"article_name\":\n keys = FieldsV1\n else:\n mlog.error(LOG, line)\n LOG.exception(\"unknown line format in daily shipment report\")\n return\n\n canceled = line[keys.CANCELLATION] == \"x\"\n returned = line[keys.RETURN] == \"x\"\n shipped = line[keys.SHIPMENT] == \"x\"\n\n price_in_cent = float(line[keys.PRICE]) * 100\n\n DailyShipmentReport.objects.get_or_create(\n article_number=line[keys.ARTICLE_NUMBER],\n cancel=canceled,\n channel_order_number=line[keys.CHANNEL_ORDER_NUMBER],\n order_created=line[keys.ORDER_CREATED],\n price_in_cent=price_in_cent,\n return_reason=line[keys.RETURN_REASON],\n returned=returned,\n shipment=shipped,\n )\n\n price, _created = Price.objects.get_or_create(\n sku=line[keys.ARTICLE_NUMBER],\n )\n\n marketplace_config = MarketplaceConfig.objects.get(\n name=Marketplace.ZALANDO, active=True\n )\n\n RawDailyShipmentReport.objects.get_or_create(\n price=price,\n article_number=line[keys.ARTICLE_NUMBER],\n cancel=canceled,\n channel_order_number=line[keys.CHANNEL_ORDER_NUMBER],\n order_created=line[keys.ORDER_CREATED],\n order_event_time=line[keys.ORDER_EVENT_TIME],\n price_in_cent=price_in_cent,\n return_reason=line[keys.RETURN_REASON],\n returned=returned,\n shipment=shipped,\n marketplace_config=marketplace_config,\n )\n\n file.processed = True\n file.save()", "def file_parse(self):\n for sample in self.samples:\n # Create attributes\n sample.fastq = sample.filepath.replace('.ab1', '.fastq')\n sample.rev_comp_fastq = sample.fastq.replace('.fastq', '_rev_comp.fastq')\n with open(sample.fastq, 'w') as fastq:\n # Read in the .ab1 file\n for record in SeqIO.parse(sample.filepath, 'abi'):\n # Store the string of the raw sequence\n sample.raw_seq = str(record.seq)\n # Output the record in FASTQ format\n SeqIO.write(record, fastq, 'fastq')", "def migrate_launch_file(self, launch_file_contents):\n self.xml_tree_root = etree.fromstring(launch_file_contents)\n self.convert_args()\n self.gather_global_params()\n self.launch_actions += self.convert_groups()\n self.launch_actions += self.convert_nodes(self.xml_tree_root)\n self.launch_actions += self.convert_includes()\n return self.generate_launch_file()", "def convert(file_uploaded):\r\n\r\n # Open Input CSV File\r\n input_file = open(file_uploaded, mode='r')\r\n csv_file = csv.DictReader(input_file)\r\n\r\n # Remove Existing File\r\n if os.path.exists(OUTPUT_FILE):\r\n os.remove(OUTPUT_FILE)\r\n\r\n # Open Output file\r\n output = open(OUTPUT_FILE, 'w')\r\n\r\n # Write Header\r\n output.write(\"date;paymode;info;payee;memo;amount;category;tags\\n\")\r\n\r\n # Parse out the Bank Statment\r\n for row in csv_file:\r\n # Detect Bank\r\n if csv_file.fieldnames[0] != \"Posted Account\":\r\n boi_line_parser(row, output)\r\n else:\r\n aib_line_parser(row, output)\r\n\r\n # Clean Up\r\n 
output.close()\r\n input_file.close()", "def TransformFile(file, transformmatrix):\n\n if file[-3:] == \"vtp\":\n reader = vtk.vtkXMLPolyDataReader()\n elif file[-3:] == \"ply\":\n reader = vtk.vtkPLYReader()\n else:\n print(\"Error: unreadable file.\")\n return 1\n reader.SetFileName(file)\n reader.Update()\n data = reader.GetOutput()\n\n pos_vtk = reader.GetOutput().GetPoints().GetData()\n pos = vtk_to_numpy(pos_vtk)\n nodes = vtk.vtkPoints()\n for point in pos:\n vec = numpy.array([[point[0]], [point[1]], [point[2]], [1]])\n position = numpy.dot(transformmatrix, vec)\n nodes.InsertNextPoint(position[:-1])\n data.SetPoints(nodes)\n\n # export to new file\n writer = vtk.vtkXMLPolyDataWriter()\n file = \"testingtrans.vtp\"\n writer.SetFileName(file)\n writer.SetInputData(data)\n writer.Write()", "def read_monthly_chase_file(self, file_name):\n '''\n 0 1 2 3\n CREDIT,20100216120000[0:GMT],\"Online Transfer from MMA XXXXXX6306 transaction#: 313944149\",19.79\n DEBIT,20100212120000[0:GMT],\"MCDONALD'S F109 BOULDER 02/11MCDONALD'\", -1.08\n CHECK,20100216120000[0:GMT],\"CHECK 1108\", -90.00\n trtype tdatetime payee tamt\n '''\n\n line_num = 0\n expected_fields = 4\n output_dict = dict()\n with open(file_name) as file_ptr:\n for line in file_ptr:\n line = line.strip()\n if not line:\n continue # ignore blank lines\n\n # Clear any commas inside quoted fields\n line = transferUtils.clear_commas_in_quotes(' ', line)\n\n # Look for in-line comments and keep them\n comment = ''\n idx = line.find('//')\n if idx >= 0:\n comment = line[idx+2:]\n line = line[:idx]\n\n # split the line into fields (comma-separated)\n fields = line.split(',')\n\n # parse the date field -- transaction date\n trans_date = fields[1][4:6]+'/'+fields[1][6:8]+'/'+fields[1][0:4]\n\n # parse the transaction reference\n # The reference will be the entire line stripped of commas and spaces\n trans_ref = line.replace(',', '').replace(' ', '')\n\n # transaction amount\n trans_amt = fields[3]\n\n # transaction payee\n # strip out extra spaces\n trans_payee = ' '.join(fields[2].split())\n\n # Lookup the default budget category from the payee DATABASE\n # defaults to 'UNKNOWN'\n bud_cat = self.lookup_payee_category(trans_payee, trans_date)\n\n # process the extra budget fields which may mean extra DATABASE\n # records\n budget_category_dict = transferUtils.process_budget_fields(\n fields[expected_fields:], trans_amt, bud_cat, trans_date, trans_ref)\n\n # insert the record(s) into the dictionary\n transferUtils.insert_entry_into_dict(\n budget_category_dict, trans_ref, trans_date, trans_payee, '', 'c', trans_amt, comment, output_dict)\n line_num += 1\n # end for\n self.logger.log('read_monthly_chase_file processed {} records from {}\\n'.\n format(line_num, file_name))\n return output_dict", "def Migrate(self):\n\n # TODO(amoser): This doesn't do anything yet.\n pass", "def parseMARC(records, marcRels):\n logger.info('Transforming MARCXML records into SFR objects')\n return list(filter(None, (transformMARC(r, marcRels) for r in records)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Migrate records from the mirror. By default, only records that have not been migrated yet are migrated.
def mirror(also_migrate=None, force=False, wait=False, date_from=None): halt_if_debug_mode(force=force) task = migrate_from_mirror( also_migrate=also_migrate, disable_external_push=True, date_from=date_from ) if wait: wait_for_all_tasks(task)
[ "def Migrate(self):\n\n # TODO(amoser): This doesn't do anything yet.\n pass", "def migrate(self, source):\n raise NotImplementedError", "def migrate(self, epoch: int):\n ordered_players = self.migration_order_policy(self.players)\n\n for p in ordered_players:\n p.migrate()", "def migrate_all(self):\n # Closing the connection prior to running any migrations to prevent the\n # current connection from locking the database\n self.connection.close()\n\n self.prepare_next_migration()\n while not self.current_version == -1:\n self.migrate()\n self.version = self.migration.version\n self.prepare_next_migration()\n self.connection = sqlite3.connect(self.db)", "def migrate_file(file_name, mirror_only=False, force=False, wait=False):\n halt_if_debug_mode(force=force)\n click.echo(f\"Migrating records from file: {file_name}\")\n\n populate_mirror_from_file(file_name)\n if not mirror_only:\n task = migrate_from_mirror()\n if wait:\n wait_for_all_tasks(task)", "def migrate() -> None:\n run_migration()", "def migrate_all(destination=None, verbose=False):\n if destination is None:\n destination = foc.VERSION\n\n migrate_database_if_necessary(destination=destination, verbose=verbose)\n\n for name in fo.list_datasets():\n migrate_dataset_if_necessary(\n name, destination=destination, verbose=verbose\n )", "def forward(self, amount=1):\n if amount == 0:\n return 0\n if amount < 0:\n raise ValueError(\"Cursor can only move forwards\")\n amount = int(amount)\n moved = 0\n v = self._result.protocol_version\n while moved != amount:\n values = self._result.fetch()\n if values is None:\n break\n else:\n keys = self._result.fields() # TODO: don't do this for every record\n if self._hydrant:\n values = self._hydrant.hydrate(keys, values, entities=self._entities, version=v)\n self._current = Record(zip(keys, values))\n moved += 1\n return moved", "def migrateBulk(project, from_type, to_type):\n # Get field names from both types.\n from_fields = [str(field).rsplit('.', 1)[1] for field in from_type._meta.fields]\n to_fields = [str(field).rsplit('.', 1)[1] for field in to_type._meta.fields]\n\n # Find intersection between fields and remove id.\n fields = list(set(from_fields) & set(to_fields))\n fields.remove('id')\n\n # Get reverse lookup.\n if to_type == Media:\n reverse_lookup = {'media_polymorphic__isnull': True}\n elif to_type == Localization:\n reverse_lookup = {'localization_polymorphic__isnull': True}\n elif to_type == State:\n reverse_lookup = {'state_polymorphic__isnull': True}\n elif to_type == Leaf:\n reverse_lookup = {'leaf_polymorphic__isnull': True}\n elif to_type == Analysis:\n reverse_lookup = {'analysis_polymorphic__isnull': True}\n\n # Migrate objects in chunks.\n flat = []\n total = 0\n for obj in from_type.objects.filter(project=project).iterator():\n # Convert the objects and bulk create.\n if to_type.objects.filter(polymorphic=obj.id).exists():\n continue\n total+=1\n\n flat.append(convertObject(obj))\n if len(flat) == 1000:\n to_type.objects.bulk_create(flat)\n logger.info(f\"Migrated {total} records of {from_type.__name__} to {to_type.__name__}...\")\n flat = []\n\n if len(flat) > 0:\n to_type.objects.bulk_create(flat)\n logger.info(f\"Migrated {total} records of {from_type.__name__} to {to_type.__name__}...\")\n flat = []", "def _migrate(self):\n if self.parent.destination is None:\n # This is a \"verification\" migration\n return self._verify()\n\n d = self.obj.getContent()\n d.addCallback(\n lambda content: self.parent.destination.storeObject(\n content=content,\n 
contentType=self.obj.contentType,\n metadata=self.obj.metadata,\n created=self.obj.created,\n objectId=self.obj.objectId))\n return d", "def _changes_sync(self, operations):\n #mark the start of execution in the db\n self._feedlgr.start()\n try:\n LOG.debug('operations requested: %s', operations)\n src = self._get_source_dataset()\n LOG.debug('source dataset contains %s records', len(src))\n dst = self._get_destination_dataset()\n LOG.debug('destination dataset contains %s records', len(dst))\n dos = JHRecordSyncer(src, dst)\n if 'add' in operations:\n adds = dos.get_additions()\n LOG.debug('%s records to be added', len(adds))\n else:\n adds = set()\n if 'remove' in operations:\n rms = dos.get_removals()\n LOG.debug('%s records to be removed', len(rms))\n else:\n rms = set()\n if 'modify' in operations:\n mods = dos.get_modifications()\n LOG.debug('%s records to be modified', len(mods))\n else:\n mods = set()\n self._sl.set_total_records(len(dst))\n self._sl.add_changes(len(adds) + len(rms) + len(mods))\n if not self._sl.check_changes():\n raise SyncException(self._sl.get_error_str())\n #run operations and collect any failures\n if not (adds or rms or mods):\n LOG.info('No changes found. Exiting')\n self._feedlgr.success()\n return True\n if 'add' in operations:\n LOG.debug('attempting to add new records')\n self._add_records(adds)\n LOG.debug('additions complete')\n if 'remove' in operations:\n LOG.debug('attempting to remove records')\n self._rm_records(rms)\n LOG.debug('removals complete')\n if 'modify' in operations:\n LOG.debug('attempting to modify records')\n self._modify_records(mods)\n LOG.debug('modifications complete')\n except Exception as exc:\n LOG.exception(exc)\n LOG.debug('Rolling back any uncommited changes')\n self.rollback()\n self._feedlgr.fail(exc)\n raise exc\n if not self._dry_run:\n self.commit()\n LOG.info(\n 'Successfully added: %s, modified: %s, removed: %s',\n len(adds), len(mods), len(rms))\n else:\n self.rollback()\n LOG.info(\n 'Dry Run. 
Would have added: %s, modified: %s,'\n ' removed: %s', len(adds), len(mods), len(rms))\n self._feedlgr.success()\n return True", "def make_migration(self):\n self.logger.info('{} migrating'.format(self.island_name))\n migration_size = floor(len(self.population) * configurations[\"MIGRATION_RATIO\"])\n migrating_individuals = self._select_best_no_repetition(self.population, migration_size)\n destination_island_num = self.get_neighbor_island_number()\n\n self.migration_coordinator.send_migrants(migrating_individuals, self.island_number, destination_island_num)\n\n replacement_individuals, sender_island_num = self.migration_coordinator.get_migrants(self.island_number)\n if replacement_individuals is not None:\n for individual in replacement_individuals:\n individual.invalidate_fitness()\n\n replaced_individuals_idxs = select_worst_idx(self.population, len(replacement_individuals))\n\n for i in range(len(replacement_individuals)):\n self.population[replaced_individuals_idxs[i]] = replacement_individuals[i]\n self.evaluate_population()\n\n self.logger.info('{} finished migration, got {} migrants from island {}'.format(self.island_name,\n len(replacement_individuals),\n sender_island_num))", "def migrate_defined_models(self):\n # find any models referred to in old models.yaml\n conf = OmegaConf.load(self.root_directory / \"configs/models.yaml\")\n\n for model_name, stanza in conf.items():\n try:\n passthru_args = {}\n\n if vae := stanza.get(\"vae\"):\n try:\n passthru_args[\"vae\"] = str(self._vae_path(vae))\n except Exception as e:\n logger.warning(f'Could not find a VAE matching \"{vae}\" for model \"{model_name}\"')\n logger.warning(str(e))\n\n if config := stanza.get(\"config\"):\n passthru_args[\"config\"] = config\n\n if description := stanza.get(\"description\"):\n passthru_args[\"description\"] = description\n\n if repo_id := stanza.get(\"repo_id\"):\n logger.info(f\"Migrating diffusers model {model_name}\")\n self.migrate_repo_id(repo_id, model_name, **passthru_args)\n\n elif location := stanza.get(\"weights\"):\n logger.info(f\"Migrating checkpoint model {model_name}\")\n self.migrate_path(Path(location), model_name, **passthru_args)\n\n elif location := stanza.get(\"path\"):\n logger.info(f\"Migrating diffusers model {model_name}\")\n self.migrate_path(Path(location), model_name, **passthru_args)\n\n except KeyboardInterrupt:\n raise\n except Exception as e:\n logger.error(str(e))", "def migrate_all(src, dst, replace=True, nprocs=1):\n srchost, srcport, _ = parse_uri(src)\n srcr = redis.StrictRedis(host=srchost, port=srcport, charset='utf8')\n keyspace = srcr.info('keyspace')\n\n freeze_support() # for Windows support\n pool = Pool(processes=min(len(keyspace.keys()), nprocs))\n pool.starmap(migrate, [(src, dst, int(db[2:]), replace, i) for i, db in enumerate(keyspace.keys())])\n print('\\n' * max(0, len(keyspace.keys())-1))", "def run_migrations(self):\n\n while self.version < self.SCHEMA_VERSION:\n self.version += 1\n self.migrations.get(self.version, lambda _: None)(self)", "def record(recid):\n click.echo(f\"Migrating record {recid} from INSPIRE legacy\")\n migrate_record_from_legacy(recid)", "def _update_destination(self, records):\n raise NotImplementedError", "def migrate(self, irc, msg, args, vmname, hostname):\n username = self.user\n password = self.password\n vcenter = self.vcenter\n\n try:\n si = SmartConnect(host=vcenter, user=username, pwd=password, port=443)\n except:\n err_text = 'Error connecting to {0}'.format(vcenter)\n log.info(err_text)\n irc.reply(err_text)\n 
return\n\n if hostname:\n try:\n host = vmutils.get_host_by_name(si, hostname)\n hostname = host.name\n except:\n irc.reply('{0} not found'.format(hostname))\n return\n else:\n # hostname was not passed\n all_hosts = vmutils.get_hosts(si)\n host = vmutils.get_host_by_name(si, random.choice(all_hosts.values()))\n hostname = host.name\n\n # Finding source VM\n try:\n vm = vmutils.get_vm_by_name(si, vmname)\n except:\n irc.reply('{0} not found.'.format(vmname))\n return\n\n # relocate spec, to migrate to another host\n # this can do other things, like storage and resource pool\n # migrations\n relocate_spec = vim.vm.RelocateSpec(host=host)\n\n # does the actual migration to host\n vm.Relocate(relocate_spec)\n irc.reply('Migrating {0} to {1}'.format(vmname, hostname))\n\n Disconnect(si)", "def migrate_tasks(source, dest, migrate=..., app=..., queues=..., **kwargs):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Migrate a single record from legacy.
def record(recid): click.echo(f"Migrating record {recid} from INSPIRE legacy") migrate_record_from_legacy(recid)
[ "def Migrate(self):\n\n # TODO(amoser): This doesn't do anything yet.\n pass", "def upgrade_legacy(self):\n # Transform to version 1\n if not hasattr(self, 'version'):\n self._models = [\n self.conv1,\n self.conv2,\n self.conv3,\n self.conv4\n ]\n\n self.version = '1'", "def migrate(self, source):\n raise NotImplementedError", "def migrate_1to2(store):\n\n # migrate metadata\n from zarr import meta_v1\n meta = meta_v1.decode_metadata(store['meta'])\n del store['meta']\n\n # add empty filters\n meta['filters'] = None\n\n # migration compression metadata\n compression = meta['compression']\n if compression is None or compression == 'none':\n compressor_config = None\n else:\n compression_opts = meta['compression_opts']\n codec_cls = codec_registry[compression]\n if isinstance(compression_opts, dict):\n compressor = codec_cls(**compression_opts)\n else:\n compressor = codec_cls(compression_opts)\n compressor_config = compressor.get_config()\n meta['compressor'] = compressor_config\n del meta['compression']\n del meta['compression_opts']\n\n # store migrated metadata\n store[array_meta_key] = encode_array_metadata(meta)\n\n # migrate user attributes\n store[attrs_key] = store['attrs']\n del store['attrs']", "def _migrate(self):\n if self.parent.destination is None:\n # This is a \"verification\" migration\n return self._verify()\n\n d = self.obj.getContent()\n d.addCallback(\n lambda content: self.parent.destination.storeObject(\n content=content,\n contentType=self.obj.contentType,\n metadata=self.obj.metadata,\n created=self.obj.created,\n objectId=self.obj.objectId))\n return d", "def transform(self, data_record: DataRecord) -> DataRecord: # pragma: no cover\n raise NotImplementedError()", "def success(self, migration):", "def migration(*args):\n return func(*args)", "def migrate_obj(self, obj, jref, spec_version):\n spec_version = spec_version\n supported_versions = self.migratable_spec_versions\n\n if spec_version not in supported_versions:\n raise ValueError(\n 'unsupported spec version: {}'.format(spec_version))\n\n # only keep required version strings for this migration\n supported_versions = supported_versions[:(\n supported_versions.index(spec_version) + 1)]\n\n # filter out those migration with lower version than current one\n supported_versions = [\n v for v in supported_versions\n if StrictVersion(obj.__swagger_version__) <= StrictVersion(v)\n ]\n\n # load migration module\n url, relocated_jp = utils.jr_split(jref)\n from_spec_version = obj.__swagger_version__\n for version in supported_versions:\n patched_version = 'v{}'.format(version).replace('.', '_')\n migration_module_path = '.'.join(\n ['pyopenapi', 'migration', 'versions', patched_version, 'main'])\n loader = pkgutil.find_loader(migration_module_path)\n if not loader:\n raise Exception('unable to find module loader for {}'.format(\n migration_module_path))\n\n migration_module = loader.load_module(migration_module_path)\n if not migration_module:\n raise Exception('unable to load {} for migration'.format(\n migration_module_path))\n\n # preform migration\n obj, reloc = migration_module.upgrade(obj, self, jref)\n\n # update route for object relocation\n self.spec_obj_store.update_routes(url, version,\n {relocated_jp: reloc})\n\n # update JSON pointer for next round\n relocated_jp = self.spec_obj_store.relocate(\n url, relocated_jp, from_spec_version, version)\n\n # prepare this object if needy\n obj = self.prepare_obj(obj, url + relocated_jp)\n\n # cache migrated and prepared object if we need it later\n 
self.spec_obj_store.set(\n obj, url, relocated_jp, spec_version=version)\n\n from_spec_version = version\n\n if isinstance(obj, (OpenApi, Swagger, ResourceListing)):\n self.__current_spec_version = spec_version\n\n return obj", "def _from_db_record(cls, record):\n kwargs = {\n 'id': record.id,\n 'name': record.name,\n 'user': record.user,\n 'project': record.project,\n 'domain': record.domain,\n 'created_at': record.created_at,\n 'updated_at': record.updated_at,\n 'actor': record.actor,\n 'params': record.params,\n 'channel': record.channel,\n }\n\n return cls(record.type, record.cluster_id, record.action, **kwargs)", "def _update_well_record(self, submission: ActivitySubmission) -> Well:\n records = ActivitySubmission.objects.filter(well=submission.well) \\\n .prefetch_related(\n 'well_status',\n 'well_activity_type'\n )\n\n # 1) only 1 legacy record created from a previous _create_legacy_submission call [*no* legacy creation]\n # 2) (previous behaviour) - 1 non-legacy record created (e.g. staff_edit) [*yes* legacy creation]\n # 3) (previous behaviour) - 2 records exist (1 legacy, one previous non-legacy) = [*no* legacy creation]\n\n legacy_record_creation_needed = False\n if records.count() == 1:\n if records[0].well_activity_type.code == WELL_ACTIVITY_CODE_LEGACY:\n # if there is only one activity submission and it is LEGACY type then we know that\n # we don't need to process the stack. This one ActivitySubmission was created by the\n # legacy_records command from an existing well and therefore we don't need to update\n # the Well via the self._stack() call. The reason why we don't want to allow the\n # stack to be processed is that saving a Well could fail because the current well's\n # data is invalid according to the model validators.\n return records[0].well\n else:\n legacy_record_creation_needed = True\n if not legacy_record_creation_needed:\n # If there's more than one submission we don't need to create a legacy well, we can\n # safely assume that the 1st submission is either a legacy or construction report\n # submission.\n return self._stack(records, submission.well)\n else:\n # If there aren't prior submissions, we may create a legacy record using the current\n # well record.\n # Edge case of note:\n # Re. discussion with Lindsay on Oct 15 2018: There may be an instance, where there is a\n # pre-existing well, and a construct report is submitted. In this instance, we may end\n # up with a LEGACY record and a CONSTRUCTION record. This is odd, but we don't want to\n # lose the information stored in the existing well record. 
It is imerative that we\n # always create a legacy record.\n self._create_legacy_submission(submission.well)\n # We should now have multiple records\n records = ActivitySubmission.objects.filter(well=submission.well)\n return self._stack(records, submission.well)", "def _record_to_resource(self, record):\n # Conditional to allow passing in a record or an ID\n if not isinstance(record, StorageResourceRecord):\n if record in self._pk_to_resource:\n return self._pk_to_resource[record]\n record = StorageResourceRecord.objects.get(pk=record)\n else:\n if record.pk in self._pk_to_resource:\n return self._pk_to_resource[record.pk]\n\n plugin_module = record.resource_class.storage_plugin.module_name\n if plugin_module in [e for e in self._errored_plugins]:\n return None\n\n resource = record.to_resource()\n self._pk_to_resource[record.pk] = resource\n return resource", "def update_local_db_based_on_record(eox_record, create_missing=False):\n pid = eox_record['EOLProductID']\n # only used with Cisco Products\n v = Vendor.objects.get(name=\"Cisco Systems\")\n\n if create_missing:\n product, created = Product.objects.get_or_create(\n product_id=pid,\n vendor=v\n )\n\n else:\n try:\n product = Product.objects.get(\n product_id=pid,\n vendor=v\n )\n created = False\n\n except ObjectDoesNotExist:\n logger.debug(\"%15s: Product not found in database (create disabled)\" % pid, exc_info=True)\n return None\n\n if created:\n product.product_id = pid\n product.description = eox_record['ProductIDDescription']\n # it is a Cisco API and the vendors are predefined in the database\n product.vendor = v\n logger.debug(\"%15s: Product created\" % pid)\n\n # update the lifecycle information\n try:\n logger.debug(\"%15s: update product lifecycle values\" % pid)\n\n # save datetime values from Cisco EoX API record\n value_map = {\n # <API value> : <class attribute>\n \"UpdatedTimeStamp\": \"eox_update_time_stamp\",\n \"EndOfSaleDate\": \"end_of_sale_date\",\n \"LastDateOfSupport\": \"end_of_support_date\",\n \"EOXExternalAnnouncementDate\": \"eol_ext_announcement_date\",\n \"EndOfSWMaintenanceReleases\": \"end_of_sw_maintenance_date\",\n \"EndOfRoutineFailureAnalysisDate\": \"end_of_routine_failure_analysis\",\n \"EndOfServiceContractRenewal\": \"end_of_service_contract_renewal\",\n \"EndOfSvcAttachDate\": \"end_of_new_service_attachment_date\",\n \"EndOfSecurityVulSupportDate\": \"end_of_sec_vuln_supp_date\",\n }\n\n for key in value_map.keys():\n if eox_record.get(key, None):\n value = eox_record[key].get(\"value\", None)\n value = value.strip() if value else \"\"\n if value != \"\":\n setattr(\n product,\n value_map[key],\n datetime.strptime(\n value,\n convert_time_format(eox_record[key].get(\"dateFormat\", \"%Y-%m-%d\"))\n ).date()\n )\n\n else:\n # required if date is removed after an earlier sync\n setattr(\n product,\n value_map[key],\n None\n )\n\n # save string values from Cisco EoX API record\n if \"LinkToProductBulletinURL\" in eox_record.keys():\n value = clean_api_url_response(eox_record.get('LinkToProductBulletinURL', \"\"))\n if value != \"\":\n val = URLValidator()\n try:\n val(value)\n product.eol_reference_url = value\n\n except ValidationError:\n raise Exception(\"invalid EoL reference URL\")\n\n if \"ProductBulletinNumber\" in eox_record.keys():\n product.eol_reference_number = eox_record.get('ProductBulletinNumber', \"EoL bulletin\")\n\n product.save()\n\n except Exception as ex:\n if created:\n # remove the new (incomplete) entry from the database\n product.delete()\n\n logger.error(\"%15s: 
Product Data update failed.\" % pid, exc_info=True)\n logger.debug(\"%15s: DataSet with exception\\n%s\" % (pid, json.dumps(eox_record, indent=4)))\n return \"Product Data update failed: %s\" % str(ex)\n\n # save migration information if defined\n if \"EOXMigrationDetails\" in eox_record:\n migration_details = eox_record[\"EOXMigrationDetails\"]\n product_migration_source, created = ProductMigrationSource.objects.get_or_create(\n name=\"Cisco EoX Migration option\"\n )\n\n if created:\n product_migration_source.description = \"Migration option suggested by the Cisco EoX API.\"\n product_migration_source.save()\n\n if \"MigrationOption\" in migration_details:\n candidate_replacement_pid = migration_details[\"MigrationProductId\"].strip()\n\n if candidate_replacement_pid == pid:\n logger.error(\"Product ID '%s' should be replaced by itself, which is not possible\" % pid)\n\n else:\n # only a single migration option per migration source is allowed\n pmo, _ = ProductMigrationOption.objects.get_or_create(product=product,\n migration_source=product_migration_source)\n if migration_details[\"MigrationOption\"] == \"Enter PID(s)\":\n # product replacement available, add replacement PID\n pmo.replacement_product_id = candidate_replacement_pid\n pmo.migration_product_info_url = clean_api_url_response(migration_details[\"MigrationProductInfoURL\"])\n\n elif migration_details[\"MigrationOption\"] == \"See Migration Section\" or \\\n migration_details[\"MigrationOption\"] == \"Enter Product Name(s)\":\n # complex product migration, only add comment\n mig_strat = migration_details[\"MigrationStrategy\"].strip()\n pmo.comment = mig_strat if mig_strat != \"\" else migration_details[\"MigrationProductName\"].strip()\n pmo.migration_product_info_url = clean_api_url_response(migration_details[\"MigrationProductInfoURL\"])\n\n else:\n # no replacement available, only add comment\n pmo.comment = migration_details[\"MigrationOption\"].strip() # some data separated by blank\n pmo.migration_product_info_url = clean_api_url_response(migration_details[\"MigrationProductInfoURL\"])\n\n # add message if only a single entry was saved\n if pmo.migration_product_info_url != migration_details[\"MigrationProductInfoURL\"].strip():\n return \"Multiple URL values from the Migration Note received, only the first one is saved\"\n\n pmo.save()", "def migrate() -> None:\n run_migration()", "def migrate_to_1_dot_0(self):\n self.migrate_comments()\n self.migrate_tinymce()\n self.migrate_textindexng2()\n return 1", "def migrateBulk(project, from_type, to_type):\n # Get field names from both types.\n from_fields = [str(field).rsplit('.', 1)[1] for field in from_type._meta.fields]\n to_fields = [str(field).rsplit('.', 1)[1] for field in to_type._meta.fields]\n\n # Find intersection between fields and remove id.\n fields = list(set(from_fields) & set(to_fields))\n fields.remove('id')\n\n # Get reverse lookup.\n if to_type == Media:\n reverse_lookup = {'media_polymorphic__isnull': True}\n elif to_type == Localization:\n reverse_lookup = {'localization_polymorphic__isnull': True}\n elif to_type == State:\n reverse_lookup = {'state_polymorphic__isnull': True}\n elif to_type == Leaf:\n reverse_lookup = {'leaf_polymorphic__isnull': True}\n elif to_type == Analysis:\n reverse_lookup = {'analysis_polymorphic__isnull': True}\n\n # Migrate objects in chunks.\n flat = []\n total = 0\n for obj in from_type.objects.filter(project=project).iterator():\n # Convert the objects and bulk create.\n if 
to_type.objects.filter(polymorphic=obj.id).exists():\n continue\n total+=1\n\n flat.append(convertObject(obj))\n if len(flat) == 1000:\n to_type.objects.bulk_create(flat)\n logger.info(f\"Migrated {total} records of {from_type.__name__} to {to_type.__name__}...\")\n flat = []\n\n if len(flat) > 0:\n to_type.objects.bulk_create(flat)\n logger.info(f\"Migrated {total} records of {from_type.__name__} to {to_type.__name__}...\")\n flat = []", "def test_migration_job_converts_old_question(self):\n # Generate question with old(v27) state data.\n self.save_new_question_with_state_data_schema_v27(\n self.QUESTION_ID, self.albert_id, [self.skill_id])\n question = (\n question_services.get_question_by_id(self.QUESTION_ID))\n self.assertEqual(question.question_state_data_schema_version, 30)\n\n # Start migration job.\n job_id = (\n question_jobs_one_off.QuestionMigrationOneOffJob.create_new())\n question_jobs_one_off.QuestionMigrationOneOffJob.enqueue(job_id)\n self.process_and_flush_pending_tasks()\n\n # Verify the question migrates correctly.\n updated_question = (\n question_services.get_question_by_id(self.QUESTION_ID))\n self.assertEqual(\n updated_question.question_state_data_schema_version,\n feconf.CURRENT_STATE_SCHEMA_VERSION)\n\n output = question_jobs_one_off.QuestionMigrationOneOffJob.get_output(job_id) # pylint: disable=line-too-long\n expected = [[u'question_migrated',\n [u'1 questions successfully migrated.']]]\n self.assertEqual(expected, [ast.literal_eval(x) for x in output])", "def convert_to(self, other_schema, overrides=None):\n if self._saved_state.storage and self._saved_state.key:\n # the record may be invalid for another document class so we are\n # very careful about it\n# try:\n new_instance = self._saved_state.storage.get(other_schema, self.pk)\n# except validators.ValidationError:\n# pass\n## new_instance = other_schema()\n## new_instance._saved_state = self._saved_state.clone()\n## for key, value in self.iteritems():\n## try:\n## new_instance[key] = value\n## except KeyError:\n## pass\n else:\n new_instance = self._clone(as_model=other_schema)\n\n if overrides:\n for attr, value in overrides.items():\n setattr(new_instance, attr, value)\n\n return new_instance", "def from_db_response(cls, record: Dict[str, Any]) -> BaseModel:\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to randomly select the four elements of the winning ticket
def pick_winner(self) : winning_combo = [] count = 0 while count <= 3 : random = self.lotto_elements[randint(0, 14)] winning_combo.append(random) count += 1 return winning_combo
[ "def draw_winning_ticket(self):\r\n number_taken = []\r\n ticket = []\r\n n_number = 0\r\n while len(ticket)<6:\r\n number = random.randint(1, 45)\r\n if number not in ticket:\r\n ticket.append(number)\r\n number_taken.append(number)\r\n ticket.sort()\r\n return ticket", "def draw_ticket(self):\r\n number_taken = []\r\n ticket = []\r\n n_number = 0\r\n while len(ticket)<6:\r\n number = random.randint(1, 45)\r\n if number not in ticket:\r\n ticket.append(number)\r\n number_taken.append(number)\r\n ticket.sort()\r\n return ticket", "def lottery():\n drawing_list = []\n i = 0\n while i < 6:\n pick = randint(1, 50)\n if pick not in drawing_list:\n drawing_list.append(pick)\n i += 1\n drawing_list.sort()\n return drawing_list", "def pick_random_questions(num_questions):\n print(\"=====pick_random_questions fired...\")\n shuffle(QUESTIONS)\n questions = sample(list(QUESTIONS), k=num_questions)\n\n shuffle(questions)\n return questions", "def draw(self,n):\n selected=[]\n for i in range(n):\n index = random.randint(0, len(self.balls)-1)\n color = self.balls.pop(index) #select color and remove it from hat\n selected.append(color)\n return selected", "def select_ten():\n random_10 = set([])\n while len(random_10) < 10:\n num = randint(0,20)\n random_10.add(num)\n \n return random_10", "def rand_selector(population, num_to_select=3):\n return random.sample(population, num_to_select)", "def retrieve_by_random(quota, cand_list):\n # Step 1: Shuffle to prevent sequential sampling.\n shuffle(cand_list)\n \n # Step 2: Retrieve the first k random items.\n result_list = cand_list[:quota] # return1\n \n return result_list", "def _choose_winners_weighted(\n guild_id: int, entrants: list[discord.Member], num_winners: int\n) -> list[discord.Member]:\n if len(entrants) < num_winners:\n raise Exception(\"There are not enough entrants for that many winners.\")\n\n # Just to add even more randomness\n random.shuffle(entrants)\n random.shuffle(entrants)\n random.shuffle(entrants)\n\n past_winner_win_counts = DB.get().win_counts(guild_id)\n entrants = sorted(\n entrants, key=lambda entrant: past_winner_win_counts.get(entrant.id, 0)\n )\n\n total_win_counts = {}\n for entrant in entrants:\n entrant_past_wins = past_winner_win_counts.get(entrant.id, 0)\n if entrant_past_wins not in total_win_counts:\n total_win_counts[entrant_past_wins] = 1\n else:\n total_win_counts[entrant_past_wins] += 1\n\n tickets_per_win_bucket = {}\n highest_entrant_wins = max(total_win_counts.keys())\n for i in range(highest_entrant_wins, -1, -1):\n tickets_per_win_bucket[i] = (4 / 3) ** (highest_entrant_wins - i)\n\n total_tickets = 0\n for win, tickets in tickets_per_win_bucket.items():\n total_tickets += total_win_counts.get(win, 0) * tickets\n\n value_of_one_ticket = 1 / total_tickets\n for win, multiplier in tickets_per_win_bucket.copy().items():\n tickets_per_win_bucket[win] = multiplier * value_of_one_ticket\n\n p_list = []\n for win, tickets in reversed(tickets_per_win_bucket.items()):\n for i in range(0, total_win_counts.get(win, 0)):\n p_list.append(tickets)\n\n return numpy.random.choice(entrants, num_winners, replace=False, p=p_list)", "def test_combo(self):\n i = random.randrange(1, 7)\n odds_to_land_arrival = 0.75\n odds_to_takeoff_arrival = 0.89\n\n\n q = []\n for clock_tick in range(0, i):\n #there lies a possiblity where a random number adds both a landing and take off to the queu\n random_gen = random.random()\n\n if random_gen < odds_to_land_arrival:\n status = \"to_land\"\n new_plane = Plane.generate_Plane(status,i)\n 
q.append(new_plane)\n\n if random_gen < odds_to_takeoff_arrival:\n status = \"to_takeoff\"\n new_plane = Plane.generate_Plane(status,i)\n q.append(new_plane)\n\n if random_gen > odds_to_takeoff_arrival and random_gen > odds_to_land_arrival:\n status = \"None\"\n new_plane = Plane.generate_Plane(status,i)\n q.append(new_plane)", "def getRandom():\n alist = []\n with open(\"./wappen.tsv\", encoding=\"utf8\") as coas:\n reader = csv.DictReader(coas, delimiter=\"\\t\")\n for row in reader:\n alist.append(row)\n chosen = random.choice(alist)\n return chosen", "def select_random_tour(self): #May or may not be useful\n\n tour = []\n reds = list(self.redSet)\n blues = list(self.blueSet)\n \n \n red = False\n for i in range (self.numcities):\n if red: \n x = (random.randint(0,1) * (len(reds) - 1) ) // 1\n city = reds[x]\n reds.remove(city)\n tour.append(city)\n red = False\n else: \n x = (random.randint(0,1) * (len(blues) - 1)) // 1\n city = blues[x]\n blues.remove(city)\n tour.append(city)\n red = True\n\n if self.is_valid_tour(tour) and len(tour) == self.numcities: \n return tour\n else: \n print(\"RANDOM TOUR IS BAD\")\n \n return None", "def pickMontyDoor(door1,door2,door3,doors,PlayerDoor):\r\n MontyDoor = randint(1,3)\r\n while MontyDoor == PlayerDoor:\r\n MontyDoor = randint(1,3)\r\n ## With the Monty's door selected, we must now make sure he didnt pick the same\r\n ## door as the Player as well as with the car behind it.\r\n unchosenDoor = [1,2,3]\r\n unchosenDoor.pop((PlayerDoor - 1))\r\n unchosenDoor.pop((MontyDoor - 1))", "def test_six_pairs_by_rng(self):\n self.check_for_six_pairs = check_for_six_pairs_instant_win(self.list_of_players)\n self.assertFalse(self.check_for_six_pairs, \"Instant win by 6 pairs!!!\")", "def get_cards():\n return random.randint(1, 10)", "def computerinput(kleuren):\r\n vierhidden = list()\r\n for pos in range(0, 4):\r\n vierhidden.append(random.choice(kleuren))\r\n return vierhidden", "def _tournament_selection(self, k=2):\n random.shuffle(self._population)\n\n return max(self._population[:k])", "def main():\r\n num_of_quick_pick = int(input(\"How many quick pick? \"))\r\n for i in range(0, num_of_quick_pick):\r\n quick_pick_numbers = []\r\n for j in range(0, 6):\r\n random_pick = random.randint(MIN, MAX)\r\n while random_pick in quick_pick_numbers:\r\n random_pick = random.randint(MIN, MAX)\r\n quick_pick_numbers.append(random_pick)\r\n quick_pick_numbers.sort()\r\n\r\n for j in quick_pick_numbers:\r\n print(\"{:<5d}\".format(j), end=\"\")\r\n print()", "def tournament_selection(population, tournament_size=2):\n\n winners = []\n while len(winners) < POPULATION_SIZE:\n competitors = random.sample(population, tournament_size)\n competitors.sort()\n winners.append(competitors[0])\n \n return winners" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a txt file into a csv
def convert_to_csv(txtfile, csvfile):
    with open(txtfile, 'r') as f1, open(csvfile, 'w', newline='') as f2:
        out_file = csv.writer(f2)
        for line in f1:
            line = line.strip(' \n\t')
            for ch in [',', ';', '.', '"', '!', '(', ')', ':', '/', "-", '\\', '?']:
                if ch in line:
                    line = line.replace(ch, ' ')
            out_file.writerow(list(line.strip().split()))
[ "def txt_to_csv(path_to_txt_file):\n\tcsv_filename = f\"{path_to_txt_file.split('/')[-1][:-4]}.csv\"\n\twith open(path_to_txt_file.split('/')[-1]) as fin, open(csv_filename, 'w') as fout:\n\n\t\to=csv.writer(fout)\n\t\tfor line in fin:\n\t\t\to.writerow(line.split())\n\n\t#the text processing ruins the columns names and so we'll add them back in\n\tdata = pd.read_csv(csv_filename)\n\t#percent = data.With\n\tdata = data.iloc[1:len(data)-1]\n\tdata = data.dropna(axis=1)\n\n\t#data['Monthly Coverage Percent'] = percent\n\tdata.columns = ['CSA','CBSA','Name','Total','1 Unit','2 Units','3 & 4 Units',\n\t\t\t\t\t '5 or more','Number of structures with 5 units or more',\n\t\t\t\t\t 'Monthly Coverage Percent']\n\n\tdata.to_csv(csv_filename, index=False)", "def raw_to_csv(self, raw_file):\n csv_file = tempfile.NamedTemporaryFile()\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(self.headers)\n for i, line in enumerate(raw_file):\n if i >= self.start_line and self.raw_separator_re.search(line):\n with_commas = self.raw_separator_re.sub(r'\\1,\\3', line).strip()\n csv_file.write(with_commas)\n csv_file.write('\\n')\n # cursor is at the end of the file, move it back to beginning\n csv_file.seek(0)\n return csv_file", "def text_to_csv(file_path, save_path):\n file_path = Path(file_path)\n save_path = Path(save_path)\n\n # set up caption dictionary\n captions = {}\n labels = ['image_id', 'image_name', 'caption_id', 'caption']\n for lbl in labels:\n captions[lbl] = []\n\n image_name2ids = {}\n counter = 0\n\n # read txt file an extract info\n with open(file_path, 'r') as file:\n doc = file.read()\n for line in doc.split('\\n'):\n # split line by white space\n tokens = line.split()\n if len(tokens) == 0:\n continue\n # take the first token as image id, the rest as description\n caption_id, caption_tokens = tokens[0], tokens[1:]\n\n # extract .jpg filename from image id\n image_name = caption_id.split('#')[0]\n\n if image_name not in image_name2ids:\n image_name2ids[image_name] = counter\n counter += 1\n\n # convert description tokens back to string\n caption = ' '.join(caption_tokens)\n\n # add all info to the caption dictionary\n captions['image_id'].append(image_name2ids[image_name])\n captions['image_name'].append(image_name)\n captions['caption_id'].append(caption_id)\n captions['caption'].append(caption)\n\n parent = save_path.parent\n if not parent.is_dir():\n # if the directory of the save path is not a directory\n # then make it a directory along with any of its parents\n parent.mkdir(parents=True)\n\n # convert dict to DataFrame\n cap_df = pd.DataFrame(data=captions, columns=labels)\n cap_df.to_csv(save_path)", "def altc2csv(input_file=\"\"):\n data = []\n text_head = \"SiteName;Date;Time;Severity;Object;Problem;Cause;AdditionalText;AckState;AlarmId;NotificationId\"\n output_file = None\n if os.path.exists(input_file):\n with open(input_file, 'r') as f:\n for line in f:\n words = get_words_from_line(line, \"_altc(.*?).log:\", \"|;\")\n data.append(words)\n output_file = output_data(text_head, data, input_file)\n return output_file", "def write_csv(data, filepath):\n pass #TODO implement", "def process_csv(self, file_name: str):", "def csv_read(self):\n with open(self.filename) as file:\n sn = csv.Sniffer() #Initialisieren des Sniffers\n sn.preferred = [\";\"]\n\n #Das try und except wurde im Unterricht besprochen und ich habe es so uebernommen\n try:\n dialect = sn.sniff(file.read(1024)) #durch das Sniffen erkennt der Sniffer meistens um welchen Dialekt es sich handelt\n except 
csv.Error:\n if file.endswith(\"csv\"): #bei einer Fehlermeldung wird der Delimiter manuell gesetzt\n delimiter = \";\" #Setzen des \"Seperators\"\n else:\n delimiter = \"\\t\" #Setzen des \"Seperators\"\n file.seek(0)\n reader = csv.reader(file,delimiter=delimiter)\n dialect = reader.dialect\n\n file.seek(0) #damit das File wieder an den Anfang zurueckspringt\n\n reader = csv.reader(file, dialect) #Reader wird festgelegt mit File und dem Dialekt\n\n text = []\n rownum = 0\n for row in reader:\n if rownum == 0:\n header = row #Header bestimmen\n else:\n colnum = 0\n for col in row:\n text.append(row) #Anhaengen der Werte an text\n colnum += 1\n rownum += 1\n\n file.close() #Schliessen des Files\n\n return text.copy() #Zurueckgeben des Textes", "def lgjc2csv(input_file=\"\"):\n data = []\n text_head = \"SiteName;Date;Time;Severity;Status;Duration;Object;Problem;Cause;AdditionalText;AckState;AlarmId\"\n output_file = None\n if os.path.exists(input_file):\n with open(input_file, 'r') as f:\n for line in f:\n words = get_words_from_line(line, \"_lgjc(.*?).log:\", \"|;\")\n data.append(words)\n output_file = output_data(text_head, data, input_file)\n return output_file", "def read_text(filepath, **kwargs):\n if isinstance(filepath, pd.DataFrame):\n return filepath\n sep = kwargs.get('sep', None)\n ext = os.path.splitext(filepath)[1].lower()\n\n if sep is None:\n if ext == '.tsv':\n kwargs['sep'] = '\\t'\n\n elif ext == '.csv':\n kwargs['sep'] = ','\n\n else:\n found_sep = _identify_separator(filepath)\n kwargs['sep'] = found_sep\n\n return read_csv(filepath, **kwargs)", "def invlrc2csv(input_file=\"\"):\n data = []\n text_head = \"SiteName;LicenseType;LicenseName;SiteNameLicenseKey;FAJ;LicenseState;FeatureState;ServiceState\" \\\n \";ValidFrom;ValidUntil;currLimit;grantedLevel;limitReached;Description \"\n output_file = None\n if os.path.exists(input_file):\n with open(input_file, 'r') as f:\n for line in f:\n words = get_words_from_line(line, \"_invlrc(.*?).log:\", \"|;\")\n node_name = words[0]\n license_name = words[1]\n license_key = node_name + words[2]\n faj = words[3]\n license_state = words[4]\n if len(words) == 10:\n license_type = \"Feature\"\n feature_state = words[5]\n service_state = words[6]\n valid_from = words[7]\n valid_until = words[8]\n description = words[9]\n curr_limit = \"-\"\n granted_level = \"-\"\n limit_reached = \"-\"\n elif len(words) == 11:\n license_type = \"Capacity\"\n valid_from = words[5]\n valid_until = words[6]\n curr_limit = words[7]\n granted_level = words[8]\n limit_reached = words[9]\n description = words[10]\n feature_state = \"-\"\n service_state = \"-\"\n data.append(\n [node_name, license_type, license_name, license_key, faj, license_state, feature_state,\n service_state, valid_from, valid_until, curr_limit, granted_level, limit_reached,\n description])\n output_file = output_data(text_head, data, input_file)\n return output_file", "def _make_csv(self, fixed_width_data):\n self._schema.seek(0)\n fixed_width_data.seek(0)\n fixed_width_text = TextIOWrapper(fixed_width_data, encoding='latin-1')\n\n csv_file = TemporaryFile(mode='w+')\n fixed2csv(fixed_width_text, self._schema, output=csv_file)\n\n fixed_width_text.close()\n csv_file.seek(0)\n\n self.logger.debug('Converted fixed-width data to CSV')\n return csv_file", "def lgoc2csv(input_file=\"\"):\n data = []\n text_head = \"SiteName;Date;Time;User;Action;MO;Attributes;Value\"\n output_file = None\n if os.path.exists(input_file):\n with open(input_file, 'r') as f:\n for line in f:\n words = 
get_words_from_line(line, \"_lgoc(.*?).log:\", \"|;\")\n x = re.split(r\" \", words[-1])\n list1 = [words[0], words[1], words[2]]\n for i in x:\n list1.append(i)\n data.append(list1)\n output_file = output_data(text_head, data, input_file)\n return output_file", "def csv(self):\r\n reader = csv.reader(self.text.splitlines())\r\n return [l for l in reader]", "def import_csv(self, model, header_fields, file_txt):\n # get xml_ids\n f = StringIO.StringIO(file_txt)\n rows = csv.reader(f, delimiter=',')\n id_index = -1\n xml_ids = []\n for row in rows: # Check the first row only\n head_row = [isinstance(x, basestring) and x.lower() or ''\n for x in row]\n id_index = head_row.index('id')\n break\n if id_index >= 0:\n for row in rows:\n if isinstance(row[id_index], basestring) and \\\n len(row[id_index].strip()) > 0:\n xml_ids.append(row[id_index])\n # Do the import\n Import = self.env['base_import.import']\n imp = Import.create({\n 'res_model': model,\n 'file': file_txt,\n })\n [errors] = imp.do(\n header_fields,\n {'headers': True, 'separator': ',',\n 'quoting': '\"', 'encoding': 'utf-8'})\n if errors:\n raise ValidationError(_(str(errors[0]['message'])))\n return xml_ids", "def tuplelist_to_csv(edgelist, filename):\n\twith open(filename, 'w') as out:\n\t\tcsv_out = csv.writer(out)\n\t\tfor row in edgelist:\n\t\t\tcsv_out.writerow(row)", "def trace2csv(tracefile, reader):\n out_path = os.path.splitext(tracefile)[0]+'.csv'\n data_keys = core.get_data_keys(reader)\n data_key_order = core.get_data_key_order(reader)\n\n infile = core.open_tracefile(tracefile, reader)\n outfile = open(out_path, 'w')\n\n if data_key_order is not None:\n outfile.write(','.join([key for key in data_key_order]) + '\\n')\n else:\n outfile.write(','.join([key for key in data_keys]) + '\\n')\n\n for packet in infile:\n out_str = packet2csv_row(packet, num_fields=len(data_keys),\n data_key_order=data_key_order)\n outfile.write(out_str)\n\n outfile.close()", "def read_csv(filename):\n\n # Verify the existence of the file\n verify = os.path.exists(filename)\n if not verify:\n raise IOError('The input file given does not exist!')\n\n # Verify that the input file is a csv\n verify = verify_csv_extension(filename)\n if not verify:\n raise ValueError('Please input a valid .csv file!')\n\n # Read from csv file\n data = genfromtxt(filename, dtype='float', delimiter=',', autostrip=True)\n\n return data", "def st2csv(input_file=\"\"):\n data = []\n text_head = \"SiteName;Adm_State;Op_State;MO\"\n output_file = None\n if os.path.exists(input_file):\n with open(input_file, 'r') as f:\n for line in f:\n words = get_words_from_line(line, \"_st(.*?).log:\", r\"|\\s+\")\n if len(words) == 8:\n data.append([words[0], words[4], words[-2], words[-1]])\n else:\n data.append([words[0], \"-\", words[-2], words[-1]])\n output_file = output_data(text_head, data, input_file)\n return output_file", "def cvsw2csv(input_file=\"\"):\n data = []\n text_head = \"SiteName;SW\"\n output_file = None\n if os.path.exists(input_file):\n with open(input_file, 'r') as f:\n for line in f:\n words = get_words_from_line(line, \"_cv(.*?).log:\", r\"|:\\s\")\n data.append([words[0], words[2]])\n output_file = output_data(text_head, data, input_file)\n return output_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update self.body and set new timestamp
def update(self, body): self.body = body
[ "def save(self, *args, **kwargs):\n self.updated_ts = datetime.utcnow()\n super().save(*args, **kwargs)", "def update(self, *args, **kwargs):\n utcnow = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n self.updated_at = utcnow\n\n super(ModelBaseWithTimeStamp, self).update(*args, **kwargs)", "def _updateTs(self, newTs):\n self.longPollPayload.update({'ts': newTs})", "def _timestamp_message(self, attrs):\n if self.timestamp_messages and 'timestamp' not in attrs:\n attrs['timestamp'] = _NOW().strftime(_RFC3339_MICROS)", "def set_updated_at(self):\n self.record['updated_at'] = datetime.utcnow()", "def update(self,):\n req_url = self.user.api_url+'2/data/%s?auth=%s&email=%s' % \\\n (self.key, self.user.token, self.user.email,)\n response = urllib2.urlopen(req_url)\n note_object = json.load(response)\n self.update_from_object(note_object)\n self.content = note_object[u'content']", "async def put_date( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "def setTimeStamp(self, ts):\r\n \tself.timeStamp = ts", "def set_timestamp(self, ps, pfs):\n return _raw_util.raw_message_set_timestamp(self, ps, pfs)", "def _update_now(self, user=None):\r\n self.last_update = datetime.datetime.now()\r\n if user:\r\n self.last_committer = user", "def save(self, *args, **kwargs):\n utcnow = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n self.created_at = utcnow\n self.updated_at = utcnow\n\n super(ModelBaseWithTimeStamp, self).save(*args, **kwargs)", "def set_last_post_time(self) -> None:\n key = f\"{self.key_prefix}:lastposttime\"\n now = datetime.utcnow()\n dt_formatted = now.isoformat()\n self.rdb.set(key, dt_formatted)", "def set_timestamp(self, timestamp):\n self.timestamp = LogEntry.normalize_timestamp(timestamp)", "def _handle_edited_timestamp(self: message.Message, value: str) -> None:\n self._edited_timestamp = utils.parse_time(value)", "def set_last_update_time(self, time):\n self.last_updated = time", "def save(self, *args, **kwargs):\n if self.time_created > self.thread.time_last_activity:\n self.thread.time_last_activity = self.time_created\n self.thread.save()\n\n return super(Message, self).save(*args, **kwargs)", "def setBody(self, body):\n self._body = body", "def set_timestamp(self, ps, pfs):\n return _raw_util.raw_message_sptr_set_timestamp(self, ps, pfs)", "def setTimestamp(self, id):\n updateData = {'$set': {'timestamp': datetime.now()}}\n self.getDataset().update_one(\n {'_id': id}, updateData)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Records the outcome of a single match between two players and updates all tables.
def reportMatch(player1, player2, outcome):
    player1 = bleach.clean(player1)
    player2 = bleach.clean(player2)
    outcome = bleach.clean(outcome)
    if player1 == outcome:
        winner = player1
        loser = player2
    else:
        winner = player2
        loser = player1
    result = checkMatch(player1, player2)
    db, cursor = connect()
    if result == []:
        query1 = """ INSERT INTO matches (round, match, id_1, id_2, result)
                     VALUES (%s, %s, %s, %s, %s);"""
        cursor.execute(query1, [0, 0, player1, player2, outcome])
    else:
        query1 = """ UPDATE matches SET id_1=%s, id_2=%s, result=%s
                     WHERE (id_1 = %s and id_2= %s) or (id_1 = %s and id_2 = %s);"""
        cursor.execute(query1, [player1, player2, outcome,
                                player1, player2, player2, player1])
    db.commit()
    db.close()
    if outcome != 0:
        db, cursor = connect()
        query2 = "UPDATE players SET wins=wins+1 WHERE id=%s;"
        cursor.execute(query2, [outcome])
        query3 = "UPDATE players SET no_matches=no_matches+1 WHERE id=%s;"
        cursor.execute(query3, [outcome])
        db.commit()
        db.close()
        if loser != 0:
            db, cursor = connect()
            cursor.execute(query3, [loser])
            query4 = "UPDATE players SET losses=losses+1 WHERE id=%s;"
            cursor.execute(query4, [loser])
            w_omw = opponentMatchWins(winner)
            l_omw = opponentMatchWins(loser)
            query5 = "UPDATE opponentmw SET omw=%s WHERE id=%s;"
            cursor.execute(query5, [w_omw, winner])
            cursor.execute(query5, [l_omw, loser])
            db.commit()
            db.close()
    else:
        db, cursor = connect()
        query6 = "UPDATE players SET ties=ties+1 WHERE id=%s or id=%s;"
        cursor.execute(query6, [player1, player2])
        query7 = """ UPDATE players SET no_matches=no_matches+1
                     WHERE id=%s or id=%s;"""
        cursor.execute(query7, [player1, player2])
        db.commit()
        db.close()
[ "def reportMatch(winner, loser):\n\n db = connect()\n c = db.cursor()\n\n # these following lines will retrieve\n query = \"SELECT score FROM players WHERE id = %s\"\n\n data = (winner, )\n c.execute(query, data)\n w_score = c.fetchone()\n w_score = int(w_score[0])\n\n data = (loser, )\n c.execute(query, data)\n l_score = c.fetchone()\n l_score = int(l_score[0])\n\n # these criteria rewards/punish a player depending on\n # how strog is the opponent\n # e.g. a winner gets more points for defeating a stronger player that if\n # the opponent was weaker\n # e.g.2 A loser's punish is bigger if the winner was weaker\n if w_score == l_score:\n w_score = w_score + 2\n l_score = l_score - 2\n elif w_score < l_score:\n w_score = w_score + 3\n l_score = l_score - 3\n else:\n w_score > l_score\n w_score = w_score + 1\n l_score = l_score - 1\n\n query = \"UPDATE players SET score = %s WHERE id = %s\"\n\n data = (w_score, winner, )\n c.execute(query,data)\n\n data = (l_score, loser, )\n c.execute(query, data)\n\n query = \"INSERT INTO matches (winner,loser) VALUES (%s,%s)\" \n\n data = (winner, loser)\n c.execute(query, data)\n\n db.commit()\n\n db.close()", "def reportMatch(player1, result1, player2=0, result2=0):\n conn = connect()\n c = conn.cursor()\n # get the id of the current tournament\n c.execute(\"select id from tournaments where active = '1'\")\n currentTournament = c.fetchone()\n\n # get the last matchid (lastMatchID) from the db.\n # if none exists, set lastMatchID to 1.\n # a matchid identifies who played vs who.\n # in the case of a bye, there should only be one match id\n c.execute(\"select matchid from matchresults order by matchid desc limit 1\")\n lastMatchID = c.fetchone()\n if lastMatchID is None:\n lastMatchID = 1\n else:\n lastMatchID = int(lastMatchID[0]) + 1\n\n if result1 == 'b': # if result1 == b(ye), there is no player2 result2\n c.execute(\n \"insert into matchresults \\\n (matchid, playerid, result, tournamentid) values \\\n ('%s', %s, %s, %s)\",\n (lastMatchID, player1, result1, currentTournament)\n )\n conn.commit()\n else: # for all other results (w/l/d), there should be two inserts\n c.execute(\n \"insert into matchresults \\\n (matchid, playerid, result, tournamentid) values \\\n ('%s', %s, %s, %s)\",\n (lastMatchID, player1, result1, currentTournament)\n )\n c.execute(\n \"insert into matchresults \\\n (matchid, playerid, result, tournamentid) values \\\n ('%s', %s, %s, %s)\",\n (lastMatchID, player2, result2, currentTournament)\n )\n conn.commit()", "def reportMatch(winner, loser=None):\n with connect_to_db() as database:\n if loser is None:\n # So a bye is to be given to player `winner`.\n # Check to see whether player `winner` has be give a bye before.\n query = \"SELECT had_bye FROM players WHERE id=%s\"\n parameter = (winner,)\n database['cursor'].execute(query, parameter)\n had_bye = database['cursor'].fetchone()[0]\n\n if had_bye is True:\n print \"Error: Player has already had a bye.\"\n return\n\n # Update the had_bye attribute of the player.\n query = \"UPDATE players SET had_bye=TRUE WHERE id=%s\"\n database['cursor'].execute(query, parameter)\n\n query = \"INSERT INTO matches (winner_pid, loser_pid) VALUES (%s, %s);\"\n parameter = (winner, loser)\n database['cursor'].execute(query, parameter)\n database['connection'].commit()", "def process_result(self, winner, loser):\n competitors = self.db['competitors']\n losers = self.db['losers']\n\n if not winner in competitors.name.values:\n print(\"Warn: Given winner is not in list of competitors\")\n if not 
loser in competitors.name.values:\n print(\"Warn: Given loser is not in list of competitors\")\n \n #winner levels up\n competitors.loc[competitors.name == winner,'level'] += 1\n competitors.loc[competitors.name == winner,'active'] = False\n\n #loser is removed from competitor table and added to losers table\n losers = losers.append(competitors.loc[competitors.name == loser,])\n competitors = competitors.drop(\n competitors.index[competitors.name==loser],\n inplace=False\n )\n\n self.db['competitors'] = competitors\n self.db['losers'] = losers\n\n #update record of all results\n match_records = self.db['match_records']\n record = pd.DataFrame([[winner, loser, pd.Timestamp.now()]], columns=['winner', 'loser', 'timestamp'])\n match_records.append(record)\n self.db['match_records'] = match_records", "def store_name_match(self, match_id, name, account):\n self.c.execute('SELECT * FROM player WHERE (name = ?) AND (matchid = ?)', (name, match_id))\n results = self.c.fetchone()\n if results is None:\n self.c.execute('INSERT INTO player (name, matchid, account) VALUES (?,?,?)', (name, match_id, account))\n self.conn.commit()\n logging.log(logging.INFO, 'Sotring a name and macth id in player table: %s and %s', name, match_id)", "def update_potential_matches():\n\n matched = request.form.get(\"user_match\")\n user_id_1 = current_user.id\n match_date = datetime.datetime.now()\n query_pincode = session['query_pincode']\n session['matched_user'] = matched\n\n successfulmatch = UserMatch.query.filter_by(user_id_1 = current_user.id).first()\n if successfulmatch is not None:\n if successfulmatch.user_id_2 == matched:\n return redirect(url_for('show_potential_matches'))\n\n\n match = UserMatch(user_id_1=user_id_1,\n user_id_2=matched,\n match_date=match_date,\n user_2_status=False,\n query_pincode=query_pincode)\n\n db.session.add(match)\n db.session.commit()\n\n return redirect(url_for('confirmed'))", "def simulate_match(\n team_1, team_2, first_to, predictions, adjust_records=True, adjust_ratings=True\n):\n team_1_win_p = expected_outcome(team_1.rating, team_2.rating)\n\n # simulate the match:\n team_1_wins = 0\n team_2_wins = 0\n while team_1_wins < first_to and team_2_wins < first_to:\n if random.random() <= team_1_win_p:\n team_1_wins += 1\n else:\n team_2_wins += 1\n\n if adjust_records:\n team_1.game_wins += team_1_wins\n team_2.game_losses += team_1_wins\n team_2.game_wins += team_2_wins\n team_1.game_losses += team_2_wins\n\n if team_1_wins > team_2_wins:\n team_1.match_wins += 1\n team_1.head_to_head[team_2.name] += 1\n else:\n team_2.match_wins += 1\n team_2.head_to_head[team_1.name] += 1\n\n if adjust_ratings:\n games_played = team_1_wins + team_2_wins\n expected_team_1_wins = (\n expected_outcome(team_1.rating, team_2.rating) * games_played\n )\n expected_team_2_wins = (\n expected_outcome(team_2.rating, team_1.rating) * games_played\n )\n team_1.rating += K_FACTOR * (team_1_wins - expected_team_1_wins)\n team_2.rating += K_FACTOR * (team_2_wins - expected_team_2_wins)\n\n return (team_1, team_2) if team_1_wins > team_2_wins else (team_2, team_1)", "def update_players(self, p1_id, p2_id, p1_wins, p2_wins, draws):\n\n p1_elo = self.scores.loc[p1_id, 'elo']\n p2_elo = self.scores.loc[p2_id, 'elo']\n\n p1_updated, p2_updated = \\\n self.calculate_update(p1_elo, p2_elo, p1_wins, p2_wins, draws)\n\n self.scores.loc[p1_id, 'elo'] = p1_updated\n self.scores.loc[p2_id, 'elo'] = p2_updated", "def decide_match(t_id, player1_id, player2_id):\n players = [player1_id, player2_id]\n winner = 
random.choice(players)\n players.remove(winner)\n loser = players[0]\n report_match(t_id, winner, loser)", "def populate_matches(pid):\n hist = DOTA_API.get_match_history(account_id=pid)\n for match in hist['matches']:\n str_match = match['match_id']\n obj_player = dota.factory.player_get_byid(pid)\n dota.factory.match_put(str_match, obj_player)\n print '--- Finished match import for ' + str(obj_player)", "def swissPairings():\n\n count = countPlayers()\n odd_player = count % 2\n adjust = count-odd_player\n total_matches = totalMatches()\n matches_count = (total_matches + (total_matches*odd_player)/count)/2\n total_rounds = int(math.log(adjust,2))\n round_no = int(total_matches/count) + 1\n\n if round_no > total_rounds:\n result = playerStandings()\n print \"\"\"\n The tournament is finished! The winner is \" + str(result[0][1])\n + \", id# \" + str(result[0][0]) + \" with \" + str(result[0][2])\n + \" wins.\"\"\"\n return\n\n elif (round_no == total_rounds) and (matches_count != \n ((adjust/2)+odd_player)*(round_no-1)):\n print \"The last round is currently in play.\"\n return\n \n elif matches_count != ((adjust/2)+odd_player)*(round_no-1):\n print \"The current round needs to finish before pairing players.\"\n return\n\n else:\n matched_players = []\n if round_no != 1:\n result = playerStandings()\n else:\n db, cursor = connect()\n query = \"\"\"\n SELECT id, name, wins, no_matches FROM players ORDER BY \n RANDOM() LIMIT \" + str(count) + \";\"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n db.close()\n x=0\n m=1\n for row in result:\n while checkOne(row[0], round_no) == []:\n if odd_player == 1:\n bye_player = checkMatch(row[0], 0)\n bye = checkOne(0, round_no)\n if bye_player == [] and bye == []:\n matched_players.append((row[0], row[1], 0, 'bye'))\n db, cursor = connect()\n query = \"\"\"\n INSERT INTO matches (round, match, id_1, id_2,\n result) VALUES (%s, %s, %s, %s, null);\"\"\"\n cursor.execute(query, [round_no, m, row[0], 0])\n db.commit()\n db.close()\n m = m+1\n reportMatch(row[0], 0, row[0])\n if (row[0] != result[x][0]) and (checkOne(result[x][0], \n round_no) == []): \n matched_players.append((row[0], row[1], result[x][0], \n result[x][1]))\n db, cursor = connect()\n query = \"\"\"\n INSERT INTO matches (round, match, id_1, id_2, \n result) VALUES (%s, %s, %s, %s, null);\"\"\"\n cursor.execute(query, [round_no, m, row[0], result[x][0]])\n db.commit()\n db.close()\n m = m+1\n else:\n x = x+1 \n return matched_players", "def play_match(self):\n self.games = []\n self.games_played = 0\n self.player1.wins = 0\n self.player2.wins = 0\n loop = True\n while(loop):\n self.games_played += 1\n game = Game(self.games_played, self.player1, self.player2)\n print(game)\n if(game.winner == self.player1.name):\n self.player1.wins += 1\n elif(game.winner == self.player2.name):\n self.player2.wins += 1\n else:\n pass\n\n if(self.player1.wins >= self.wins_needed):\n loop = False\n\n if(self.player2.wins >= self.wins_needed):\n loop = False\n\n # Match has ended! 
Reset player variables and return a dictionary with results\n self.winner = self.player1.name if self.player1.wins >= self.wins_needed else self.player2.name\n self.player1.wins = 0\n self.player2.wins = 0\n return {'games_played': self.games_played, 'winner': self.winner}", "def process_match_data(matches, player_map):\n for match_object in matches:\n match = match_object['match']\n player_1, player_2 = match['player1_id'], match['player2_id']\n player_1_rating = player_map[player_1]['rating']\n player_2_rating = player_map[player_2]['rating']\n\n # Recalculate TrueSkill ratings of the winner and loser\n winner = match['winner_id']\n if winner == player_1:\n player_1_rating, player_2_rating = rate_1vs1(player_1_rating, player_2_rating)\n else:\n player_2_rating, player_1_rating = rate_1vs1(player_2_rating, player_1_rating)\n\n # Update the new ratings\n player_map[player_1]['rating'] = player_1_rating\n player_map[player_2]['rating'] = player_2_rating\n\n return player_map", "def start_match(team1, team2):\n print()\n print('Starting match: '+ RNAMES[team1]+' vs ' + RNAMES[team2])\n print()\n wait_for_keypress()\n dumpfile = os.path.join(DUMPSTORE, time.strftime('%Y%m%d-%H%M%S'))\n args = CMD_STUB + [team1, team2,\n '--dump', dumpfile,\n '--seed', str(random.randint(0, sys.maxsize))]\n stdout, stderr = Popen(args, stdout=PIPE, stderr=PIPE,\n universal_newlines=True).communicate()\n tmp = reversed(stdout.splitlines())\n for lastline in tmp:\n if lastline.startswith('Finished.'):\n break\n else:\n print(\"*** ERROR: Apparently the game crashed. At least I could not find the outcome of the game.\")\n print(\"*** Maybe stderr helps you to debug the problem\")\n print(stderr, speak=False)\n print(\"***\", speak=False)\n return 0\n if stderr:\n print(\"***\", stderr, speak=False)\n print('***', lastline)\n if 'had a draw.' 
in lastline:\n return 0\n else:\n tmp = lastline.split(\"'\")\n winner = tmp[1]\n loser = tmp[3]\n if winner == RNAMES[team1]:\n print(RNAMES[team1], 'wins.')\n return 1\n elif winner == RNAMES[team2]:\n print(RNAMES[team2], 'wins.')\n return 2\n else:\n print(\"Unable to parse winning result :(\")\n return 0", "def main(db):\n invalid_players = get_players_with_no_team(db)\n player_info_to_update = []\n for p in invalid_players:\n pid = p[0]\n pos = p[1]\n if pos == Player.GOALIE:\n team_info = get_team_info_from_goalie_stats(db, pid)\n else:\n team_info = get_team_info_from_skater_stats(db, pid)\n if len(team_info) > 0:\n # get team id from the last game\n player_info_to_update.append((team_info[-1][0], pid))\n\n if len(player_info_to_update) > 0:\n print(player_info_to_update)\n update_player_teams(db, player_info_to_update)", "def updateMatch():\n\tglobal matchId\n\tglobal PLAYER\n\tglobal keyURL\n\ttry:\n\t\tpipe = requests.get(\"https://na.api.pvp.net/api/lol/na/v1.3/game/by-summoner/\"+ str(playerId)+\"/recent\" + keyURL)\n\t\twrapperMap = pipe.json()\n\t\tfor game in wrapperMap[\"games\"]:\n\t\t\tif 'numDeaths' in game['stats'] and game['stats']['numDeaths'] > 0:\n\t\t\t\tif game['gameId']!= matchId:\n\t\t\t\t\t#Update the cache, returns true\n\t\t\t\t\tmatchId = game['gameId']\n\t\t\t\t\tPLAYER['teamId'] = game['teamId']\n\t\t\t\t\tPLAYER['championId'] = game['championId']\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\texcept:\n\t\tprint 'Failed to update match data.'\n\t\t#assume that nothing changed\n\t\tpass\n\treturn False", "def match_ended(self, game_result):\n if self._current_match is None:\n return\n\n white_player, black_player = self._current_match\n winner = None\n if game_result.winner is None:\n white_player.player_tied()\n black_player.player_tied()\n elif game_result.winner.username == white_player.username:\n white_player.player_won_white()\n winner = 0\n black_player.player_lost()\n elif game_result.winner.username == black_player.username:\n black_player.player_won_black()\n winner = 1\n white_player.player_lost()\n\n self._played_matches = [(self._current_match, winner)] + self._played_matches\n self._game_results += [game_result]", "def _insert_match_to_another_pitch(self, match_ind, pitch1_ind, pitch2_ind):\n # if target is match\n if isinstance(self.schedule.iloc[match_ind,pitch2_ind],models.Match):\n # add new row to end\n self.schedule.loc[self.schedule.index.max()+1] = None\n # create space for match\n self.schedule.loc[match_ind:,pitch2_ind] = self.schedule.loc[match_ind:,pitch2_ind].shift()\n # move match to another pitch\n self._switchMatches((match_ind,pitch1_ind),(match_ind,pitch2_ind))", "def _last_round_matching(self):\n\n\t\tself.matches[0].playerA = self.players[0]\n\t\tself.matches[0].playerB = self.players[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of pairs of players for the next round of a match. Assuming that there are an even number of players registered, each player appears exactly once in the pairings. If there is an odd number of players then an odd player is assigned a 'bye' only once per tournament. A 'bye' is reported as an id of 0, with name 'bye'. Each player is paired with another player with an equal or nearly-equal win record, that is, a player adjacent to him or her in the standings. If players have equal wins, they are rated by Opponent Match Wins. For the first round, players are matched randomly. Function prevents rematches between players during a given round.
def swissPairings():
    count = countPlayers()
    odd_player = count % 2
    adjust = count-odd_player
    total_matches = totalMatches()
    matches_count = (total_matches + (total_matches*odd_player)/count)/2
    total_rounds = int(math.log(adjust,2))
    round_no = int(total_matches/count) + 1

    if round_no > total_rounds:
        result = playerStandings()
        print """
        The tournament is finished! The winner is " + str(result[0][1])
        + ", id# " + str(result[0][0]) + " with " + str(result[0][2])
        + " wins."""
        return

    elif (round_no == total_rounds) and (matches_count !=
            ((adjust/2)+odd_player)*(round_no-1)):
        print "The last round is currently in play."
        return

    elif matches_count != ((adjust/2)+odd_player)*(round_no-1):
        print "The current round needs to finish before pairing players."
        return

    else:
        matched_players = []
        if round_no != 1:
            result = playerStandings()
        else:
            db, cursor = connect()
            query = """
                SELECT id, name, wins, no_matches FROM players ORDER BY
                RANDOM() LIMIT " + str(count) + ";"""
            cursor.execute(query)
            result = cursor.fetchall()
            db.close()
        x = 0
        m = 1
        for row in result:
            while checkOne(row[0], round_no) == []:
                if odd_player == 1:
                    bye_player = checkMatch(row[0], 0)
                    bye = checkOne(0, round_no)
                    if bye_player == [] and bye == []:
                        matched_players.append((row[0], row[1], 0, 'bye'))
                        db, cursor = connect()
                        query = """
                            INSERT INTO matches (round, match, id_1, id_2,
                            result) VALUES (%s, %s, %s, %s, null);"""
                        cursor.execute(query, [round_no, m, row[0], 0])
                        db.commit()
                        db.close()
                        m = m+1
                        reportMatch(row[0], 0, row[0])
                if (row[0] != result[x][0]) and (checkOne(result[x][0],
                        round_no) == []):
                    matched_players.append((row[0], row[1], result[x][0],
                                            result[x][1]))
                    db, cursor = connect()
                    query = """
                        INSERT INTO matches (round, match, id_1, id_2,
                        result) VALUES (%s, %s, %s, %s, null);"""
                    cursor.execute(query, [round_no, m, row[0], result[x][0]])
                    db.commit()
                    db.close()
                    m = m+1
                else:
                    x = x+1
        return matched_players
[ "def swissPairings():\n current_standings = playerStandings()\n next_round = []\n match_count = 0\n for player_row in current_standings:\n player_id = player_row[0]\n name = player_row[1]\n if match_count % 2 == 0:\n match_list = [player_id, name]\n else:\n match_list += [player_id, name]\n next_round.append(tuple(match_list))\n match_count += 1\n return next_round", "def simRound():\n pairings = swissPairings()\n for (id1, name1, id2, name2) in pairings:\n if id1 == id2:\n # bye, so just report it\n reportMatch(id1, id2, id1)\n else:\n # randomly select winner or draw\n x = randint(0,9)\n if x < 4:\n # id1 wins\n reportMatch(id1, id2, id1)\n elif x > 4:\n # id2 wins\n reportMatch(id1, id2, id2)\n else:\n # draw\n reportMatch(id1, id2)", "def swissPairings():\n pairing_index = 0\n pairs = []\n DB = psycopg2.connect(\"dbname=tournament\")\n c = DB.cursor()\n \n c.execute(\"SELECT playerID from match_record\")\n number_of_pairings = len(c.fetchall())/2\n\n while pairing_index < number_of_pairings:\n c.execute(\"SELECT playerID, name from match_record ORDER BY wins DESC LIMIT 2 OFFSET %s\", (pairing_index * 2,))\n current_list = c.fetchall()\n new_tuple = current_list[0] + current_list[1]\n pairs.append(new_tuple)\n pairing_index = pairing_index + 1\n \n return pairs", "def get_matchups(self):\n length = len(self.players)\n p = sample(self.players, length)\n if length % 2 == 1:\n p.append(choice(p[:-1]))\n return [(p[i * 2], p[i * 2 + 1]) for i in range(len(p) // 2)]", "def create_matchs(cls, tournament, rounds):\r\n p_match = TournamentService.tournament_players_list(tournament)\r\n p_match.sort(key=operator.attrgetter('tournament_points', 'rank'), reverse=True)\r\n t_players_id = GetModelService.get_models_id(p_match)\r\n for player in p_match:\r\n player.no_vs = []\r\n for id_player in t_players_id:\r\n if player.id != id_player and id_player not in player.vs:\r\n player.no_vs.append(id_player)\r\n player.update('no_vs', player.no_vs)\r\n if rounds['count'] == 1:\r\n while p_match:\r\n middle = int(len(p_match)/2)\r\n player_1 = p_match[0]\r\n player_2 = p_match[middle]\r\n player_1.vs.append(player_2.id)\r\n player_1.update('vs', player_1.vs)\r\n player_2.vs.append(player_1.id)\r\n player_2.update('vs', player_2.vs)\r\n match = ([player_1.id, 0], [player_2.id, 0])\r\n rounds['matchs_list'].append(match)\r\n del p_match[middle]\r\n del p_match[0]\r\n else:\r\n pos_player = 0\r\n while True:\r\n p_match.sort(key=operator.attrgetter('tournament_points', 'rank'), reverse=True)\r\n player_1 = p_match[pos_player]\r\n del p_match[pos_player]\r\n for player_model in p_match:\r\n try:\r\n if player_model.id == player_1.no_vs[pos_player]:\r\n player_2 = player_model\r\n p_match.remove(player_model)\r\n break\r\n except IndexError:\r\n if player_model.id == player_1.no_vs[-1]:\r\n player_2 = player_model\r\n p_match.remove(player_model)\r\n break\r\n player_1.vs.append(player_2.id)\r\n player_1.update('vs', player_1.vs)\r\n player_2.vs.append(player_1.id)\r\n player_2.update('vs', player_2.vs)\r\n match = ([player_1.id, 0], [player_2.id, 0])\r\n rounds['matchs_list'].append(match)\r\n while p_match:\r\n player_1 = p_match[0]\r\n del p_match[0]\r\n for player_model in p_match:\r\n if player_model.id in player_1.no_vs:\r\n player_2 = player_model\r\n p_match.remove(player_model)\r\n break\r\n player_1.vs.append(player_2.id)\r\n player_1.update('vs', player_1.vs)\r\n player_2.vs.append(player_1.id)\r\n player_2.update('vs', player_2.vs)\r\n match = ([player_1.id, 0], [player_2.id, 0])\r\n 
rounds['matchs_list'].append(match)\r\n if len(rounds['matchs_list']) > int(tournament.nb_players/2):\r\n p_match = TournamentService.tournament_players_list(tournament)\r\n p_match.sort(key=operator.attrgetter('tournament_points', 'rank'), reverse=True)\r\n for player in p_match:\r\n while len(player.vs) >= rounds['count']:\r\n del player.vs[-1]\r\n player.update('vs', player.vs)\r\n rounds['matchs_list'] = []\r\n pos_player += 1\r\n continue\r\n else:\r\n break", "def update_opponents(_round, opponents):\n\n none_player = player.Player()\n for _match in _round.matches:\n # Here, '_match' is an objet of Match class with attribute match = ([player_1, score1], [player_2, score2])\n if none_player not in (_match.match[0][0], _match.match[1][0]): # odd number of players\n opponents[_match.match[0][0]].append(_match.match[1][0])\n opponents[_match.match[1][0]].append(_match.match[0][0])\n return opponents", "def generate_matches(self, first_round: bool = False):\n self._tournament.current_round += 1\n self._tournament.matches.append([])\n matches = []\n if first_round:\n _temp_players = self._tournament.players.copy()\n _temp_players.sort(key=lambda x: x.rank, reverse=True)\n upper = []\n lower = []\n\n for i in range(len(_temp_players)):\n if i < (len(_temp_players) - 1) / 2:\n upper.insert(len(upper), _temp_players[i])\n else:\n lower.insert(len(lower), _temp_players[i])\n\n for i in range(len(upper)):\n match = Match()\n match.upPlayer = upper[i]\n match.downPlayer = lower[i]\n matches.insert(len(matches), match)\n\n else:\n _temp_players = self._tournament.players.copy()\n _temp_players.sort(key=lambda x: (x.tournament_rank, x.rank), reverse=True)\n\n while len(_temp_players) > 0:\n current = _temp_players[0]\n _temp_players.remove(current)\n opponent = self.__get_next_opponent_for_player(current, _temp_players)\n\n if opponent == None:\n self._view.print_error('No opponent found for a player')\n return\n\n matches.insert(len(matches), Match(current, opponent))\n\n _temp_players.remove(opponent)\n\n self._tournament.matches[self._tournament.current_round] = matches\n self._tournament.save_tournament()\n self._view.print_matches_list(matches, self._tournament.current_round)\n\n self.enter_match_result()", "def make_pairs_first_round(self):\n\n none_player = player.Player()\n if self._players:\n pairs = []\n sorted_players = sorted(self._players, key=lambda p: p.elo_rating, reverse=True)\n half_number = len(self._players) // 2\n\n for index in range(half_number):\n pairs.append([sorted_players[index], sorted_players[half_number + index]])\n\n if len(self._players) % 2 == 1:\n pairs.append([none_player, sorted_players[-1]])\n else:\n raise mvc_exc.EmptyListError('There is no player at the moment!')\n return pairs", "def generate_pairings(win_groups):\n # For each win group, try each combination of matches, checking for\n # rematches.\n pairings = []\n for idx, win_group in enumerate(win_groups):\n win_group_success = False\n # Go through each pair in the win group, checking for rematches.\n for pairs in all_pairs(win_group):\n # Go through each pair in the win group, checking for rematches.\n contains_rematch = False\n for pair in pairs:\n is_rematch = check_for_rematch(pair[0], pair[1])\n if is_rematch is True:\n contains_rematch = True\n break\n\n if contains_rematch is True:\n # This set of pairs contains a rematch. 
Try the next pairing\n # permutation.\n continue\n else:\n win_group_success = True\n for pair in pairs:\n # Add this pairing to the pairings\n player1_name = id_to_name(pair[0])\n player2_name = id_to_name(pair[1])\n pairings.append((pair[0], player1_name, pair[1],\n player2_name))\n break\n\n # If there was no success on any pair permutation, return to\n # swissPairings() to adjust the groupings.\n if win_group_success is False:\n return None, idx\n\n return pairings, None", "def swissPairings():\n # get the current playerStandings and pairup and make a list\n # as the tuples are already sorted by the number of wins a\n # sequential pairing will pair players with equal wins or nearly\n # equal wins\n results = playerStandings()\n pairings = []\n i = 0\n while i < len(results):\n pairings.append((results[i][0], results[i][1], results[i+1][0],\n results[i+1][1]))\n i = i + 2\n return pairings", "def play_round(self, tournament_id, current_round, past_matches=None):\n\n for pairings in self.swisspairing:\n if len(pairings) == 4:\n if past_matches:\n if have_played_before(pairings[0], pairings[2],\n past_matches):\n self.logger.warning(\"This pairing has played before pairings1=\" \\\n + str(pairings[0]) \\\n + \" pairings2=\" + str(pairings[2]))\n aval = randint(0, 9)\n bval = randint(0, 9)\n if pairings[2] == -1: #Odd Player\n tournament.reportMatch(pairings[0], pairings[2],\n tournament_id, current_round,\n self.database,\n self.cursor)\n else:\n if aval > bval:\n tournament.reportMatch(pairings[0], pairings[2],\n tournament_id, current_round,\n self.database,\n self.cursor)\n else:\n tournament.reportMatch(pairings[2], pairings[0],\n tournament_id, current_round,\n self.database,\n self.cursor)", "def make_pairs(self, players_info):\n\n pairs = []\n sorted_players_info = sorted(players_info, key=lambda k: (-k[\"total_point\"], -k['initial_ranking']))\n\n player_number = len(sorted_players_info)\n pair_index = 1\n # If the numbers of players is odd, the treatment is only done for the numbers of players - 1\n pair_number = player_number // 2\n\n # Each pair has two players, build player_1 as a dictionary in order to mark the player of which pair\n sorted_players_info_dict = dict()\n sorted_players_info_dict[pair_index] = sorted_players_info\n not_yet_encountered_players = dict()\n called_time = dict()\n called_time[pair_index] = 1\n while pair_index <= pair_number:\n player_1_info = sorted_players_info_dict[pair_index][0]\n player_1 = player_1_info[\"player\"]\n if called_time[pair_index] == 1:\n # This list has always the player_1\n not_yet_encountered_players[pair_index] = [info for info in sorted_players_info_dict[pair_index]\n if info[\"player\"] not in player_1_info[\"opponents\"]]\n # Eliminate the player_1 in the list, player_1 is the first element of the list.\n not_yet_encountered_players[pair_index] = not_yet_encountered_players[pair_index][1:]\n called_time[pair_index] += 1\n try:\n pair_result = self.make_pair(player_1, not_yet_encountered_players[pair_index])\n except mvc_exc.EmptyListError:\n # Return in the previous pair and re-make previous pair\n pair_index -= 1 # CHANGE HERE : -=1 to re-make the previous pair, pair_index = 1: re-begin all\n # Remove the previous pair to re-make it.\n pairs.pop()\n else:\n pair, player_2_info, new_not_yet_encountered_players = pair_result\n # Update the list of not yet encountered players after selecting one of its elements, then remove the\n # selected element from the list. 
The goal is that the next time, another player will be selected.\n not_yet_encountered_players[pair_index] = new_not_yet_encountered_players\n # pass to next pair\n pair_index += 1\n # Each time, remove the pair of players has been made in the sorted list\n sorted_players_info_dict[pair_index] = [element for element\n in sorted_players_info_dict[pair_index - 1]\n if element[\"player\"] not in pair]\n called_time[pair_index] = 1\n\n # Build pair\n pairs.append(pair)\n\n if player_number % 2 == 1:\n # Pair with none player if number of players is odd\n none_player = player.Player()\n pairs.append([sorted_players_info_dict[pair_index][-1][\"player\"], none_player])\n\n return pairs", "def play_match(self):\n self.games = []\n self.games_played = 0\n self.player1.wins = 0\n self.player2.wins = 0\n loop = True\n while(loop):\n self.games_played += 1\n game = Game(self.games_played, self.player1, self.player2)\n print(game)\n if(game.winner == self.player1.name):\n self.player1.wins += 1\n elif(game.winner == self.player2.name):\n self.player2.wins += 1\n else:\n pass\n\n if(self.player1.wins >= self.wins_needed):\n loop = False\n\n if(self.player2.wins >= self.wins_needed):\n loop = False\n\n # Match has ended! Reset player variables and return a dictionary with results\n self.winner = self.player1.name if self.player1.wins >= self.wins_needed else self.player2.name\n self.player1.wins = 0\n self.player2.wins = 0\n return {'games_played': self.games_played, 'winner': self.winner}", "def recursivePairingWithNamedData(currentPlayerStandings):\n # 2 players left, return if not previously matched\n if len(currentPlayerStandings) == 2:\n p1 = currentPlayerStandings[0]\n p2 = currentPlayerStandings[1]\n if not hasPreviouslyMatchd(p1.id, p2.id):\n return [(p1.id, p1.name, p2.id, p2.name)]\n else:\n return None\n # more than 2 players, always select first player\n # and pair with the rest players according to ranking\n # till found a available pairing\n else:\n p1 = currentPlayerStandings[0]\n for p2 in currentPlayerStandings[1:]:\n if hasPreviouslyMatchd(p1.id, p2.id):\n continue\n # Recursively pair with a copy of the rest players\n playerStandingsCopy = list(currentPlayerStandings)\n playerStandingsCopy.remove(p1)\n playerStandingsCopy.remove(p2)\n subPairing = recursivePairingWithNamedData(playerStandingsCopy)\n if subPairing is not None:\n # found available paring! 
concact and return\n return [(p1.id, p1.name, p2.id, p2.name)] + subPairing\n else:\n # (p1, p2) is not a possible pairing, try next\n continue\n return None", "def _create_matches(self):\n\n\t\tmatches = []\n\n\t\t# Last round contains no matches, just a single player\n\t\tfor round in range(1, TOT_NB_ROUNDS):\n\t\t\tnbMatchs = int(NB_PLAYERS / (2 ** round))\n\t\t\tmatches.append([])\n\t\t\tfor _ in range(nbMatchs):\n\t\t\t\tmatches[round - 1].append(Match(self.playerMatrix))\n\n\t\treturn matches", "def simulate(self, debug = False):\n\t\t\n\t\t# roundNb starts at 0, this is for the 2 first rounds\n\t\tif self.roundNb == 0:\n\t\t\tself._initialisation_matching()\n\t\t\n\t\telif self.roundNb == 1:\n\t\t\tself._early_matching()\n\n\t\telif self.roundNb == TOT_NB_ROUNDS - 2:\n\t\t\tself._last_round_matching()\n\n\t\telse:\n\t\t\tself._end_matching()\n\n\t\tif debug:\n\t\t\tprint(\"players:\")\n\t\t\tfor player in self.players:\n\t\t\t\tprint(player.name)\n\n\t\t\tprint(\"matches:\")\n\t\t\tfor match in self.matches:\n\t\t\t\tprint(match)\n\n\t\tnextPlayers = [match.winner() for match in self.matches]\n\n\t\treturn nextPlayers", "def players_cards_with_tie():\n player1_cards = [Card('H', '2'), Card('H', '3'), Card('H', '4'), Card('H', '5'), Card('H', '6'),\n Card('H', '7'), Card('H', '8'), Card('H', '10'), Card('D', '2'), Card('D', '4'),\n Card('D', '5'), Card('D', '6'), Card('D', '7'), Card('D', '8'), Card('S', '2'),\n Card('S', '3'), Card('S', '4'), Card('S', '5'), Card('S', '6'), Card('S', '7'),\n Card('C', '2'), Card('C', '3'), Card('C', '4'), Card('C', '5'), Card('C', '6'),\n Card('C', '7')]\n player2_cards = [Card('H', '9'), Card('D', '3'), Card('H', 'J'), Card('H', 'K'), Card('H', 'Q'),\n Card('H', 'A'), Card('D', '9'), Card('D', '10'), Card('D', 'J'), Card('D', 'K'),\n Card('D', 'Q'), Card('D', 'A'), Card('S', '8'), Card('S', '9'), Card('S', '10'),\n Card('S', 'J'), Card('S', 'K'), Card('S', 'Q'), Card('S', 'A'), Card('C', '8'),\n Card('C', '9'), Card('C', '10'), Card('C', 'J'), Card('C', 'K'), Card('C', 'Q'),\n Card('C', 'A')]\n players_cards = [player1_cards, player2_cards]\n return players_cards", "def random_pairing(standings, pairings):\n # Randomly shuffle the standings in place.\n shuffle(standings)\n\n # Check to see if there are an odd number of players.\n have_odd_players = False\n if len(standings) % 2 != 0:\n have_odd_players = True\n\n # If we have an odd number of players, we need to deal with a bye for a\n # player.\n # Let's keep track of that.\n if have_odd_players is True:\n dealt_with_bye = False\n\n # Go through standings two at a time and generate the pairings [1].\n standings_it = iter(standings)\n for home_player in standings_it:\n if have_odd_players is True and dealt_with_bye is False:\n pairings.append((home_player[0], home_player[1], home_player[0],\n 'Give a Bye'))\n dealt_with_bye = True\n else:\n away_player = next(standings_it)\n pairings.append((home_player[0], home_player[1], away_player[0],\n away_player[1]))\n\n return\n\n # Credits\n # [1] Idea for using an iterator to go through a list two items at a time\n # was found on this Stack Overflow page:\n # http://stackoverflow.com/questions/16789776/", "def get_next_match(self):\n if not self._not_played_matches:\n return\n\n p1, p2 = self._not_played_matches.pop(0)\n p1 = self._players[p1]\n p2 = self._players[p2]\n p1.set_classification(Classification.WHITE)\n p2.set_classification(Classification.BLACK)\n self._current_match = (p1, p2)\n return self._current_match" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a set of message components with yes/no buttons, ready for use. If provided, the given IDs will be used for the buttons. If not, the button custom IDs will be set to the strings "YES" and "NO".
def boolean_buttons(cls, yes_id: str = None, no_id: str = None) -> 'MessageComponents':
    return cls(
        ActionRow(
            Button(label="Yes", style=ButtonStyle.success, custom_id=yes_id or "YES"),
            Button(label="No", style=ButtonStyle.danger, custom_id=no_id or "NO"),
        ),
    )
[ "def YesNo(title, question, default):\n dlg = wx.MessageDialog(None, question, title, wx.YES_NO | wx.ICON_QUESTION) \n if dlg.ShowModal() == wx.ID_YES:\n result = True\n else:\n result = False\n dlg.Destroy()\n return result", "def translateButtonsFromKupu(self, context, buttons):\n return_buttons = []\n\n for button in buttons:\n if button == 'save-button':\n try:\n if not context.checkCreationFlag():\n return_buttons.append('save')\n except AttributeError:\n pass\n elif button == 'bg-basicmarkup':\n pass\n elif button == 'bold-button':\n return_buttons.append('bold')\n elif button == 'italic-button':\n return_buttons.append('italic')\n elif button == 'bg-supsuper-button':\n pass\n elif button == 'subscript':\n return_buttons.append('sub')\n elif button == 'supscript':\n return_buttons.append('sup')\n elif button == 'bg-colorchooser':\n pass\n elif button == 'forecolor-button':\n return_buttons.append('forecolor')\n elif button == 'hilitecolor-button':\n return_buttons.append('backcolor')\n elif button == 'bg-justify':\n pass\n elif button == 'justifyleft-button':\n return_buttons.append('justifyleft')\n elif button == 'justifycenter-button':\n return_buttons.append('justifycenter')\n elif button == 'justifyright-button':\n return_buttons.append('justifyright')\n elif button == 'bg-list':\n pass\n elif button == 'list-ol-addbutton':\n return_buttons.append('numlist')\n elif button == 'list-ul-addbutton':\n return_buttons.append('bullist')\n elif button == 'definitionlist':\n pass\n elif button == 'bg-indent':\n pass\n elif button == 'outdent-button':\n return_buttons.append('outdent')\n elif button == 'indent-button':\n return_buttons.append('indent')\n elif button == 'bg-drawers':\n pass\n elif button == 'imagelibdrawer-button':\n return_buttons.append('image')\n elif button == 'linklibdrawer-button' or button == 'linkdrawer-button' or button == 'anchors-button':\n if 'link' not in return_buttons:\n return_buttons.append('link')\n elif button == 'embed-tab':\n return_buttons.append('media')\n elif button == 'manage-anchors-tab':\n return_buttons.append('anchor')\n elif button == 'toc-tab':\n pass\n elif button == 'tabledrawer-button':\n return_buttons.append('tablecontrols')\n elif button == 'bg-remove':\n pass\n elif button == 'removeimage-button':\n pass\n elif button == 'removelink-button':\n return_buttons.append('unlink')\n elif button == 'bg-undo':\n pass\n elif button == 'undo-button':\n return_buttons.append('undo')\n elif button == 'redo-button':\n return_buttons.append('redo')\n elif button == 'spellchecker':\n return_buttons.append('iespell')\n elif button == 'source':\n return_buttons.append('code')\n elif button == 'styles' or button == 'ulstyles' or button == 'olstyles':\n if 'style' not in return_buttons:\n return_buttons.append('style')\n elif button == 'zoom':\n return_buttons.append('fullscreen')\n else:\n if button not in return_buttons:\n return_buttons.append(button)\n return return_buttons", "def ask(theMessage: str, theResponses=None, default=0, cancel=-1, wrap_width=60, **kwds):\n\n #\n # Fix 'Mutable default arguments'\n #\n if theResponses is None:\n theResponses = DEFAULT_ASK_RESPONSES\n\n box = Dialog(**kwds)\n d = box.margin\n lb = wrapped_label(theMessage, wrap_width)\n\n lb.topleft = (d, d)\n buts = []\n for caption in theResponses:\n but = Button(caption, action=lambda x=caption: box.dismiss(x))\n buts.append(but)\n\n brow = Row(buts, spacing=d, equalize='w')\n lb.width = max(lb.width, brow.width)\n col = Column([lb, brow], spacing=d, align='r')\n 
col.topleft = (d, d)\n\n if default is not None:\n box.enter_response = theResponses[default]\n else:\n box.enter_response = None\n if cancel is not None:\n box.cancel_response = theResponses[cancel]\n else:\n box.cancel_response = None\n\n box.add(col)\n box.shrink_wrap()\n\n return box.present()", "def CreateButtons(self):\r\n \r\n # Build a couple of fancy and useless buttons \r\n okBmp = self.MainFrame.CreateBitmap(\"ok\")\r\n cancelBmp = self.MainFrame.CreateBitmap(\"file_error\")\r\n self.okButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_OK, okBmp, \"Ok\")\r\n self.cancelButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_CANCEL, cancelBmp, \"Cancel\")", "def minus_buttons(manager: pygame_gui.UIManager) -> tuple[pygame_gui.elements.UIButton,\n pygame_gui.elements.UIButton,\n pygame_gui.elements.UIButton,\n pygame_gui.elements.UIButton]:\n num_people_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 55), (50, 25)),\n text='-', manager=manager)\n\n closeness_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 115), (50, 25)),\n text='-', manager=manager)\n\n infected_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 175), (50, 25)),\n text='-', manager=manager)\n\n connected_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 235), (50, 25)),\n text='-', manager=manager)\n\n return (num_people_minus, closeness_minus, infected_minus, connected_minus)", "def _create_buttons(self, share_button, move_buttons, jump_button, \n top_label):\n if top_label:\n self.top_label = Gtk.Label(label=top_label)\n self.top_label.set_use_markup(True)\n self.track_ref_for_deletion(\"top_label\")\n\n self.add_btn = SimpleButton(Gtk.STOCK_ADD, self.add_button_clicked)\n self.edit_btn = SimpleButton(Gtk.STOCK_EDIT, self.edit_button_clicked)\n self.del_btn = SimpleButton(Gtk.STOCK_REMOVE, self.del_button_clicked)\n self.track_ref_for_deletion(\"add_btn\")\n self.track_ref_for_deletion(\"edit_btn\")\n self.track_ref_for_deletion(\"del_btn\")\n\n self.add_btn.set_tooltip_text(self._MSG['add'])\n self.edit_btn.set_tooltip_text(self._MSG['edit'])\n self.del_btn.set_tooltip_text(self._MSG['del'])\n \n if share_button:\n self.share_btn = SimpleButton(Gtk.STOCK_INDEX, self.share_button_clicked)\n self.share_btn.set_tooltip_text(self._MSG['share'])\n self.track_ref_for_deletion(\"share_btn\")\n else:\n self.share_btn = None\n \n if move_buttons:\n self.up_btn = SimpleButton(Gtk.STOCK_GO_UP, self.up_button_clicked)\n self.up_btn.set_tooltip_text(self._MSG['up'])\n self.down_btn = SimpleButton(Gtk.STOCK_GO_DOWN, \n self.down_button_clicked)\n self.down_btn.set_tooltip_text(self._MSG['down'])\n self.track_ref_for_deletion(\"up_btn\")\n self.track_ref_for_deletion(\"down_btn\")\n else:\n self.up_btn = None\n self.down_btn = None\n\n if jump_button:\n self.jump_btn = SimpleButton(Gtk.STOCK_JUMP_TO, self.jump_button_clicked)\n self.track_ref_for_deletion(\"jump_btn\")\n self.jump_btn.set_tooltip_text(self._MSG['jump'])\n else:\n self.jump_btn = None\n\n hbox = Gtk.HBox()\n hbox.set_spacing(6)\n if top_label:\n hbox.pack_start(self.top_label, False, True, 0)\n hbox.pack_start(self.add_btn, False, True, 0)\n if share_button:\n hbox.pack_start(self.share_btn, False, True, 0)\n hbox.pack_start(self.edit_btn, False, True, 0)\n hbox.pack_start(self.del_btn, False, True, 0)\n if move_buttons:\n hbox.pack_start(self.up_btn, False, True, 0)\n hbox.pack_start(self.down_btn, False, True, 0)\n\n if self.jump_btn:\n hbox.pack_start(self.jump_btn, False, 
True, 0)\n hbox.show_all()\n self.pack_start(hbox, False, True, 0)\n\n if self.dbstate.db.readonly:\n self.add_btn.set_sensitive(False)\n self.del_btn.set_sensitive(False)\n if share_button:\n self.share_btn.set_sensitive(False)\n if jump_button and self.jump_btn:\n self.jump_btn.set_sensitive(False)\n if move_buttons:\n self.up_btn.set_sensitive(False)\n self.down_btn.set_sensitive(False)", "def check_buttons(self):\n\n check_options = []\n for var in self.__info.get_keys():\n\n # \"mode\" settings will get handled as radio buttons.\n if var != \"mode\":\n check_options.append(var)\n\n # Check buttons need a variable to work properly so we'll save them\n # into a list.\n self.__var_list = []\n for i in range(len(check_options)):\n\n variable = StringVar()\n self.__var_list.append(variable)\n\n # Using lambda function, each time the check button is pressed\n # it will trigger the configure_check function\n cb = Checkbutton(self.__group_options,\n text=check_options[i].capitalize(),\n variable=variable,\n onvalue=\"yes\",\n offvalue=\"no\",\n command=lambda x=check_options[i],i=i:\n self.configure_check(text=x,index=i))\n\n cb.pack(side=TOP, pady=5, anchor=\"w\")\n\n if self.__settings[check_options[i]] == \"yes\":\n cb.select()\n else:\n cb.deselect()", "def create_buttons(self):\n self.create_button(\"ADD\", self.add_contact)\n self.create_button(\"EDIT\", self.edit, y=260)\n self.create_button(\"DELETE\", self.delete, y=210)\n self.create_button(\"VIEW\", self.view, y=160)\n self.create_button(\"EXIT\", self.exit_book, bg='tomato', x=300, y=320)\n self.create_button(\"RESET\", self.reset, y=310)", "def remove_buttons(msg):\n\n bot.editMessageReplyMarkup(telepot.message_identifier(\n msg[\"message\"]), reply_markup=None)", "def assigning_buttons_to_items():\n bottled_drinks = ['water', 'sprite', 'cran-water', 'iced coffee']\n juices = ['mango juice', 'cherry juice', 'black-currant juice', 'orange juice']\n snacks = ['fruit snacks', 'nuts', 'granola bar', 'snickers']\n stationery = ['pencil', 'eraser', 'book', 'paper pack']\n\n programmed_buttons = {'A1': bottled_drinks[0], 'A2': bottled_drinks[1],\n 'A3': bottled_drinks[2], 'A4': bottled_drinks[3],\n\n 'B1': juices[0], 'B2': juices[1], 'B3': juices[2], 'B4': juices[3],\n\n 'C1': snacks[0], 'C2': snacks[1], 'C3': snacks[2], 'C4': snacks[3],\n\n 'D1': stationery[0], 'D2': stationery[1], 'D3': stationery[2], 'D4': stationery[3],\n\n '**': 'Give back money.',\n\n 'YES': 'YES', 'NO': 'NO'}\n return programmed_buttons", "def create_buttons(self):\n self.log.info(__name__ + ': ' + 'def ' + self.create_buttons.__name__ + '(): ' + self.create_buttons.__doc__)\n\n for index, phrase in enumerate(self.phrases['menu_buttons']):\n _x = self.offset[0]\n _y = self.offset[1] + (self.button_y + self.size_font * 2) * index\n text = Text(self.font_obj, phrase, _x, _y, Colors.ORANGE)\n self.buttons.append(Button(_x, _y, self.textures['button'], text))", "def display_yes_no_dialog(msg, title):\n\n dlg = ix.application.message_box(msg, title, ix.api.AppDialog.cancel(),\n ix.api.AppDialog.STYLE_YES_NO)\n\n return dlg.is_yes()", "def create_new_message():\n return questionary.confirm(\"Create widgets for another message?\").ask()", "def get_approve_kb() -> InlineKeyboardMarkup:\n return InlineKeyboardMarkup([[\n InlineKeyboardButton(\"🟢 0\", callback_data=\"meme_approve_yes\"),\n InlineKeyboardButton(\"🔴 0\", callback_data=\"meme_approve_no\")\n ]])", "def utter_button_template(self,\n template, # type: Text\n buttons, # type: List[Dict[Text, Any]]\n tracker, # 
type: DialogueStateTracker\n silent_fail=False, # type: bool\n **kwargs # type: **Any\n ):\n # type: (Text, List[Dict[Text, Any]], **Any) -> None\n\n message = self._generate_response(template,\n tracker,\n silent_fail,\n **kwargs)\n if not message:\n return\n\n if \"buttons\" not in message:\n message[\"buttons\"] = buttons\n else:\n message[\"buttons\"].extend(buttons)\n self.utter_response(message)", "def buttons(self):\n return [b.text for b in self.harness.css('button', self.element)]", "def sensitive_buttons(self, mesg_btn):\n #TODO: Maybe it's better a sorted insert on ButtonSet\n sorted_btncodes = self.set.keys()\n sorted_btncodes.sort()\n sorted_btncodes.reverse()\n \n for btn in [self.set[btncode] for btncode in sorted_btncodes]:\n if (btn.btncode & mesg_btn) == btn.btncode:\n mesg_btn -= btn.btncode\n yield btn", "def button_dialog(message: str = 'Please select an option.',\n choices: Sequence[str] = ['Cancel', 'OK'],\n **kwargs) -> int:\n return li.button_dialog(\n message, choices,\n icon=[config.root_folder + '/kineticstoolkit/logo.png',\n config.root_folder + '/kineticstoolkit/logo_hires.png'],\n **kwargs)", "def add_buttons(self, *args):\n\n def buttons(b):\n while b:\n t, r = b[0:2]\n b = b[2:]\n yield t, r\n\n try:\n for text, response in buttons(args):\n self.add_button(text, response)\n except (IndexError):\n raise TypeError('Must pass an even number of arguments')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a message components object with a list of number buttons added. Each number is added as its own button. Numbers provided in the list will be added as primary buttons, while their negatives (if ``add_negative`` is set to ``True``) will be added as secondary buttons. A confirm button will not be automatically added.
def add_number_buttons(
        cls, numbers: typing.List[int] = MISSING, *,
        add_negative: bool = False):
    if numbers is MISSING:
        numbers = [1, 5, 10, 50, 100]
    v = cls()
    if add_negative:
        v.add_component(ActionRow(*[
            Button(label=f"{i:+d}", custom_id=f"NUMBER {i}", style=ButtonStyle.primary)
            for i in numbers
        ]))
        v.add_component(ActionRow(*[
            Button(label=f"{-i:+d}", custom_id=f"NUMBER {-i}", style=ButtonStyle.secondary)
            for i in numbers
        ]))
    else:
        v.add_component(ActionRow(*[
            Button(label=str(i), custom_id=f"NUMBER {i}", style=ButtonStyle.primary)
            for i in numbers
        ]))
    return v
[ "def add_number_buttons(self):\r\n add_rowspace, add_colspace = False, False\r\n for i in range(len(self.board)): # for each row i of the board\r\n add_rowspace = ((i+1)%self.base_size == 1 and i != 0)\r\n if add_rowspace:\r\n [self.widget_board.add_widget(Label(text='')) for _ in range(self.side_size + self.base_size - 1)] # add sudoku row spacing\r\n for j in range(len(self.board[i])): # for each index j in row i of the board\r\n add_colspace = ((j+1)%self.base_size == 1 and j != 0)\r\n if add_colspace:\r\n self.widget_board.add_widget(Label(text='')) # add sudoku spacing\r\n button = NumberButton(row=i, column=j, number=self.board[i][j])\r\n button.bind(on_release=button.use_button)\r\n self.widget_board.add_widget(button)", "def create_buttons(self):\n self.create_button(\"ADD\", self.add_contact)\n self.create_button(\"EDIT\", self.edit, y=260)\n self.create_button(\"DELETE\", self.delete, y=210)\n self.create_button(\"VIEW\", self.view, y=160)\n self.create_button(\"EXIT\", self.exit_book, bg='tomato', x=300, y=320)\n self.create_button(\"RESET\", self.reset, y=310)", "def create_buttons(self):\n self.log.info(__name__ + ': ' + 'def ' + self.create_buttons.__name__ + '(): ' + self.create_buttons.__doc__)\n\n for index, phrase in enumerate(self.phrases['menu_buttons']):\n _x = self.offset[0]\n _y = self.offset[1] + (self.button_y + self.size_font * 2) * index\n text = Text(self.font_obj, phrase, _x, _y, Colors.ORANGE)\n self.buttons.append(Button(_x, _y, self.textures['button'], text))", "def _create_buttons(self, share_button, move_buttons, jump_button, \n top_label):\n if top_label:\n self.top_label = Gtk.Label(label=top_label)\n self.top_label.set_use_markup(True)\n self.track_ref_for_deletion(\"top_label\")\n\n self.add_btn = SimpleButton(Gtk.STOCK_ADD, self.add_button_clicked)\n self.edit_btn = SimpleButton(Gtk.STOCK_EDIT, self.edit_button_clicked)\n self.del_btn = SimpleButton(Gtk.STOCK_REMOVE, self.del_button_clicked)\n self.track_ref_for_deletion(\"add_btn\")\n self.track_ref_for_deletion(\"edit_btn\")\n self.track_ref_for_deletion(\"del_btn\")\n\n self.add_btn.set_tooltip_text(self._MSG['add'])\n self.edit_btn.set_tooltip_text(self._MSG['edit'])\n self.del_btn.set_tooltip_text(self._MSG['del'])\n \n if share_button:\n self.share_btn = SimpleButton(Gtk.STOCK_INDEX, self.share_button_clicked)\n self.share_btn.set_tooltip_text(self._MSG['share'])\n self.track_ref_for_deletion(\"share_btn\")\n else:\n self.share_btn = None\n \n if move_buttons:\n self.up_btn = SimpleButton(Gtk.STOCK_GO_UP, self.up_button_clicked)\n self.up_btn.set_tooltip_text(self._MSG['up'])\n self.down_btn = SimpleButton(Gtk.STOCK_GO_DOWN, \n self.down_button_clicked)\n self.down_btn.set_tooltip_text(self._MSG['down'])\n self.track_ref_for_deletion(\"up_btn\")\n self.track_ref_for_deletion(\"down_btn\")\n else:\n self.up_btn = None\n self.down_btn = None\n\n if jump_button:\n self.jump_btn = SimpleButton(Gtk.STOCK_JUMP_TO, self.jump_button_clicked)\n self.track_ref_for_deletion(\"jump_btn\")\n self.jump_btn.set_tooltip_text(self._MSG['jump'])\n else:\n self.jump_btn = None\n\n hbox = Gtk.HBox()\n hbox.set_spacing(6)\n if top_label:\n hbox.pack_start(self.top_label, False, True, 0)\n hbox.pack_start(self.add_btn, False, True, 0)\n if share_button:\n hbox.pack_start(self.share_btn, False, True, 0)\n hbox.pack_start(self.edit_btn, False, True, 0)\n hbox.pack_start(self.del_btn, False, True, 0)\n if move_buttons:\n hbox.pack_start(self.up_btn, False, True, 0)\n hbox.pack_start(self.down_btn, False, True, 0)\n\n 
if self.jump_btn:\n hbox.pack_start(self.jump_btn, False, True, 0)\n hbox.show_all()\n self.pack_start(hbox, False, True, 0)\n\n if self.dbstate.db.readonly:\n self.add_btn.set_sensitive(False)\n self.del_btn.set_sensitive(False)\n if share_button:\n self.share_btn.set_sensitive(False)\n if jump_button and self.jump_btn:\n self.jump_btn.set_sensitive(False)\n if move_buttons:\n self.up_btn.set_sensitive(False)\n self.down_btn.set_sensitive(False)", "def createButtons(self):\n\t\tnrow = 0\n\t\tncol = 0\n\t\tif self.dataUnit:\n\t\t\tn = self.dataUnit.getNumberOfTimepoints()\n\t\telse:\n\t\t\tn = self.numberOfTimepoints\n\t\tfor i in range(n):\n\t\t\tif ncol == 30:\n\t\t\t\tnrow += 1\n\t\t\t\tncol = 0\n\t\t\tbtn = buttons.GenButton(self.buttonFrame, -1, \"%d\"%(i+1), size = (24, 24))\n\t\t\tbtn.SetFont(wx.Font(7, wx.SWISS, wx.NORMAL, wx.NORMAL))\n\t\t\tbtn.Bind(wx.EVT_BUTTON, lambda e, btn = btn, i = i: self.buttonClickedCallback(btn, i))\n\t\t\tbtn.origColor = btn.GetBackgroundColour()\n\t\t\tbtn.origFgColor = btn.GetForegroundColour()\n\t\t\tself.buttonList.append(btn)\n\t\t\tself.timepointButtonSizer.Add(btn, (nrow, ncol))\n\t\t\tncol = ncol + 1\n\t\tself.buttonFrame.Layout()\n\t\tself.buttonFrame.Raise()\n\t\tself.timepointButtonSizer.Fit(self.buttonFrame)\n\t\tself.mainsizer.Fit(self)", "def initialize(self, buttonList):\r\n if isinstance(buttonList, list):\r\n for i in buttonList:\r\n try:\r\n commandLine = i['command']\r\n except KeyError:\r\n commandLine = None\r\n \r\n newButton = ChatCommandButton(commandLine)\r\n \r\n try:\r\n newButton.setIcon(i['icon'])\r\n except KeyError:\r\n pass\r\n \r\n try:\r\n newButton.setText(i['text'])\r\n except KeyError:\r\n pass\r\n \r\n try:\r\n newButton.setImage(i['image'], i.get('imagefocus', i['image']))\r\n except KeyError:\r\n pass\r\n \r\n try:\r\n newButton.setManialink(i['manialink'])\r\n except KeyError:\r\n pass\r\n \r\n self.__commandButtons.append(newButton)", "def add_button(self, new):\n self.animation_timer = ButtonAnimationTime\n self.Cash_Button = Button(BLC, self.x + self.width / 2 - ButtonPoleButtonXSize / 2,\n self.y + (self.height * 1.0) * ((len(self.Buttons) + 1.0) / (len(self.Buttons) + 2.0))\n - ButtonPoleButtonYSize / 2, ButtonPoleButtonXSize,\n ButtonPoleButtonYSize, 0, new[1], WHT, RED, new[0])\n self.Cost.append(new[2])\n self.Images.append(new[3])", "def boolean_buttons(cls, yes_id: str = None, no_id: str = None) -> 'MessageComponents':\n\n return cls(\n ActionRow(\n Button(label=\"Yes\", style=ButtonStyle.success, custom_id=yes_id or \"YES\"),\n Button(label=\"No\", style=ButtonStyle.danger, custom_id=no_id or \"NO\"),\n ),\n )", "def create_buttons(self):\r\n pos1 = [self.pos[0] + self.width + 10, self.pos[1] + self.height//2]\r\n pos2 = [self.pos[0]+self.width//2, self.pos[1]+self.height+10]\r\n pos3 = [self.pos[0], self.pos[1] + self.height]\r\n b1 = Button(self.game, pos1, 0, 0,\r\n 'speed : ', self.speed, value_min=0.05, value_max=0.2, step=0.01)\r\n b2 = Button(self.game, pos2, 0, 0,\r\n 'speed sprint: ', self.speed_sprint, value_min=0.1, value_max=0.5, step=0.01)\r\n b3 = Button(self.game, pos3, 0, 0, 'jump height', round(self.v0_max/v0_max,2), value_min=0.5, value_max=1.5, step=0.1)\r\n return [b1, b2, b3]", "def create_entry_buttons(self):\n\n for is_star in self.itemsList:\n if is_star[3] == 'out':\n temp_button = Button(text='{0} ({1}) = ${2}*'.format(*is_star))\n temp_button.bind(on_release=self.press_entry)\n else:\n temp_button = Button(text='{} ({}) = ${}'.format(*is_star))\n 
temp_button.bind(on_release=self.press_entry)\n self.root.ids.entriesBox.add_widget(temp_button)\n\n '''\n for name in self.itemsList:\n # create a button for each phonebook entry\n temp_button = Button(text='{0} ({1}) = ${2}*'.format(*name))\n temp_button.bind(on_release=self.press_entry)\n # add the button to the \"entriesBox\" using add_widget()\n self.root.ids.entriesBox.add_widget(temp_button)\n '''", "def build_buttons(self):\n\n # Create a frame w/ two equally-sized columns for the buttons [ | ]\n buttons_frame = tkinter.Frame(self.mainframe)\n buttons_frame.grid(row=2, column=0, sticky='nsew', pady=10)\n buttons_frame.columnconfigure(0, weight=1)\n buttons_frame.columnconfigure(1, weight=1)\n\n # Create the Start/Stop buttons\n self.start_button = tkinter.Button(\n buttons_frame,\n text='Start',\n command=self.start_timer\n )\n\n self.stop_button = tkinter.Button(\n buttons_frame,\n text='Stop',\n command=self.stop_timer\n )\n\n # Insert the buttons\n self.start_button.grid(row=0, column=0, sticky='ew')\n self.stop_button.grid(row=0, column=1, sticky='ew')\n\n # Ensure Stop Button is disabled at the start.\n self.stop_button.config(state=tkinter.DISABLED)", "def createButtonPane(self):\n self._button_listener = EditorButtonListener(self)\n\n panel = JPanel()\n panel.setLayout(BoxLayout(panel, BoxLayout.Y_AXIS))\n panel.setBorder(EmptyBorder(5, 5, 5, 5))\n\n panel.add(Box.createRigidArea(Dimension(0, 5)))\n type_scroll_pane = JScrollPane(self._type_list_component)\n type_scroll_pane.setMaximumSize(Dimension(200, 100))\n type_scroll_pane.setMinimumSize(Dimension(150, 100))\n panel.add(type_scroll_pane)\n panel.add(Box.createRigidArea(Dimension(0, 3)))\n\n new_type_panel = JPanel()\n new_type_panel.setLayout(BoxLayout(new_type_panel, BoxLayout.X_AXIS))\n new_type_panel.add(self._new_type_field)\n new_type_panel.add(Box.createRigidArea(Dimension(3, 0)))\n new_type_panel.add(\n self.createButton(\n \"New\", \"new-type\", \"Save this message's type under a new name\"\n )\n )\n new_type_panel.setMaximumSize(Dimension(200, 20))\n new_type_panel.setMinimumSize(Dimension(150, 20))\n\n panel.add(new_type_panel)\n\n button_panel = JPanel()\n button_panel.setLayout(FlowLayout())\n if self._editable:\n button_panel.add(\n self.createButton(\n \"Validate\", \"validate\", \"Validate the message can be encoded.\"\n )\n )\n button_panel.add(\n self.createButton(\"Edit Type\", \"edit-type\", \"Edit the message type\")\n )\n button_panel.add(\n self.createButton(\n \"Reset Message\", \"reset\", \"Reset the message and undo changes\"\n )\n )\n button_panel.add(\n self.createButton(\n \"Clear Type\", \"clear-type\", \"Reparse the message with an empty type\"\n )\n )\n button_panel.setMinimumSize(Dimension(100, 200))\n button_panel.setPreferredSize(Dimension(200, 1000))\n\n panel.add(button_panel)\n\n return panel", "def CreateButtons(self):\r\n \r\n # Build a couple of fancy and useless buttons \r\n okBmp = self.MainFrame.CreateBitmap(\"ok\")\r\n cancelBmp = self.MainFrame.CreateBitmap(\"file_error\")\r\n self.okButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_OK, okBmp, \"Ok\")\r\n self.cancelButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_CANCEL, cancelBmp, \"Cancel\")", "def add_buttons(self, *args):\n\n def buttons(b):\n while b:\n t, r = b[0:2]\n b = b[2:]\n yield t, r\n\n try:\n for text, response in buttons(args):\n self.add_button(text, response)\n except (IndexError):\n raise TypeError('Must pass an even number of arguments')", "def create_buttons(self):\n for name in 
self.name_to_age:\n temp_button = Button(text=name)\n temp_button.bind(on_release=self.press_entry)\n self.root.ids.entries_box.add_widget(temp_button)", "def init_buttons_poz():\r\n row = 0\r\n left_btn.grid(row=row, column=0, padx=3)\r\n right_btn.grid(row=row, column=19, padx=3)\r\n for i in range(1, 19):\r\n buttons_list[i - 1].grid(row=row, column=i, pady=5)", "def create_book_keyboard(self):\n result = InlineKeyboardMarkup(row_width=6)\n\n button = InlineKeyboardButton(self.strings.count_button,\n callback_data='count')\n result.add(button)\n\n # Adding Date- and Time- buttons by a row for each\n button = InlineKeyboardButton(self.strings.date_button,\n callback_data='date')\n result.add(button)\n button = InlineKeyboardButton(self.strings.time_button,\n callback_data='time')\n result.add(button)\n\n # Adding Set- and Hour- buttons in one row\n set_button = InlineKeyboardButton(self.strings.set_button,\n callback_data='set')\n hour_button = InlineKeyboardButton(self.strings.hour_button,\n callback_data='set_hour')\n result.row(set_button, hour_button)\n\n button = InlineKeyboardButton(self.strings.phone_button,\n callback_data='phone')\n result.add(button)\n\n if self.state_manager.data:\n reserve: Supboard = self.state_manager.data\n if reserve.is_complete:\n button = InlineKeyboardButton(\n self.strings.apply_button,\n callback_data='apply')\n result.add(button)\n\n # Adding Back-button separately\n button = InlineKeyboardButton(self.strings.back_button,\n callback_data='back')\n result.add(button)\n\n return result", "def minus_buttons(manager: pygame_gui.UIManager) -> tuple[pygame_gui.elements.UIButton,\n pygame_gui.elements.UIButton,\n pygame_gui.elements.UIButton,\n pygame_gui.elements.UIButton]:\n num_people_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 55), (50, 25)),\n text='-', manager=manager)\n\n closeness_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 115), (50, 25)),\n text='-', manager=manager)\n\n infected_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 175), (50, 25)),\n text='-', manager=manager)\n\n connected_minus = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((280, 235), (50, 25)),\n text='-', manager=manager)\n\n return (num_people_minus, closeness_minus, infected_minus, connected_minus)", "def create_all_buttons(self):\n\n # Create the play button\n button_y = (self.aliens[3].rect.bottom + self.settings.screen_padding)\n self.play_button = Button(self.screen, \"Play\", self.subtitle_color, self.bg_color,\n self.screen_rect.centerx, button_y)\n\n # Create the highscore button\n button_y = self.play_button.rect.bottom + self.settings.screen_padding\n self.highscore_button = Button(self.screen, \"Highscores\", self.orange_color,\n self.bg_color, self.screen_rect.centerx, button_y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pagerank v1, using eucidean_distance for the convergence check
def pagerank_v2(page_map, eps=1.0e-8, d=0.85):
    vertex_num = page_map.shape[1]
    # start from a uniform rank vector
    v_rank = np.ones((vertex_num, 1), dtype=np.float32)
    v_rank = v_rank / vertex_num
    last_v_rank = np.ones((vertex_num, 1), dtype=np.float32)
    # damped transition matrix: d * M + ((1 - d) / n) * ones
    page_map_hat = (d * page_map) + (((1 - d) / vertex_num) * np.ones((vertex_num, vertex_num), dtype=np.float32))
    loop_num = 0
    # iterate until the rank vector stops changing (Euclidean distance below eps)
    while eucidean_distance((v_rank - last_v_rank).tolist()) > eps:
        print("loop num = %d" % loop_num)
        print("current eps = %f" % eucidean_distance((v_rank - last_v_rank).tolist()))
        last_v_rank = v_rank
        v_rank = np.matmul(page_map_hat, v_rank)
        print("v_rank=")
        print(v_rank)
        loop_num = loop_num + 1
    return v_rank
[ "def pagerank_calc(corpus, old_pagerank, damping_factor):\n new_pagerank = {}\n pages = list(corpus.keys())\n pagerank_base_prob = (1-damping_factor)/len(pages)\n\n for page in pages:\n # Set of tuples of all the links that link to the current page\n # (page_name, num_links_on_the_page)\n links = get_pages_that_link(corpus, page)\n\n # Sum of the PR of all the links to that page\n pr_links = 0\n for link in links:\n # Sum of page rank / numlinks(i)\n pr_links += old_pagerank[link[0]] / link[1]\n\n # print(f\"pr for {page} is: {pr_links}\")\n # Divide by num_links * damping factor probability\n pr_for_page = damping_factor * pr_links\n\n # Add that to the new pagerank\n new_pagerank[page] = round(pagerank_base_prob + pr_for_page, 8)\n\n return new_pagerank", "def CalculPointage(self):\n \n noir = 12\n blanc = 12\n for piece in self.partie.damier.cases.values():\n if str(piece) == \"x\" or str(piece) == \"X\":\n blanc = blanc - 1\n elif str(piece) == \"o\" or str(piece) == \"O\":\n noir = noir - 1\n self.pointBlanc[\"text\"] = str(blanc)\n self.pointNoir[\"text\"] = str(noir)", "def get_pagerank(M, alpha=0.85, return_P=False):\n n = len(M)\n M += np.ones([n, n]) * alpha / n\n la, v = scipy.sparse.linalg.eigs(M, k=1)\n P = v[:, 0]\n P /= np.sum(P,axis = 0)\n if return_P:\n return np.argsort(P)[-1::-1], P\n else:\n return np.argsort(P)[-1::-1]", "def goToPoint(self,point, pas):\n point1 = [self.positionX[-1], self.positionY[-1], self.positionZ[-1]] #position actuelle du drone\n if distanceXY(point, point1) >= pas and denivellation(point,point1)**2 >= pas:\n print(\"going to point\")\n theta = math.atan2(point[1],point[0]) #angle du point à atteindre\n phi = math.atan2(point1[1], point1[0]) #angle absolu du drone en tant que point\n print(\"theta\",theta)\n #angle := angle relatif selon lequel le drone doit se tourner pour faire face au point cible\n angle = math.atan2(distanceXY(point,[0,0])*math.sin(theta-self.alpha)-distanceXY(point1,[0,0])*math.sin(phi-self.alpha), distanceXY(point,[0,0])*math.cos(theta-self.alpha)-distanceXY(point1,[0,0])*math.cos(phi-self.alpha))\n print(\"angle\", angle)\n print(int((angle/self.maxSpeedRotation)*10000)/10000.0)\n if angle < 0:\n self.clockwise(1,int(-(angle/self.maxSpeedRotation)*10000)/10000.0)\n\n else :\n self.counterClockwise(1,int((angle/self.maxSpeedRotation)*10000)/10000.0)\n if denivellation(point1,point)==0:\n rate=0\n else:\n rate = distanceXY(point1, point)/denivellation(point1, point) \n print(\"rate\",rate)\n niveau = denivellation(point1, point)\n if rate > 0:\n self.frontUp(1,rate, niveau)\n elif rate< 0:\n self.frontDown(1, rate, niveau)\n else:\n self.front(1, int(((distanceXY(point1, point) /self.maxSpeed)*10000)/10000.0))\n self.fidelity += 1\n \n else: \n print(\"skipping point\")\n self.goneToX += [self.positionX[-1]]\n self.goneToY += [self.positionY[-1]]\n self.goneToZ += [self.positionZ[-1]]", "def calculPointageCamera(self):\n\n self.pointageLatitude = self.latitude + self.deltaLatitude\n self.pointageLongitude = self.longitude + self.deltaLongitude", "def filtro_pagerank(pagerank, comando, tipo_recomendacion ,canciones, cantidad):\n pagerank_canciones = {}\n pagerank_usuarios = {}\n for clave, valor in pagerank.items():\n if clave in canciones:\n pagerank_canciones[clave] = valor\n else:\n pagerank_usuarios[clave] = valor\n if comando == \"mas_importantes\" or comando == \"recomendacion\" and tipo_recomendacion == \"canciones\":\n return heapq.nlargest(cantidad, pagerank_canciones, key=pagerank_canciones.get)\n else:\n return 
heapq.nlargest(cantidad, pagerank_usuarios, key=pagerank_usuarios.get)", "def get_admin_distance(self):\n pass", "def _get_distance(self):\n\n # implement here", "def efface_point(p):\n\tx, y = p\n\ttag = \"p-{}-{}\".format(x, y)\n\tefface(tag)", "def calculate_points(self):\n pass", "def plane_distance(self,point,planepoint,n0):\n return dot(point,n0)-dot(planepoint,n0)", "def nextWaypoint(self, pose):\n #DONE implement\n location = pose.position\n dist = 100000.\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n nwp = 0\n for i in range(len(self.waypoints)):\n d1 = dl(location, self.waypoints[i].pose.pose.position)\n if dist > d1:\n nwp = i\n dist = d1\n if len(self.waypoints) != 0:\n x = self.waypoints[nwp].pose.pose.position.x\n y = self.waypoints[nwp].pose.pose.position.y\n heading = np.arctan2((y-location.y), (x-location.x))\n angle = np.abs(self.theta-heading)\n if angle > np.pi/4.:\n nwp += 1\n if nwp >= len(self.waypoints):\n nwp = 0\n else:\n nwp = 0\n return nwp", "def getPe(km,p_ref=100000.):\n ae, be = getEdge(km)\n return (ae + p_ref * be)", "def distance_from_point(self,point):\n d,node=self.tree.query(point)\n # distance=pool.map(lambda a: np.linalg.norm(a-point),self.nodes.values())\n return d", "def pageRankFrameSim(G, alp = 0.85, convergeThreshold = .01, maxIter=50):\r\n dims = G.shape\r\n if dims[0] != dims[1]:\r\n print \"Matrix is not square!\"\r\n sys.exit(1)\r\n n=dims[0]\r\n print n\r\n \r\n # transform G into sparsematrix M\r\n M = csc_matrix(G,dtype=np.float)\r\n #print M\r\n \r\n \r\n rsum = np.array(M.sum(1))\r\n rowsum=rsum[:,0]\r\n\r\n # Compute pagerank r until we converge\r\n po, p= np.zeros(n), np.ones(n)\r\n flag= True\r\n iter=0\r\n while np.sum(np.abs(p-po)) > convergeThreshold and iter<maxIter:\r\n iter=iter+1\r\n print iter\r\n po = p.copy()\r\n for i in xrange(0,n):\r\n # in-similarity values of frame i, i.e. 
similarity scores vector corresponding to frames that have i as the top-k similar frame\r\n Ii = np.array(M[:,i].todense())[:,0]\r\n # account for teleportation to frame i\r\n Ti = np.ones(n) / float(n)\r\n # Weighted PageRank Equation\r\n #r[i] = ro.dot( Ii*alpha/2.0 + Si*s + Ti*(1-s)*G[i] ) \r\n p[i] = po.dot( Ii*alp + Ti*(1-alp))\r\n print \"current diff :\"\r\n print np.sum(np.abs(p-po))\r\n # return normalized pagerank\r\n print \"paused at :\"\r\n print np.sum(np.abs(p-po))\r\n return p/sum(p)", "def estimate_fitness(self, tree: CassiopeiaTree) -> None:", "def test_pagerank_results(self):\n\n #expected_input = {'site1':{'incoming links':[ 'site3'], 'number of outgoing links': 2, 'pagerank': 1},\n # 'site2':{'incoming links':['site3', 'site1' ], 'number of outgoing links': 1, 'pagerank': 1},\n # 'site3':{'incoming links':['site2', 'site1' ], 'number of outgoing links': 2, 'pagerank': 1}}\n\n #self.assertEqual(spider.page_rank(expected_input, 1), {'site1':0.575, 'site2':1.0, 'site3':1.425}), \"Pagerank output round 1 incorrect\"\n #self.assertEqual(spider.page_rank(expected_input, 2), {'site1':0.755625, 'site2':1.0, 'site3':1.244375}), \"Pagerank output round 2 incorrect\"\n #self.assertEqual(spider.page_rank(expected_input, 3), {'site1':0.678859375, 'site2':1.0, 'site3':1.321140625}), \"Pagerank output round 3 incorrect\"", "def neighbor(points, p):\n points.sort(key=lambda q: (p[0] - q.get_position()[0]) * (p[0] - q.get_position()[0]) +\n (p[1] - q.get_position()[1]) * (p[1] - q.get_position()[1]) +\n (p[2] - q.get_position()[2]) * (p[2] - q.get_position()[2]))\n return points[0]", "def add_pedestrian_utilities(pedestrian: Cell):\n for cell in pedestrian.get_pedestrian_grid(R_MAX):\n distance = get_euclidean_distance(pedestrian, cell)\n if distance < R_MAX:\n cell.pedestrian_utility += math.exp(1 / (distance ** 2 - R_MAX ** 2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure we can create a basic LtpType and then read it back
def test_create_type_no_parent(self, app):
    with app.app_context():
        conn = get_connection(current_app)
        name = 'Book'
        desc = 'A physical or digital book'
        resp = conn.create_type(name, desc)

        assert type(resp) == LtpType
        assert str(resp.name) == name
        assert str(resp.description) == desc
[ "def test_tool_types_read(self):\n pass", "def testGetType(self):\n self.assertEqual(b\"PhysAddress\",\n mib.ffi.string(mib._getType(\"PhysAddress\").name))\n self.assertEqual(b\"InetAddress\",\n mib.ffi.string(mib._getType(b\"InetAddress\").name))\n self.assertEqual(None, mib._getType(\"SomeUnknownType.kjgf\"))\n self.assertEqual(None, mib._getType(\"snimpySimpleTable\"))", "def wont_ttype(self, option):\r\n self.protocol.protocol_flags['TTYPE'][\"init_done\"] = True", "def test_tool_types_create(self):\n pass", "def _unkown_type(self, uridecodebin, decodebin, caps):\n # This is called *before* the stream becomes ready when the\n # file can't be read.\n streaminfo = caps.to_string()\n if not streaminfo.startswith('audio/'):\n # Ignore non-audio (e.g., video) decode errors.\n return\n self.read_exc = UnknownTypeError(streaminfo)\n self.ready_sem.release()", "def __init__(self, type_string):\n if type_string == 'INT':\n self.typ = self.INT\n elif type_string == 'STRING':\n self.typ = self.STRING\n elif type_string == 'VOID':\n self.typ = self.VOID", "def test_instantiating_a_new_type_returns_expected_type():\n NewType = make_type(int, \"NewType\", [numeric.Minimum(0), numeric.Maximum(10)])\n instance = NewType(5)\n assert isinstance(instance, NewType)\n assert isinstance(instance, int)", "def load_type(self, type_path):\n # Open the the file\n with open(type_path, 'r') as type_file:\n # read the file content\n type_content = type_file.read()\n # add the type in database\n type_object = create_type(type_content, 'type_name', type_path)\n return type_object", "def test_derived_type(self, native_or_pretty, targets):\n serialized = native_or_pretty.serialize(\"unpickleable\", targets)\n assert serialized == \"UnPickleableInt[42]\"", "def test_valid_message_with_type_obj():\n id_ = '12345'\n\n msg = Message({'@type': Type.from_str(TEST_TYPE), '@id': id_})\n assert msg.type == TEST_TYPE\n assert msg.id == id_\n assert msg.doc_uri == 'test_type/'\n assert msg.protocol == 'protocol'\n assert msg.version == '1.0'\n assert msg.normalized_version == '1.0.0'\n assert msg.name == 'test'\n assert msg.version_info == Semver(1, 0, 0)", "def Value(self) -> UnmanagedType:", "def testReadFileObjectMissingType(self):\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n\n yaml_data = u'\\n'.join([\n u'name: int8',\n u'attributes:',\n u' format: signed',\n u' size: 1',\n u' units: bytes']).encode(u'ascii')\n\n file_object = io.BytesIO(initial_bytes=yaml_data)\n\n with self.assertRaises(errors.FormatError):\n definitions_reader.ReadFileObject(definitions_registry, file_object)\n\n yaml_data = u'\\n'.join([\n u'name: int8',\n u'type: integer',\n u'attributes:',\n u' format: signed',\n u' size: 1',\n u' units: bytes',\n u'---',\n u'name: int16',\n u'attributes:',\n u' format: signed',\n u' size: 2',\n u' units: bytes']).encode(u'ascii')\n\n file_object = io.BytesIO(initial_bytes=yaml_data)\n\n with self.assertRaises(errors.FormatError):\n definitions_reader.ReadFileObject(definitions_registry, file_object)", "def TypeInitializer(self) -> _n_5_t_19:", "def TypeHandle(self) -> _n_2_t_12:", "def test_invalid_type(self):\r\n\t\tdataDict = {\r\n\t\t\t'type': 'gibberish'\r\n\t\t}\r\n\t\ttarget = __import__('')\r\n\t\tTHParse = target.TransactionHistoryParse(dataDict)\r\n\r\n\t\tresult = THParse.main()\r\n\r\n\t\tself.assertEqual(result, {})", "def python_type(self):", "def 
test_make_type_returns_a_new_type_that_is_a_subclass_of_the_base_type(type_):\n new = make_type(type_, \"NewType\", [])\n assert new != type_\n assert issubclass(new, type_)", "def test_new_fieldtypes(self):\n self.assertEqual(len(FieldTypes), FieldTypes.F_BYTES32.value + 1)\n for ndx, _ in enumerate(FieldTypes):\n self.assertEqual(_.value, ndx)\n # round trip member to sym and back to member\n self.assertEqual(FieldTypes.from_sym(_.sym), _)", "def test_create_from_gds_type(self):\n # without ids\n _TT = emdb_sff.transform_listType(self.gds_txs)\n TT = adapter.SFFTransformList.from_gds_type(_TT)\n self.assertEqual(self.tx_count, len(TT))\n self.assertEqual(len(TT.get_ids()), 0)\n # with ids\n _TT = emdb_sff.transform_listType(self.gds_txs_with_ids)\n TT = adapter.SFFTransformList.from_gds_type(_TT)\n self.assertEqual(self.tx_count, len(TT))\n self.assertEqual(list(TT.get_ids()), list(_xrange(len(TT))))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the optimal path from starting_point to the zero contour of travel_time by solving the equation x_t = -grad t / |grad t|. travel_time is the travel time for each point of the image from the trial point (zero contour), dx is the grid spacing, and N is the maximum number of integration steps.
def minimal_path(travel_time, starting_point, dx, boundary, steps, N=100):
    grad_t_y, grad_t_x = np.gradient(travel_time, dx)
    if isinstance(travel_time, np.ma.MaskedArray):
        grad_t_y[grad_t_y.mask] = 0.0
        grad_t_y = grad_t_y.data
        grad_t_x[grad_t_x.mask] = 0.0
        grad_t_x = grad_t_x.data

    # h, w = travel_time.shape
    # coords_x, coords_y = np.arange(boundary[0], boundary[2], (boundary[2]-boundary[0])/steps), \
    #     np.arange(boundary[1], boundary[3], (boundary[3]-boundary[1])/steps)
    coords_x, coords_y = np.linspace(boundary[0], boundary[2], steps), \
        np.linspace(boundary[1], boundary[3], steps)
    gradx_interp = RectBivariateSpline(coords_y, coords_x, grad_t_x)
    grady_interp = RectBivariateSpline(coords_y, coords_x, grad_t_y)

    def get_velocity(position):
        """Returns normalized velocity at the position"""
        x, y = position
        vel = np.array([gradx_interp(y, x)[0][0], grady_interp(y, x)[0][0]])
        return vel / np.linalg.norm(vel)

    def euler_point_update(pos, ds):
        return pos - get_velocity(pos) * ds

    def runge_kutta(pos, ds):
        """Fourth order Runge Kutta point update"""
        k1 = ds * get_velocity(pos)
        k2 = ds * get_velocity(pos - k1 / 2.0)
        k3 = ds * get_velocity(pos - k2 / 2.0)
        k4 = ds * get_velocity(pos - k3)
        return pos - (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

    p = runge_kutta(starting_point, dx)
    px, py = [p[0]], [p[1]]
    for i in range(N):
        px.append(p[0])
        py.append(p[1])
        p = runge_kutta(p, dx)
        # x = euler_point_update(x, dx)
    return px, py
[ "def optimal_path(x,distances,predecessors,xstart,xmin,xmax,resolution=None,cost=None):\n xmin = np.asarray(xmin)\n xmax = np.asarray(xmax)\n if resolution is None:\n rmax = np.max(xmax-xmin)\n resolution = rmax/DEFAULT_RESOLUTION\n invresolution = np.divide(1.0,resolution)\n corner = np.floor(np.multiply(x-xmin,invresolution)).astype(int)\n def to_state(index):\n return np.multiply(np.asarray(index),resolution) + xmin\n cellvertices = []\n for ofs in itertools.product(*[[0,1]]*len(xmin)):\n cellvertices.append(tuple(corner + ofs))\n if cost is None:\n cost = lambda x,y:np.linalg.norm(x-y)\n dmin = float('inf')\n vmin = None\n for v in cellvertices:\n if v in distances:\n xv = to_state(v)\n d = distances[v] + cost(xv,x)\n if d < dmin:\n dmin = d\n vmin = v\n if vmin is None:\n return None\n path = search.predecessor_traverse(predecessors,'start',vmin)\n xpath = []\n for v in path:\n if v == 'start':\n xpath.append(xstart)\n else:\n xpath.append(to_state(v))\n if np.any(xpath[-1] != x):\n xpath.append(x)\n return xpath", "def optimal_path_cost(x,distances,predecessors,xstart,xmin,xmax,resolution=None,cost=None):\n xmin = np.asarray(xmin)\n xmax = np.asarray(xmax)\n if resolution is None:\n rmax = np.max(xmax-xmin)\n resolution = rmax/DEFAULT_RESOLUTION\n invresolution = np.divide(1.0,resolution)\n corner = np.floor(np.multiply(x-xmin,invresolution)).astype(int)\n cellvertices = []\n cellvertexstates = []\n for ofs in itertools.product(*[[0,1]]*len(xmin)):\n cellvertices.append(tuple(corner + ofs))\n cellvertexstates.append(np.multiply(cellvertices[-1],resolution)+xmin)\n if cost is None:\n cost = lambda x,y:np.linalg.norm(x-y)\n dmin = float('inf')\n for v,xv in zip(cellvertices,cellvertexstates):\n if v in distances:\n dmin = min(dmin,distances[v] + cost(xv,x))\n return dmin", "def find_path(self, max_step = 2):\n print('start:',np.around(self.atom_chosen, decimals=2), 'goal',np.around(self.design_chosen,decimals=2))\n if np.linalg.norm(self.atom_chosen - self.design_chosen)< self.safe_radius_nm:\n print('direct step, RRT not used')\n return self.design_chosen, [self.design_chosen, self.atom_chosen]\n rrt = RRT(\n start=self.atom_chosen, goal=self.design_chosen, rand_area=[-2, 15],\n obstacle_list=self.obstacle_list, expand_dis= max_step, path_resolution=1)\n path_len = np.inf\n min_path = None\n for _ in range(20):\n path = rrt.planning(animation=False)\n if path is not None:\n if len(path)<path_len:\n min_path = path\n path_len = len(path)\n else:\n break\n\n if min_path is None:\n print('Cannot find path')\n return None, None\n next_target = np.array(min_path[-2])\n return next_target, min_path", "def find_shortest_path_between(self, origin: City, destination: City, visiting_cities: int) -> list:\n\n # Assign amount of cities to visit to the class\n self.__cities_to_visit = visiting_cities\n\n # Print a summary\n print(f\"Start: {origin.name} - {origin.get_region().name} - {origin.get_country().name}\")\n print(f\"Destination: {destination.name} - {destination.get_region().name} - {destination.get_country().name}\")\n print(f\"Amount of cities to visit: {str(visiting_cities)}\")\n\n # Count the iterations\n iterations = 0\n\n # Start timer\n start_time = time.time()\n\n print(f\"============================================================\")\n for traverse_level in self.__traverse_levels:\n\n # Reset runs to zero\n self.__runs_without_improvement = 0\n\n # Run til level has converged\n while self.__runs_without_improvement < self.__converge_threshold:\n\n iterations += 1\n\n # 
Initialize an Ant\n ant = Ant() # type: Ant\n ant.starting_city = origin\n ant.current_city = origin\n ant.destination_city = destination\n\n while len(ant.visited_cities) <= self.__cities_to_visit:\n # Get next city\n next_city = self.__get_next_city(ant, traverse_level)\n\n # Assign next city\n ant.visited_cities.append(next_city)\n ant.current_city = next_city\n\n # Get the score for the ant\n ant = self.__calculate_score(ant)\n\n # Check if current ant beats the previous best ant\n if self.__best_ant is None or self.__best_ant.score > ant.score:\n self.__best_ant = ant\n self.__runs_without_improvement = 0\n print(f\"Found new best route with score: {ant.score}\")\n\n if traverse_level is Country:\n self.__country_sequence = [city.get_country() for city in ant.visited_cities]\n\n if traverse_level is Region:\n self.__region_sequence = [city.get_region() for city in ant.visited_cities]\n\n continue\n\n self.__runs_without_improvement += 1\n\n print(f\"Search has converged on traverse level {traverse_level.__name__}\")\n\n self.__print_best_route()\n print(f\"Iterations: {iterations}\")\n print(f\"Total time elapsed: {time.time()-start_time} \")\n\n return self.__best_ant.visited_cities", "def _estimate_path(self, multiplier, pc_vel, pc_acc):\n # check for duplicates\n self.min_pair_dist, self.t_sum = _check_waypts(\n self.waypts, pc_vel.vlim, pc_acc.alim\n )\n if self.min_pair_dist < JNT_DIST_EPS: # issue a warning and try anyway\n logger.warning(\n \"Duplicates found in input waypoints. This is not recommended,\"\n \" especially for the beginning and the end of the trajectory. \"\n \"Toppra might throw a controllability exception. \"\n \"Attempting to optimise trajectory anyway...\"\n )\n # initial x for toppra's path, essentially normalised time on x axis\n # rescale by given speed limits.\n # only applies to ParametrizeSpline.\n self.path_length_limit = 100 * self.t_sum # empirical magic number\n # t_sum is the minimum time required to visit all given waypoints.\n # toppra generally needs a smaller number for controllabiility.\n # It will find that the needed total path length > t_sum in the end.\n x_max = 1 if multiplier is None else multiplier * self.t_sum\n x = np.linspace(0, x_max, self.waypts.shape[0])\n logger.debug(\n f\"t_sum = {self.t_sum}, t_sum_multiplier = {multiplier}, \"\n f\"estimated path length: {x_max}\"\n )\n # specifying natural here doensn't make a difference\n # toppra only produces clamped cubic splines\n return ta.SplineInterpolator(x, self.waypts, bc_type=\"clamped\")", "def trajOpt(self, state_initial, dircol=0, second_pass=False):\n\n # stopwatch for solver time\n tsolve_pre = time.time()\n\n (x_goal, V_goal, gamma_goal, q_goal) = (200.0, state_initial[2], 0.0, 0.0)\n\n # number of knot points - proportional to x-distance seems to work well\n if not dircol:\n N = int(np.floor(0.8 * np.abs(x_goal - state_initial[0])))\n else:\n N = 30\n\n # optimization problem: variables t_f, u[k], x[k]\n mp = MathematicalProgram()\n\n t_f = mp.NewContinuousVariables(1, \"t_f\")\n dt = t_f[0] / N\n\n k = 0\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n input_trajectory = u\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n state_trajectory = x\n\n for k in range(1, N):\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n input_trajectory = np.vstack((input_trajectory, u))\n state_trajectory = np.vstack((state_trajectory, x))\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % N)\n state_trajectory = 
np.vstack((state_trajectory, x))\n\n # for dircol we can use u_N and first-order hold\n if dircol:\n u = mp.NewContinuousVariables(2, \"u_%d\" % N)\n input_trajectory = np.vstack((input_trajectory, u))\n\n print \"Number of decision vars\", mp.num_vars()\n\n # cost function: penalize time and control effort\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n # TODO: use u of length n+1 for dircol\n def totalcost(X):\n dt = X[0] / N\n u0 = X[1:N + 1]\n u1 = X[N + 1:2 * N + 1]\n v = X[2 * N + 1:3 * N + 1] # cut last item if dirtrans\n return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1)) + 1.0 * X[0] * (u0.dot(v))\n # return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1) + 10.0 * X[0] * (u0.dot(v)))\n\n mp.AddCost(totalcost, allvars)\n\n # initial state constraint\n for i in range(len(state_initial)):\n mp.AddLinearConstraint(state_trajectory[0, i] == state_initial[i])\n\n # final state constraint (x position)\n mp.AddLinearConstraint(state_trajectory[-1, 0] == x_goal)\n\n # final state constraint (z position) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 1] <= 1.5)\n mp.AddLinearConstraint(state_trajectory[-1, 1] >= 0.5)\n\n # final state constraint (velocity) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 2] <= 1.5 * V_goal)\n mp.AddLinearConstraint(state_trajectory[-1, 2] >= V_goal)\n\n # final state constraint (flight path angle) NOTE: small range here\n mp.AddLinearConstraint(state_trajectory[-1, 3] <= gamma_goal + 1.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[-1, 3] >= gamma_goal - 1.0 * np.pi / 180.0)\n\n # final state constraint (pitch rate)\n mp.AddLinearConstraint(state_trajectory[-1, 5] == q_goal)\n\n # input constraints\n for i in range(len(input_trajectory[:, 0])):\n mp.AddLinearConstraint(input_trajectory[i, 0] >= 0.0)\n mp.AddLinearConstraint(input_trajectory[i, 0] <= 1.2 * self.m * self.g)\n mp.AddLinearConstraint(input_trajectory[i, 1] >= -30.0)\n mp.AddLinearConstraint(input_trajectory[i, 1] <= 30.0)\n\n # state constraints\n for i in range(len(state_trajectory[:, 0])):\n # x position\n mp.AddLinearConstraint(state_trajectory[i, 0] >= state_initial[0])\n mp.AddLinearConstraint(state_trajectory[i, 0] <= x_goal)\n # z position\n mp.AddLinearConstraint(state_trajectory[i, 1] >= 0.3)\n mp.AddLinearConstraint(state_trajectory[i, 1] <= 2.0)\n # velocity\n mp.AddLinearConstraint(state_trajectory[i, 2] >= 1.0)\n mp.AddLinearConstraint(state_trajectory[i, 2] <= 3.0 * state_initial[2])\n # flight path angle\n mp.AddLinearConstraint(state_trajectory[i, 3] >= -30.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 3] <= 30.0 * np.pi / 180.0)\n # pitch angle\n mp.AddLinearConstraint(state_trajectory[i, 4] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 4] <= 40.0 * np.pi / 180.0)\n # pitch rate\n mp.AddLinearConstraint(state_trajectory[i, 5] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 5] <= 20.0 * np.pi / 180.0)\n\n # dynamic constraints\n if not dircol:\n # direct transcription\n for j in range(1, N + 1):\n dynamic_prop = dt * self.airplaneLongDynamics(state_trajectory[j - 1, :], input_trajectory[j - 1, :])\n for k in range(len(state_initial)):\n mp.AddConstraint(state_trajectory[j, k] == state_trajectory[j - 1, k] + dynamic_prop[k])\n else:\n # direct collocation\n for j in range(1, N + 1):\n x0 = state_trajectory[j - 1, :]\n x1 = state_trajectory[j, :]\n xdot0 = 
self.airplaneLongDynamics(x0, input_trajectory[j - 1, :])\n xdot1 = self.airplaneLongDynamics(x1, input_trajectory[j, :])\n\n xc = 0.5 * (x1 + x0) + dt * (xdot0 - xdot1) / 8.0\n xdotc = - 1.5 * (x0 - x1) / dt - 0.25 * (xdot0 + xdot1)\n uc = 0.5 * (input_trajectory[j - 1, :] + input_trajectory[j, :])\n f_xc = self.airplaneLongDynamics(xc, uc)\n for k in range(len(state_initial)):\n # TODO: why does \"==\" cause \"kUnknownError\"?\n # mp.AddConstraint(xdotc[k] - f_xc[k] == 0.0)\n mp.AddConstraint(xdotc[k] <= f_xc[k] + 0.001)\n mp.AddConstraint(xdotc[k] >= f_xc[k] - 0.001)\n\n # allow for warm start of dircol program with output of dirtrans program\n if (second_pass) and (self.mp_result == SolutionResult.kSolutionFound):\n # warm start using previous output\n print 'warm start to traj opt'\n t_guess = self.ttraj[-1]\n mp.SetInitialGuess(t_f[0], t_guess)\n\n for i in range(len(state_trajectory[:, 0])):\n for j in range(len(state_initial)):\n mp.SetInitialGuess(state_trajectory[i, j], self.xdtraj[i, j])\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.udtraj[i, 0])\n mp.SetInitialGuess(input_trajectory[i, 1], self.udtraj[i, 1])\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 1.25 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.8 * t_guess)\n\n else:\n # initial guesses\n t_guess = np.abs(x_goal - state_initial[0]) / (0.5 * (V_goal + state_initial[2]))\n mp.SetInitialGuess(t_f[0], t_guess)\n\n z_final_dummy = state_initial[1]\n theta_final_dummy = state_initial[4]\n state_final_dummy = np.array([x_goal, z_final_dummy, V_goal, gamma_goal, theta_final_dummy, q_goal])\n for i in range(len(state_trajectory[:, 0])):\n state_guess = ((N - i) / N) * state_initial + (i / N) * state_final_dummy\n for j in range(len(state_guess)):\n mp.SetInitialGuess(state_trajectory[i, j], state_guess[j])\n\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.m * self.g / 3.5)\n mp.SetInitialGuess(input_trajectory[i, 1], 0.01)\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 2.0 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.5 * t_guess)\n\n # set SNOPT iteration limit\n it_limit = int(max(20000, 40*mp.num_vars()))\n mp.SetSolverOption(SolverType.kSnopt, 'Iterations limit', it_limit)\n\n print(\"** solver begin with N = %d **\" % N)\n # solve nonlinear optimization problem (w/SNOPT)\n result = mp.Solve()\n print result\n\n # convert from symbolic to float\n input_trajectory = mp.GetSolution(input_trajectory)\n t_f = mp.GetSolution(t_f)\n state_trajectory_approx = mp.GetSolution(state_trajectory)\n time_array = t_f[0] * np.linspace(0.0, 1.0, (N + 1))\n\n tsolve_post = time.time()\n tsolve = tsolve_post - tsolve_pre\n\n solver_id = mp.GetSolverId()\n\n print (\"** %s solver finished in %.1f seconds **\\n\" % (solver_id.name(), tsolve))\n print (\"t_f computed: %.3f seconds\" % t_f[0])\n\n # get total cost of solution\n if result == SolutionResult.kSolutionFound:\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory_approx[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n print (\"cost computed: %.3f\" % totalcost(allvars))\n\n # save traj (this is a bit sloppy and redundant but scripts for visualization currently rely on this)\n self.udtraj = input_trajectory\n self.xdtraj = state_trajectory_approx\n self.ttraj = time_array\n self.mp_result = result\n\n # save polynomials of input, state trajectories\n if not dircol:\n self.udtraj_poly = PiecewisePolynomial.FirstOrderHold(time_array[0:-1], input_trajectory.T)\n 
else:\n self.udtraj_poly = PiecewisePolynomial.FirstOrderHold(time_array, input_trajectory.T)\n self.xdtraj_poly = PiecewisePolynomial.Cubic(time_array, state_trajectory_approx.T)\n\n return input_trajectory, state_trajectory_approx, time_array", "def __init__(self, world, start, goal):\n\n # You must choose resolution and margin parameters to use for path\n # planning. In the previous project these were provided to you; now you\n # must chose them for yourself. Your may try these default values, but\n # you should experiment with them!\n self.resolution = np.array([0.2, 0.2, 0.2])\n self.margin = 0.3\n\n # You must store the dense path returned from your Dijkstra or AStar\n # graph search algorithm as an object member. You will need it for\n # debugging, it will be used when plotting results.\n self.path = graph_search(world, self.resolution, self.margin, start, goal, astar=True)\n\n # You must generate a sparse set of waypoints to fly between. Your\n # original Dijkstra or AStar path probably has too many points that are\n # too close together. Store these waypoints as a class member; you will\n # need it for debugging and it will be used when plotting results.\n self.points = np.zeros((1, 3)) # shape=(n_pts,3)\n # self.points = self.path\n\n self.points = np.array(self.path[0])\n for i in range(self.path.shape[0]):\n if i != 0 and i != (self.path.shape[0] - 1):\n segment_1 = (self.path[i] - self.path[i-1])\n segment_2 = (self.path[i+1] - self.path[i-1])\n linear_segment = np.round(np.cross(segment_1, segment_2),5)\n if np.array_equal(linear_segment, [0, 0, 0]):\n pass\n else:\n self.points = np.vstack((self.points, self.path[i]))\n print('added point')\n\n self.points = np.vstack((self.points, self.path[-1]))\n print(self.points.shape)\n\n # Finally, you must compute a trajectory through the waypoints similar\n # to your task in the first project. One possibility is to use the\n # WaypointTraj object you already wrote in the first project. 
However,\n # you probably need to improve it using techniques we have learned this\n # semester.\n\n # STUDENT CODE HERE\n self.v = 2.5 # m/s\n self.t = np.zeros(len(self.points), )\n for i in range(len(self.t) - 1):\n self.t[(i + 1)] = np.linalg.norm((self.points[(i + 1)] - self.points[i])) / self.v\n\n self.point_t = np.zeros(len(self.points), )\n for i in range(int(len(self.t) - 1)):\n self.point_t[(i + 1)] = self.point_t[i] + self.t[i + 1]\n\n # self.f = CubicSpline(self.point_t, self.points, axis=0)\n self.f = interp1d(self.point_t, self.points, axis= 0)", "def pathfindTo(self,x,y,Game):\n self.goalx = x\n self.goaly = y\n dist = math.sqrt((y-self.y)**2 + (x-self.x)**2)\n time = dist / self.speed\n if(time != 0):\n self.xvel = (x - self.x) / time\n self.yvel = (y - self.y) / time\n else:\n self.xvel = 0\n self.yvel = 0", "def compute_tsp_tour(rt, data):\n try:\n # print('Solving TSP now')\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['time_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['time_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic. Parameters are set to defaults. Check website for more options.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem\n solution = routing.SolveWithParameters(search_parameters)\n rt.tsp_solver_status = routing.status()\n if rt.tsp_solver_status == 1:\n rt.tsp_route_time = solution.ObjectiveValue() / 10.0\n rt.tsp_optimality_gap = (rt.total_journey_time - rt.tsp_route_time) / rt.tsp_route_time\n # Save TSP tour in the form a dictionary for scoring\n set_tsp_dict(rt, manager, routing, solution)\n else:\n rt.tsp_route_time = CONST_INFTY\n rt.tsp_optimality_gap = (rt.total_journey_time - rt.tsp_route_time) / rt.tsp_route_time\n\n # # Print solution on console\n # if solution:\n # print_tsp_solution(manager, routing, solution)\n except:\n print('Exception found while analysing TSP for route', rt.index)\n rt.tsp_route_time = CONST_INFTY\n rt.tsp_optimality_gap = (rt.total_journey_time - rt.tsp_route_time) / rt.tsp_route_time", "def plan(self):\n self.start.cost = 0\n self.tree.add(self.start)\n for i in range(self.max_iter):\n #Generate a random node (rnd_node)\n rnd = self.get_random_node()\n # Get nearest node\n nearest_node = self.tree.nearest(rnd)\n # Get new node by connecting rnd_node and nearest_node\n new_node = self.steer(nearest_node, rnd)\n # If path between new_node and nearest node is not in collision\n if not self.map.collision(nearest_node.p,new_node.p):\n #add the node to tree\n self.add(new_node)\n #Return path if it exists\n if not self.goal.parent: path = None\n else: path = self.final_path()\n return path, self.goal.cost", "def _find_nearest_path_point(self, robot_pose):\n\n \"\"\"Parametrize the line by p0 = t*p1 + (1-t)p2, and let p3 = robot_pose.position, then (p3 - p0) is perpendicular\n to (p2-p1)\n \"\"\"\n 
nearest_dist = float('inf')\n\n # this is the s coordinate of the nearest point, we parametrize any point on a path by f(s), s is the length\n # along the path to the starting point\n nearest_point_s_coordinate = 0\n\n # the distance from the starting point to the curr line segment, that is, the total length of the previous line\n # segments\n dist_to_curr_line_segment = 0\n for index, start_waypoint in enumerate(self.waypoints):\n if index < len(self.waypoints) - 1:\n end_waypoint = self.waypoints[index + 1]\n line_len = np.linalg.norm(end_waypoint.position - start_waypoint.position)\n\n nearest_dist_to_curr_line = np.linalg.norm(robot_pose.position - start_waypoint.position)\n\n # s coordinate of the nearest point on the current line segment to the robot position\n curr_line_segment_nearest_point_s_coordinate = dist_to_curr_line_segment\n if np.linalg.norm(robot_pose.position - end_waypoint.position) < nearest_dist_to_curr_line:\n nearest_dist_to_curr_line = np.linalg.norm(robot_pose.position - end_waypoint.position)\n curr_line_segment_nearest_point_s_coordinate = dist_to_curr_line_segment + line_len\n\n if line_len > 1e-10:\n t = (-1.0) * np.dot(robot_pose.position - start_waypoint.position,\n start_waypoint.position - end_waypoint.position) / (line_len * line_len)\n if 0 < t < 1:\n perpendicular_point = t * end_waypoint.position + (1 - t) * start_waypoint.position\n if np.linalg.norm(robot_pose.position - perpendicular_point) < nearest_dist_to_curr_line:\n nearest_dist_to_curr_line = np.linalg.norm(robot_pose.position - perpendicular_point)\n curr_line_segment_nearest_point_s_coordinate = dist_to_curr_line_segment + \\\n np.linalg.norm(start_waypoint.position - perpendicular_point)\n\n if nearest_dist_to_curr_line < nearest_dist:\n nearest_dist = nearest_dist_to_curr_line\n nearest_point_s_coordinate = curr_line_segment_nearest_point_s_coordinate\n\n dist_to_curr_line_segment += line_len\n\n return nearest_point_s_coordinate", "def get_better_points_path(self, start_point, end_point, remaining_cities):\n\n result = []\n\n next_point = start_point\n \n while next_point.x != end_point.x or next_point.y != end_point.y:\n neighbors = self.get_neighbors(next_point)\n distance_to_end = min([Point.chebyshev_distance(end_point, n) for n in neighbors])\n neighbors = [n for n in neighbors if Point.chebyshev_distance(end_point, n) == distance_to_end]\n \n if len(neighbors) == 1:\n next_point = neighbors[0]\n result.append(next_point)\n \n else:\n best_neighbor = None\n best_dist_to_city = self.H + self.W + 2\n for neighbor in neighbors:\n neighbor_dist = self.H + self.W + 1\n for city in remaining_cities:\n d = Point.chebyshev_distance(neighbor, city)\n if d < neighbor_dist:\n neighbor_dist = d\n if neighbor_dist < best_dist_to_city:\n best_dist_to_city = neighbor_dist\n best_neighbor = neighbor\n\n next_point = best_neighbor\n result.append(next_point)\n \n\n if len(result) != Point.chebyshev_distance(start_point, end_point):\n raise ValueError(\"better path length unexpected from {} to {}\".format(start_point, end_point))\n \n return result", "def find_path(self, start_node, previous_node, destination_node):\r\n opened = []\r\n closed = []\r\n\r\n start_node.heuristic_cost = 0\r\n start_node.f = 0\r\n start_node.g = 0\r\n opened.append(start_node)\r\n\r\n while len(opened) > 0:\r\n minimum_node = None\r\n minimum_f = None\r\n for each_candidate in opened:\r\n if minimum_node is None or minimum_f > each_candidate.f:\r\n minimum_node = each_candidate\r\n minimum_f = each_candidate.f\r\n\r\n\r\n 
opened.remove(minimum_node)\r\n closed.append(minimum_node)\r\n successors = minimum_node.get_neighbors()\r\n for each_successor in successors:\r\n if each_successor == destination_node:\r\n # found goal\r\n each_successor.parent = minimum_node\r\n break\r\n\r\n # get h value for successor\r\n each_successor.heuristic_cost = Pathfinder.get_estimated_cost(each_successor, destination_node)\r\n # update g value for successor\r\n each_successor.g = minimum_node.g + 1\r\n # determine successor's f value\r\n each_successor.f = each_successor.g + each_successor.heuristic_cost\r\n\r\n # only add to list if it's not in there\r\n if each_successor not in opened and each_successor not in closed:\r\n each_successor.parent = minimum_node\r\n opened.append(each_successor)\r\n\r\n if destination_node.parent is None:\r\n raise Exception('Completed search without finding valid path to destination.')\r\n\r\n return Pathfinder.get_path(destination_node)", "def take_next_step(self) -> None:\r\n next_path_dic = {} # temporary var used to keep track of the result of the step\r\n paths_to_end = set() # temporary var used to keep track of which paths have met the termination criteria\r\n \r\n for current_path_val in self.path_dic: # loop through each point, or current state of a path\r\n for transition in self.transitions:# loop through each transformation (or card draw)\r\n next_path_val = current_path_val + transition # this is value after a card has been drawn\r\n \r\n if next_path_val >= self.target: # if the path has reached an endpoint, add to a set\r\n # which will be used later to move paths to the endpoint dictionary\r\n paths_to_end.add(next_path_val)\r\n\r\n # doing the transformation\r\n if next_path_val in next_path_dic: #this point has already been found, just need to update its probability\r\n next_path_dic[next_path_val] += self.path_dic[current_path_val] \\\r\n / len(self.transitions)\r\n else: # this point hasn't been found yet, need to create it\r\n next_path_dic[next_path_val] = self.path_dic[current_path_val] / len(self.transitions)\r\n \r\n self.path_dic = next_path_dic # all transformations have been done. 
The next state is set as the current state\r\n \r\n # now that we've calucated the next steps for all paths, \r\n # loop through paths that met the end condition and move them from\r\n # the path dictionary to the endpoint dictionary\r\n for point in paths_to_end:\r\n if point in self.end_point_dic: # if this endpoint has been reached before, add the\r\n # probability of current path to probablility of endpoint\r\n self.end_point_dic[point] += self.path_dic.pop(point) #pop from the pathDic becuase this path is ended\r\n \r\n else: #havent reached this endpoint before, add it to the dictionary\r\n self.end_point_dic.update({point: self.path_dic.pop(point)})", "def get_estimated_cost(start_node, destination_node):\r\n delta_x = abs(start_node.x - destination_node.x)\r\n delta_y = abs(start_node.y - destination_node.y)\r\n if delta_x < delta_y:\r\n return math.sqrt(2 * delta_x^2) + delta_y - delta_x\r\n else:\r\n return math.sqrt(2 * delta_y^2) + delta_x - delta_y", "def descent1proj(f, x0, D=lambda f,x : jac(f,x), eps=1e-4, d=None, \n a=armijoproj, \n norm=lambda x : np.sqrt(np.sum(x * x)), \n jmax=100, xmax=1e6, debug=False, \n TM=lambda x : np.identity(x.size), Pi=lambda x : x,\n alpha=0.1, beta=0.8, gamma=1.):\n # initial point\n x = Pi(x0)\n TxM = TM(x)\n # evaluate function\n fx = f(x)\n # compute & project gradient onto tangent space\n Dfx = proj(D(f,x),TxM)\n # loop over descent iterations\n for j in range(jmax):\n # fail if function or gradient are undefined\n if np.isnan(fx):\n print '(descent1proj) f(x) is nan'\n return x,j\n if np.any(np.isnan(Dfx)):\n print '(descent1proj) Df(x) is nan'\n return x,j\n # succeed at stationary point\n if norm(Dfx) < eps:\n return x,j\n # descend gradient\n if d is None:\n dd = -Dfx.T\n # descend given direction\n else:\n dd = d(f,x)\n # compute stepsize\n aa = a(f,x,dd,fx=fx,Dfx=Dfx,TM=lambda x : TxM,Pi=Pi,\n alpha=alpha,beta=beta,gamma=gamma)\n # fail if stepsize cannot be chosen\n if np.isnan(aa):\n print '(descent1proj) stepsize is nan'\n return x,j\n # descend\n x = Pi(x + aa * dd)\n TxM = TM(x)\n # fail if point is unbounded\n if norm(x) >= xmax:\n print '(descent1proj) norm(x) >= xmax'\n return x,j\n # evaluate function\n fx = f(x)\n # compute & project gradient onto tangent space\n Dfx = proj(D(f,x),TxM)\n if debug:\n print 'j = %3d, a = %0.2e, x = %s, f(x) = %0.2e' % (j,aa,x,f(x))\n print ' d = %s' % (dd)\n sys.stdout.flush()\n # descent failed to converge\n print '(descent1proj) j >= jmax'\n return x,j", "def trajectory (x0,y0,v,theta,g = 9.8, npts = 1000):\n vx = v * np.cos(np.deg2rad(theta))\n vy = v * np.sin(np.deg2rad(theta))\n tfinal = (vy/g) + np.sqrt((vy/g)**2 + 2*(y0)/g)\n t = np.linspace(0, tfinal, num = npts)\n x = x0 + vx*t\n y = y0 + vy*t - .5*g*(t**2)\n return x,y", "def get_path(self, start_idx, goal_idx):\n occ_grid = self.occ_grid\n open_list = self.open_list\n # get number of rows ni (x) and number of columns nj (y)\n ni, nj = occ_grid.num_idx\n path = []\n \n # resets h-cost, g-cost, update and occ for all cells\n for i in xrange(ni):\n for j in xrange(nj):\n # ! use occ_grid.idx2cell() and the cell's reset_for_planner()\n if occ_grid.idx2cell((i,j)) != None:\n occ_grid.idx2cell((i,j)).reset_for_planner(goal_idx)\n \n # put start cell into open list\n \n # ! get the start cell from start_idx\n start_cell = occ_grid.idx2cell(start_idx)\n # ! set the start cell distance using set_g_cost and Distance(0, 0)\n start_cell.set_g_cost(Distance(0, 0))\n # ! 
add the cell to open_list\n open_list.add(start_cell)\n \n # Finish adding start cell into OpenList()\n \n \n # now we non-recursively search the map\n while open_list.not_empty():\n cell = open_list.remove()\n # skip if already visited, bcos a cheaper path was already found\n if cell.visited:\n continue\n \n # ! set the cell as visited\n cell.visited = True\n \n # goal\n if cell.idx == goal_idx:\n while cell.parent is not None:\n # ! append the cell.idx onto path\n # ! let cell = cell's parent\n # ! if cell is None, break out of the while loop\n # pass\n # if cell == None:\n # raise Exception('e')\n # print(cell)\n path.append(cell.idx)\n cell = cell.parent\n path.append(cell.idx) \n break # breaks out of the loop: while open_list.not_empty()\n \n # if not goal or not visited, we try to add free neighbour cells into the open list\n for nb_cell in self.get_free_neighbors(cell):\n # ! calculate the tentative g cost of getting from current cell (cell) to neighbouring cell (nb_cell)...\n # ! use cell.g_cost and Distance.from_separation()\n # ! if the tentative g cost is less than the nb_cell.g_cost, ...\n # ! 1. assign the tentative g cost to nb_cell's g cost using set_g_cost\n # ! 2. set the nb_cell parent as cell\n # ! 3. add the nb_cell to the open list using open_list.add()\n # pass\n # tent_cell = copy.deepcopy(nb_cell)\n # nb_cell.set_g_cost(Distance.from_separation(nb_cell.idx, start_cell.idx))\n # tent_cell.set_g_cost(Distance.from_separation(nb_cell.idx, cell.idx) + cell.g_cost)\n tent_g_cost = Distance.from_separation(nb_cell.idx, cell.idx) + cell.g_cost\n # print(tent_cell.g_cost.__str__(), nb_cell.g_cost.__str__())\n if tent_g_cost < nb_cell.g_cost:\n nb_cell.set_g_cost(tent_g_cost)\n nb_cell.parent = cell\n open_list.add(nb_cell)\n # print(\"True\")\n \n return path", "def min_snap_trajectory(state_init, state_final, int_points, mpc_dt, speed=1.0):\n \n #Sum number of points to be fitted\n n_points = len(int_points)+2\n\n #create points list\n x = [state_init[0]]\n y = [state_init[1]]\n z = [state_init[2]]\n for point in int_points:\n x.append(point[0])\n y.append(point[1])\n z.append(point[2])\n x.append(state_final[0])\n y.append(state_final[1])\n z.append(state_final[2])\n\n #create a list of times for each trajectory segment\n delta_t = []\n for i in range(n_points-1):\n p1 = np.array([x[i], y[i], z[i]])\n p2 = np.array([x[i+1], y[i+1], z[i+1]])\n tim = np.linalg.norm(p2 - p1) / speed\n delta_t.append(tim)\n\n print('Total_time = ', np.sum(delta_t))\n\n #Construct P matrix (penalizes the snap)\n P = np.zeros((30*(n_points-1),30*(n_points-1)))\n\n dt = 1/50.0\n ti = np.arange(0,1,dt)\n t10 = np.sum(ti**10*dt)\n t9 = np.sum(ti**9*dt)\n t8 = np.sum(ti**8*dt)\n t7 = np.sum(ti**7*dt)\n t6 = np.sum(ti**6*dt)\n t5 = np.sum(ti**5*dt)\n t4 = np.sum(ti**4*dt)\n t3 = np.sum(ti**3*dt)\n t2 = np.sum(ti**2*dt)\n t = np.sum(ti*dt)\n\n Pb = np.zeros((10,10))\n\n Pb[0:6,0:6] = np.array([[3024**2*t10, 3024*1680*t9, 3024*840*t8, \n 3024*360*t7,3024*120*t6, 3024*24*t5],\n [3024*1680*t9, 1680**2*t8, 1680*840*t7, \n 1680*360*t6, 1680*120*t5, 1680*24*t4],\n [3024*840*t8, 1680*840*t7, 840**2*t6, \n 840*350*t5, 840*120*t4, 840*24*t3],\n [3024*360*t7,1680*360*t6, 840*360*t5, \n 360**2*t4, 360*120*t3, 360*24*t2],\n [3024*120*t6, 1680*120*t5, 840*120*t4,\n 360*120*t3, 120**2*t2, 120*24*t],\n [3024*24*t5, 1680*24*t4, 840*24*t3,\n 360*24*t2, 120*24*t, 24**2]])\n\n for i in range(n_points-1): \n P[i*30:i*30+10, i*30:i*30+10] = Pb/delta_t[i]**8\n P[i*30+10:i*30+20, i*30+10:i*30+20] = Pb/delta_t[i]**8\n 
P[i*30+20:i*30+30, i*30+20:i*30+30] = Pb/delta_t[i]**8\n\n\n #Construct A and b matrices\n A = np.zeros((18*(n_points)-6, 30*(n_points-1)))\n\n b = np.zeros((18*(n_points)-6,1))\n\n b[0,0] = state_init[0]\n b[1,0] = state_init[3]\n b[2,0] = state_init[6]\n b[5,0] = x[1]\n\n b[6,0] = state_init[1]\n b[7,0] = state_init[4]\n b[8,0] = state_init[7]\n b[11,0] = y[1]\n\n b[12,0] = state_init[2]\n b[13,0] = state_init[5]\n b[17,0] = z[1]\n\n\n for i in range(n_points-1):\n dt1 = delta_t[i]\n\n Ab0 =np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1/dt1, 0],\n [0, 0, 0, 0, 0, 0, 0, 2/dt1**2, 0, 0],\n [0, 0, 0, 0, 0, 0, 6/dt1**3, 0, 0, 0],\n [0, 0, 0, 0, 0, 24/dt1**4, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n\n A0 = np.zeros((18,30))\n\n A0[0:6, 0:10] = Ab0\n A0[6:12, 10:20] = Ab0\n A0[12:18, 20:30] = Ab0\n\n A[i*18:i*18+18, i*30:i*30+30] = A0\n if i >0:\n dt2 = delta_t[i-1]\n\n ab1_vec = np.array([1, 1/dt2, 1/dt2**2, 1/dt2**3, 1/dt2**4, 1]).reshape(-1,1)\n\n Ab1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],\n [72, 56, 42, 30 , 20, 12, 6, 2, 0, 0],\n [504, 336, 210, 120, 60, 24, 6, 0, 0, 0],\n [3024, 1680, 840, 360, 120, 24, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n \n Ab1 = np.multiply(Ab1, ab1_vec)\n\n A1 = np.zeros((18,30))\n\n A1[0:6, 0:10] = Ab1\n A1[6:12, 10:20] = Ab1\n A1[12:18, 20:30] = Ab1\n \n A[i*18:i*18+18, (i-1)*30:(i-1)*30+30] = -A1\n b[i*18+5, 0] = x[i+1]\n b[i*18+11, 0] = y[i+1]\n b[i*18+17, 0] = z[i+1]\n \n dt2 = delta_t[-1]\n ab1_vec = np.array([1, 1/dt2, 1/dt2**2, 1/dt2**3, 1/dt2**4, 1]).reshape(-1,1)\n Ab1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],\n [72, 56, 42, 30 , 20, 12, 6, 2, 0, 0],\n [504, 336, 210, 120, 60, 24, 6, 0, 0, 0],\n [3024, 1680, 840, 360, 120, 24, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n\n Ab1 = np.multiply(Ab1, ab1_vec)\n \n A[(i+1)*18:(i+1)*18+4, i*30:i*30+10] = Ab1[1:5, 0:10]\n A[(i+1)*18+4:(i+1)*18+8, i*30+10:i*30+20] = Ab1[1:5, 0:10]\n A[(i+1)*18+8:(i+1)*18+12, i*30+20:i*30+30] = Ab1[1:5, 0:10]\n\n\n b[(i+1)*18, 0] = state_final[3]\n b[(i+1)*18+1, 0] = state_final[6]\n\n b[(i+1)*18+4, 0] = state_final[4]\n b[(i+1)*18+5, 0] = state_final[7]\n\n b[(i+1)*18+8, 0] = state_final[5]\n\n q = np.zeros((30*(n_points-1),1))\n G = np.zeros((30*(n_points-1),30*(n_points-1)))\n h = np.zeros((30*(n_points-1),1))\n\n\n P = matrix(P)\n G = matrix(G)\n h = matrix(h)\n A = matrix(A)\n q = matrix(q)\n b = matrix(b)\n\n #silent solver \n solvers.options['show_progress'] = False\n #solvers.options['show_progress'] = False\n\n sol = solvers.qp(P, q, G, h, A, b)\n params = np.array(sol['x'])\n\n total_dist = 0\n for i in range(n_points-1):\n step = delta_t[i]/mpc_dt\n t = np.arange(0, 1, 1/step)\n N = t.shape[0]\n T = np.zeros((10, N))\n T[0,:] = t**9\n T[1,:] = t**8\n T[2,:] = t**7\n T[3,:] = t**6\n T[4,:] = t**5\n T[5,:] = t**4\n T[6,:] = t**3\n T[7,:] = t**2\n T[8,:] = t\n T[9,:] = np.ones(N)\n\n x = params[i*30:i*30+10].reshape(1,-1).dot(T)\n y = params[i*30+10:i*30+20].reshape(1,-1).dot(T)\n z = params[i*30+20:i*30+30].reshape(1,-1).dot(T)\n\n dx = x[0,1:N] - x[0,0:N-1]\n dy = y[0,1:N] - y[0,0:N-1]\n dz = z[0,1:N] - z[0,0:N-1]\n \n ds2 = dx**2 + dy**2 + dz**2\n ds = np.sqrt(ds2)\n s = np.sum(ds)\n\n total_dist += s\n\n #calculate velocities\n T = np.zeros((10, N))\n T[0,:] = t**8\n T[1,:] = t**7\n T[2,:] = t**6\n T[3,:] = t**5\n T[4,:] = t**4\n T[5,:] = t**3\n T[6,:] = t**2\n T[7,:] = t**1\n T[8,:] = np.ones(N)\n T[9,:] = np.zeros(N)\n\n T = T/delta_t[i]\n\n 
c_vel = np.diag([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n \n vx = params[i*30:i*30+10].reshape(1,-1).dot(c_vel).dot(T)\n vy = params[i*30+10:i*30+20].reshape(1,-1).dot(c_vel).dot(T)\n vz = params[i*30+20:i*30+30].reshape(1,-1).dot(c_vel).dot(T)\n \n \n #calculate accelerations\n T = np.zeros((10, N))\n T[0,:] = t**7\n T[1,:] = t**6\n T[2,:] = t**5\n T[3,:] = t**4\n T[4,:] = t**3\n T[5,:] = t**2\n T[6,:] = t**1\n T[7,:] = np.ones(N)\n T[8,:] = np.zeros(N)\n T[9,:] = np.zeros(N)\n\n T = T/delta_t[i]**2\n\n c_acc = np.diag([72, 56, 42, 30, 20, 12, 6, 2, 0, 0])\n \n ax = params[i*30:i*30+10].reshape(1,-1).dot(c_acc).dot(T)\n ay = params[i*30+10:i*30+20].reshape(1,-1).dot(c_acc).dot(T)\n az = params[i*30+20:i*30+30].reshape(1,-1).dot(c_acc).dot(T)\n \n #Create a matrix of all points\n X = np.zeros((9,N))\n X[0,:] = x.flatten()\n X[1,:] = y.flatten()\n X[2,:] = z.flatten()\n X[3,:] = vx.flatten()\n X[4,:] = vy.flatten()\n X[5,:] = vz.flatten()\n X[6,:] = ax.flatten()\n X[7,:] = ay.flatten()\n X[8,:] = az.flatten()\n\n if i == 0:\n X_final = X\n else:\n X_final = np.concatenate((X_final, X), axis=1)\n\n return total_dist, X_final" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns normalized velocity at the position
def get_velocity(position): x, y = position vel = np.array([gradx_interp(y, x)[0][0], grady_interp(y, x)[0][0]]) return vel / np.linalg.norm(vel)
[ "def normalize_velocity(velocity, velocity_range):\n return (velocity - velocity_range[0]) / (velocity_range[1] - velocity_range[0])", "def u(self):\n return self.centroid_velocity / np.linalg.norm(self.centroid_velocity)", "def v(self):\n return self.centroid_velocity_tangent / np.linalg.norm(\n self.centroid_velocity_tangent\n )", "def normalized(self):\n mag = self.magnitude()\n if (mag == 0):\n return Vector2D(self.x, self.y)\n return Vector2D(self.x/mag, self.y/mag)", "def v(self):\n return self.velocity + self.dv()", "def velocity(self):\r\n if self.sprint:\r\n return self._absDirection * self.sprintSpeed\r\n else:\r\n return self._absDirection * self.baseSpeed", "def velocity(self):\n if self.vmax > 0:\n mod = VelField(x_0=self.x_0,\n y_0=self.y_0,\n r_eff=self.r_eff,\n ellip=self.ellip,\n theta=self.theta,\n vmax=self.vmax,\n q=self.q)\n result = mod(self.x, self.y)\n else:\n result = np.ones(shape=self.x.shape)\n\n return result", "def get_velocity(self):\n linear, angular = self._physics_client.getBaseVelocity(self.uid)\n return np.asarray(linear), np.asarray(angular)", "def unit_vector(self):\n return self.vector() / self.length()", "def normal_at(self, point):\n return self.u.cross(self.v).normalized()", "def get_norm(self,point):\r\n\t\treturn normal(point - self.position)", "def angular_velocity(self):\n return 0.0", "def normalize(self) -> \"float\":\n return _coin.SbVec2f_normalize(self)", "def normalize(self) -> \"double\":\n return _coin.SbVec2d_normalize(self)", "def velocity_p(self):\n return self._velocity_p", "def vel_coef(self):\n return self._vel_coef", "def __pos__(self):\n return _almathinternal.PosVelAcc___pos__(self)", "def toVector(self, *args):\n return _almathinternal.PosVelTime_toVector(self, *args)", "def v_x(self):\n return self.centroid_velocity[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Python 3.3 added an additional LOAD_CONST before MAKE_FUNCTION and this has an effect on many rules.
def add_make_function_rule(self, rule, opname, attr, customize): new_rule = rule % (('LOAD_CONST ') * (1 if self.version >= 3.3 else 0)) self.add_unique_rule(new_rule, opname, attr, customize)
[ "def __addConstantInitCode(\n context,\n emit,\n check,\n constant_type,\n constant_value,\n constant_identifier,\n module_level,\n):\n # This has many cases, that all return, and do a lot.\n # pylint: disable=too-many-branches,too-many-locals,too-many-return-statements,too-many-statements\n\n # For the module level, we only mean to create constants that are used only\n # inside of it. For the global level, it must must be single use.\n if module_level:\n if context.global_context.getConstantUseCount(constant_identifier) != 1:\n return\n else:\n if context.getConstantUseCount(constant_identifier) == 1:\n return\n\n # Adding it to \"done\". We cannot have recursive constants, so this is OK\n # to be done now.\n done.add(constant_identifier)\n\n # Use shortest code for ints and longs.\n if constant_type is long:\n # See above, same for long values. Note: These are of course not\n # existent with Python3 which would have covered it before.\n if 0 <= constant_value <= max_unsigned_long:\n emit(\n \"%s = PyLong_FromUnsignedLong(%sul);\"\n % (constant_identifier, constant_value)\n )\n\n return\n elif 0 > constant_value >= min_signed_long:\n emit(\"%s = PyLong_FromLong(%sl);\" % (constant_identifier, constant_value))\n\n return\n elif constant_value == min_signed_long - 1:\n # There are compilers out there, that give warnings for the literal\n # MININT when used. We work around that warning here.\n emit(\n \"\"\"\\\n%s = PyLong_FromLong(%sl); // To be corrected with -1 in-place next lines.\nCHECK_OBJECT(const_int_pos_1);\n%s = PyNumber_InPlaceSubtract(%s, PyLong_FromLong(1));\"\"\"\n % (\n constant_identifier,\n min_signed_long,\n constant_identifier,\n constant_identifier,\n )\n )\n\n return\n else:\n getMarshalCode(\n constant_identifier=constant_identifier,\n constant_value=constant_value,\n emit=emit,\n )\n\n return\n elif constant_type is int:\n if constant_value >= min_signed_long:\n emit(\"%s = PyInt_FromLong( %sl );\" % (constant_identifier, constant_value))\n\n return\n else:\n # There are compilers out there, that give warnings for the literal\n # MININT when used. We work around that warning here.\n assert constant_value == min_signed_long - 1\n\n emit(\n \"\"\"\\\n%s = PyInt_FromLong( %sl ); // To be corrected in next line.\n%s = PyNumber_InPlaceSubtract( %s, PyInt_FromLong( 1 ) );\"\"\"\n % (\n constant_identifier,\n min_signed_long,\n constant_identifier,\n constant_identifier,\n )\n )\n\n return\n\n if constant_type is unicode:\n try:\n encoded = constant_value.encode(\"utf-8\")\n\n if str is bytes:\n emit(\n \"%s = UNSTREAM_UNICODE( %s );\"\n % (constant_identifier, stream_data.getStreamDataCode(encoded))\n )\n else:\n if str is not bytes and len(constant_value) == len(encoded):\n emit(\n \"%s = UNSTREAM_STRING_ASCII( %s, %d );\"\n % (\n constant_identifier,\n stream_data.getStreamDataCode(encoded),\n 1 if _isAttributeName(constant_value) else 0,\n )\n )\n else:\n emit(\n \"%s = UNSTREAM_STRING( %s, %d );\"\n % (\n constant_identifier,\n stream_data.getStreamDataCode(encoded),\n 1 if _isAttributeName(constant_value) else 0,\n )\n )\n\n return\n except UnicodeEncodeError:\n getMarshalCode(\n constant_identifier=constant_identifier,\n constant_value=constant_value,\n emit=emit,\n )\n\n return\n\n elif constant_type is str:\n # Python3: Strings that can be encoded as UTF-8 are done more or less\n # directly. 
When they cannot be expressed as UTF-8, that is rare not we\n # can indeed use pickling.\n assert str is bytes\n\n if len(constant_value) == 1:\n emit(\n \"%s = UNSTREAM_CHAR( %d, %d );\"\n % (\n constant_identifier,\n ord(constant_value[0]),\n 1 if _isAttributeName(constant_value) else 0,\n )\n )\n else:\n emit(\n \"%s = UNSTREAM_STRING( %s, %d );\"\n % (\n constant_identifier,\n stream_data.getStreamDataCode(constant_value),\n 1 if _isAttributeName(constant_value) else 0,\n )\n )\n\n return\n elif constant_type is bytes:\n # Python3 only, for Python2, bytes do not happen.\n assert str is not bytes\n\n emit(\n \"%s = UNSTREAM_BYTES( %s );\"\n % (constant_identifier, stream_data.getStreamDataCode(constant_value))\n )\n\n return\n\n if constant_type is float:\n emit(\n \"%s = UNSTREAM_FLOAT( %s );\"\n % (\n constant_identifier,\n stream_data.getStreamDataCode(\n value=struct.pack(\"<d\", constant_value), fixed_size=True\n ),\n )\n )\n\n return\n\n if constant_type is dict:\n # Not all dictionaries can or should be marshaled. For small ones,\n # or ones with strange values, like \"{1:type}\", we have to do it.\n\n if attemptToMarshal(constant_identifier, constant_value, emit):\n return\n\n emit(\n \"%s = _PyDict_NewPresized( %d );\"\n % (constant_identifier, len(constant_value))\n )\n\n for key, value in iterItems(constant_value):\n key_name = context.getConstantCode(key)\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(key),\n constant_value=key,\n constant_identifier=key_name,\n module_level=module_level,\n context=context,\n )\n\n value_name = context.getConstantCode(value)\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(value),\n constant_value=value,\n constant_identifier=value_name,\n module_level=module_level,\n context=context,\n )\n\n # TODO: Error checking for debug.\n emit(\n \"PyDict_SetItem(%s, %s, %s);\"\n % (constant_identifier, key_name, value_name)\n )\n\n emit(\n \"assert(PyDict_Size( %s ) == %d);\"\n % (constant_identifier, len(constant_value))\n )\n\n return\n\n if constant_type is tuple:\n # Not all tuples can or should be marshaled. For small ones,\n # or ones with strange values, like \"(type,)\", we have to do it.\n\n if attemptToMarshal(constant_identifier, constant_value, emit):\n return\n\n emit(\"%s = PyTuple_New( %d );\" % (constant_identifier, len(constant_value)))\n\n for count, element_value in enumerate(constant_value):\n element_name = context.getConstantCode(constant=element_value)\n\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(element_value),\n constant_value=element_value,\n constant_identifier=context.getConstantCode(constant=element_value),\n module_level=module_level,\n context=context,\n )\n\n # Do not take references, these won't be deleted ever.\n emit(\n \"PyTuple_SET_ITEM( %s, %d, %s ); Py_INCREF(%s);\"\n % (constant_identifier, count, element_name, element_name)\n )\n\n return\n\n if constant_type is list:\n # Not all lists can or should be marshaled. 
For small ones,\n # or ones with strange values, like \"[type]\", we have to do it.\n\n if attemptToMarshal(constant_identifier, constant_value, emit):\n return\n\n emit(\"%s = PyList_New( %d );\" % (constant_identifier, len(constant_value)))\n\n for count, element_value in enumerate(constant_value):\n element_name = context.getConstantCode(constant=element_value)\n\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(element_value),\n constant_value=element_value,\n constant_identifier=element_name,\n module_level=module_level,\n context=context,\n )\n\n # Do not take references, these won't be deleted ever.\n emit(\n \"PyList_SET_ITEM(%s, %d, %s); Py_INCREF(%s);\"\n % (constant_identifier, count, element_name, element_name)\n )\n\n return\n\n if constant_type is set or constant_type is frozenset:\n # Not all sets can or should be marshaled. For small ones,\n # or ones with strange values, like \"{type}\", we have to do it.\n if attemptToMarshal(constant_identifier, constant_value, emit):\n return\n\n # Special handling for empty frozensets.\n if not constant_value and constant_type is frozenset:\n emit(\n \"%s = PyObject_CallFunction((PyObject*)&PyFrozenSet_Type, NULL);\"\n % (constant_identifier,)\n )\n\n return\n\n # TODO: Hinting size is really not possible?\n emit(\n \"%s = %s(NULL);\"\n % (\n constant_identifier,\n \"PySet_New\" if constant_type is set else \"PyFrozenSet_New\",\n )\n )\n\n for element_value in constant_value:\n element_name = context.getConstantCode(element_value)\n\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(element_value),\n constant_value=element_value,\n constant_identifier=element_name,\n module_level=module_level,\n context=context,\n )\n\n emit(\"PySet_Add(%s, %s);\" % (constant_identifier, element_name))\n\n emit(\n \"assert(PySet_Size(%s) == %d);\" % (constant_identifier, len(constant_value))\n )\n\n return\n\n if constant_type is slice:\n slice1_name = context.getConstantCode(constant_value.start)\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(constant_value.start),\n constant_value=constant_value.start,\n constant_identifier=slice1_name,\n module_level=module_level,\n context=context,\n )\n slice2_name = context.getConstantCode(constant_value.stop)\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(constant_value.stop),\n constant_value=constant_value.stop,\n constant_identifier=slice2_name,\n module_level=module_level,\n context=context,\n )\n slice3_name = context.getConstantCode(constant_value.step)\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(constant_value.step),\n constant_value=constant_value.step,\n constant_identifier=slice3_name,\n module_level=module_level,\n context=context,\n )\n\n emit(\n \"%s = PySlice_New(%s, %s, %s);\"\n % (constant_identifier, slice1_name, slice2_name, slice3_name)\n )\n\n return\n\n if constant_type is xrange:\n # Strip const_xrange.\n assert constant_identifier.startswith(\"const_xrange_\")\n\n # For Python2, xrange needs only long values to be created, so avoid objects.\n range_args = constant_identifier[13:].split(\"_\")\n\n # Default start.\n if len(range_args) == 1:\n range_args.insert(0, \"0\")\n\n # Default step\n if len(range_args) < 3:\n range_args.append(\"1\")\n\n # Negative values are encoded with \"neg\" prefix.\n range_args = [int(range_arg.replace(\"neg\", \"-\")) for range_arg in range_args]\n\n if xrange is not range:\n emit(\n \"%s = MAKE_XRANGE(%s, %s, %s);\"\n % 
(constant_identifier, range_args[0], range_args[1], range_args[2])\n )\n else:\n range1_name = context.getConstantCode(range_args[0])\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(range_args[0]),\n constant_value=range_args[0],\n constant_identifier=range1_name,\n module_level=module_level,\n context=context,\n )\n range2_name = context.getConstantCode(range_args[1])\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(range_args[1]),\n constant_value=range_args[1],\n constant_identifier=range2_name,\n module_level=module_level,\n context=context,\n )\n range3_name = context.getConstantCode(range_args[2])\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(range_args[2]),\n constant_value=range_args[2],\n constant_identifier=range3_name,\n module_level=module_level,\n context=context,\n )\n\n emit(\n \"%s = BUILTIN_XRANGE3(%s, %s, %s);\"\n % (constant_identifier, range1_name, range2_name, range3_name)\n )\n\n return\n\n if constant_type is bytearray:\n emit(\n \"%s = UNSTREAM_BYTEARRAY(%s);\"\n % (\n constant_identifier,\n stream_data.getStreamDataCode(bytes(constant_value)),\n )\n )\n\n return\n\n if constant_type is complex:\n getMarshalCode(\n constant_identifier=constant_identifier,\n constant_value=constant_value,\n emit=emit,\n )\n\n return\n\n if constant_value in builtin_named_values_list:\n builtin_name = builtin_named_values[constant_value]\n builtin_identifier = context.getConstantCode(builtin_name)\n\n _addConstantInitCode(\n emit=emit,\n check=check,\n constant_type=type(builtin_name),\n constant_value=builtin_name,\n constant_identifier=builtin_identifier,\n module_level=module_level,\n context=context,\n )\n\n emit(\"%s = LOOKUP_BUILTIN( %s );\" % (constant_identifier, builtin_identifier))\n\n return\n\n # Must not reach this, if we did, it's in error, and we need to know.\n assert False, (type(constant_value), constant_value, constant_identifier)", "def _addConstantInitCode(\n context,\n emit,\n check,\n constant_type,\n constant_value,\n constant_identifier,\n module_level,\n):\n # Got a couple of values to dodge, pylint: disable=too-many-return-statements\n\n if constant_value is None:\n return\n elif constant_value is False:\n return\n elif constant_value is True:\n return\n elif constant_value is Ellipsis:\n return\n elif constant_value is NotImplemented:\n return\n elif type(constant_value) is type:\n return\n elif constant_identifier in done:\n # Do not repeat ourselves.\n return\n\n if Options.shallTraceExecution():\n emit(\"\"\"NUITKA_PRINT_TRACE(\"Creating constant: %s\");\"\"\" % constant_identifier)\n\n # Then it's a real named constant not yet created.\n __addConstantInitCode(\n context,\n emit,\n check,\n constant_type,\n constant_value,\n constant_identifier,\n module_level,\n )\n\n # In debug mode, lets check if the constants somehow change behind our\n # back, add those values too.\n if Options.isDebug():\n emit(\n \"\"\"\\\nhash_%(constant_identifier)s = DEEP_HASH(%(constant_identifier)s);\"\"\"\n % {\"constant_identifier\": constant_identifier}\n )\n\n check(\n \"\"\"\\\nCHECK_OBJECT(%(constant_identifier)s);\nassert(hash_%(constant_identifier)s == DEEP_HASH(%(constant_identifier)s));\"\"\"\n % {\"constant_identifier\": constant_identifier}\n )", "def f0():\n return const_function(4)", "def _make_thunk(self, codeobj, offset):\n\n # Note: the patch_func is appended to the end of the original\n # function's co_consts\n var_idx = codeobj.co_varnames.index(self.local_name)\n func_const = 
len(codeobj.co_consts)\n total_size = 0\n\n def i(opname, arg):\n nonlocal offset, total_size\n instr = _Instruction(offset, dis.opmap[opname], arg)\n size = instr.n_bytes\n offset += size\n total_size += size\n return instr\n\n instructions = [\n i('LOAD_CONST', func_const),\n i('LOAD_FAST', var_idx),\n i('CALL_FUNCTION', 1),\n i('STORE_FAST', var_idx)\n ]\n\n return instructions, total_size", "def constant_fn(val):\n def func(_):\n return val\n return func", "def const(value):\n def const_(token):\n return ASTValue(value, token)\n return const_", "def _get_constant_function(constant: float):\n\n def function(x):\n return constant\n\n return function", "def make_constants(builtin_only=False, stoplist=[], verbose=False):\n\tif type(builtin_only) == type(make_constants):\n\t\traise ValueError(\"The bind_constants decorator must have arguments.\")\n\treturn lambda f: _make_constants(f, builtin_only, stoplist, verbose)", "def cast_to_const(expr):\r\n return expr if isinstance(expr, Expression) else types.constant()(expr)", "def const(expr: _T) -> _T:", "def create_constant(param, module):\n getter = reg.get_descriptor_by_name\n desc = getter(param.identifier, param.type, param.namespace)\n constant = desc.module()\n constant.id = module.id\n# if param.evaluatedStrValue:\n# constant.setValue(param.evaluatedStrValue)\n if param.strValue != '':\n constant.setValue(param.strValue)\n else:\n constant.setValue( \\\n constant.translate_to_string(constant.default_value))\n return constant", "def prefpp_const_conv(self):\n \n legal_mm = string.digits + \".\" + \"+\" + \"-\" # legal mantissa characters\n legal_ee = string.digits + \"+\" + \"-\" # legal exponent characters\n \n ileno = len(self.oline)\n ilast = ileno - 1\n ic = string.find(self.oline,'CONST(')\n while ic > -1:\n if ic + 10 > ileno: break # not enough room for CONST(m,e) macro\n \n ip1 = ic + 5 # first parenthesis\n im1 = ip1 + 1\n ip2 = ic + string.find(self.oline[ic:],')')\n if ip2 < 0: break # \")\" not found.\n \n nump = string.split(self.oline[ip1+1:ip2],',')\n if len(nump) <> 2: break # m,e form not inside parens\n \n mm = string.strip(nump[0])\n ee = string.strip(nump[1])\n \n if mm == \"\": break # non-null mantissa required.\n if ee == \"\": break # non-null exponent required.\n \n ichk = 0\n for i in range(len(mm)):\n if string.find(legal_mm,mm[i]) < 0: ichk = ichk + 1\n if ichk > 0: break # illegal character in mantissa\n \n ichk = 0\n for i in range(len(ee)):\n if string.find(legal_ee,ee[i]) < 0: ichk = ichk + 1\n if ichk > 0: break # illegal character in exponent\n \n # final check...\n \n try:\n iee = string.atoi(ee)\n except ValueError:\n ichk = ichk + 1\n \n try:\n zmm = string.atof(mm)\n except ValueError:\n ichk = ichk + 1\n \n if ichk > 0: break # decode failed\n \n newnum = mm + self.f77exp + ee\n \n if ip2 == ileno:\n self.oline = self.oline[:ic] + \" \" + newnum\n else:\n self.oline = self.oline[:ic] + \" \" + newnum + self.oline[ip2+1:]\n \n # update info on oline -- re-enter loop to process more than one CONST\n # per line...\n \n ileno = len(self.oline)\n ilast = ileno - 1\n ic = string.find(self.oline,'CONST(')\n \n return 0", "def file_func(self, fi, meta):\n fi = fi[0]\n constAddr = self.compiler.addConst(fi.value, fi.type)\n self.compiler.pushOperando(constAddr)\n self.compiler.pushTipo(fi.type)\n return fi", "def _insert_decl_function(cg, funcdef):\n # collect the explicit 'global' variable names\n global_vars = set()\n for node in ast.walk(funcdef):\n if isinstance(node, ast.Global):\n 
global_vars.update(node.names)\n\n # generate the code object which will create the function\n mod = ast.Module(body=[funcdef])\n code = compile(mod, cg.filename, mode='exec')\n\n # convert to a byteplay object and remove the leading and\n # trailing ops: SetLineno STORE_NAME LOAD_CONST RETURN_VALUE\n outer_ops = bp.Code.from_code(code).code[1:-3]\n\n # the stack now looks like the following:\n # ...\n # ...\n # LOAD_CONST (<code object>)\n # MAKE_FUCTION (num defaults) // TOS\n\n # extract the inner code object which represents the actual\n # function code and update its flags and global loads\n inner = outer_ops[-2][1]\n inner.newlocals = False\n inner_ops = inner.code\n for idx, (op, op_arg) in enumerate(inner_ops):\n if op == bp.LOAD_GLOBAL and op_arg not in global_vars:\n inner_ops[idx] = (bp.LOAD_NAME, op_arg)\n\n # inline the modified code ops into the code generator\n cg.code_ops.extend(outer_ops)", "def constant_func(i):\n return lambda x: i", "def test_doc_usage_other_constants():\n from mini_lambda import x, _, C\n from mini_lambda.symbols.math_ import E\n from math import e\n\n assert str(_(x + e)) == 'x + 2.718281828459045'\n assert str(_(x + E)) == 'x + e'\n assert str(_(E + E)) == 'e + e'\n\n # define the constant\n E = C(e, 'e')\n\n # use it in expressions. The name appears when printed\n assert str(_(x + E)) == 'x + e'", "def const(self, value, vars_out=None):\n return self.primop(lambda: value, vars_in=[], vars_out=vars_out)", "def translate(env, func, *args, **kwargs):\n\n func_code = six.get_function_code(func)\n func_globals = dict(__builtins__)\n func_globals.update(six.get_function_globals(func))\n\n ops = bc.disassemble(func_code.co_code)\n\n program = []\n\n f_args = inspect.getcallargs(func, *args, **kwargs)\n variables = dict(\n (func_code.co_varnames.index(arg_name), arg_value)\n for arg_name, arg_value in six.iteritems(f_args)\n )\n\n stack = []\n for op in ops:\n if op.name == 'LOAD_CONST':\n stack.append(func_code.co_consts[op.arg])\n\n elif op.name == 'LOAD_GLOBAL':\n global_name = func_code.co_names[op.arg]\n stack.append(getattr(env, global_name, func_globals.get(global_name)))\n\n elif op.name == 'LOAD_FAST':\n var_name = func_code.co_varnames[op.arg]\n stack.append(getattr(env, var_name, variables.get(op.arg)))\n\n elif op.name == 'BUILD_LIST':\n items = stack[-op.arg:]\n del stack[-op.arg:]\n stack.append(items)\n\n elif op.name == 'LOAD_ATTR':\n obj = stack.pop()\n stack.append(getattr(obj, func_code.co_names[op.arg]))\n\n elif op.name == 'CALL_FUNCTION':\n nargs = op.arg & 0xff\n nkwargs = op.arg >> 8\n\n if nkwargs:\n f_kwargs = dict(zip(stack[-nkwargs * 2::2], stack[-nkwargs * 2 + 1::2]))\n del stack[-nkwargs * 2:]\n else:\n f_kwargs = {}\n\n if nargs:\n f_args = stack[-nargs:]\n del stack[-nargs:]\n else:\n f_args = []\n\n f = stack.pop()\n if isinstance(f, Fragment):\n stack.append(f(env, *f_args, **f_kwargs))\n else:\n stack.append(f(*f_args, **f_kwargs))\n\n elif op.name == 'STORE_FAST':\n value = stack.pop()\n var_name = func_code.co_varnames[op.arg]\n var = getattr(env, var_name, variables.get(op.arg, None))\n if isinstance(var, Register):\n program.append(LoadRegister(var, value))\n else:\n variables[op.arg] = value\n\n elif op.name == 'POP_TOP':\n value = stack.pop()\n if isinstance(value, SyscallInvoke):\n program.append(value)\n elif isinstance(value, list):\n program.extend(value)\n else:\n raise ValueError('No idea how to compile %s' % (value,))\n\n elif op.name == 'RETURN_VALUE':\n stack.pop()\n\n elif op.name == 'DUP_TOP':\n value = 
stack[-1]\n if isinstance(value, SyscallInvoke):\n stack.insert(-1, env.SYSCALL_RET_REG)\n else:\n stack.append(value)\n\n elif op.name == 'BINARY_SUBSCR':\n index = stack.pop()\n value = stack.pop()\n stack.append(value[index])\n\n elif op.name == 'STORE_SUBSCR':\n index = stack.pop()\n value = stack.pop()\n new_value = stack.pop()\n var = value[index]\n if isinstance(var, Register):\n program.append(LoadRegister(var, new_value))\n else:\n value[index] = new_value\n\n elif op.name == 'INPLACE_ADD':\n value = stack.pop()\n reg = stack.pop()\n if not isinstance(reg, Register):\n raise TypeError('In-place addition is only supported on registers')\n program.extend(env.reg_add(reg, value))\n stack.append(reg)\n\n elif op.name == 'INPLACE_SUBTRACT':\n value = stack.pop()\n reg = stack.pop()\n if not isinstance(reg, Register):\n raise TypeError('In-place subtraction is only supported on registers')\n program.extend(env.reg_sub(reg, value))\n stack.append(reg)\n\n else:\n raise RuntimeError('Unsupported opcode: %s' % op.name)\n\n return program", "def compileBytecode(self, code):\n btc = dis.get_instructions(code)\n \n print(dis.code_info(code))\n dis.dis(code)\n \n level_name = code.co_name\n \n env = Env(code)\n\n # if we are not at the toplevel we setup the function prologue\n if level_name != \"<module>\":\n csts = env.getConsts()\n \n # Emit const strings before function definition\n for i, v in enumerate(csts):\n if v.type == ConstVal.Addr:\n self.emitter.emitString(env.getStringRef(i), v.value)\n\n self.emitter.emitLabel(level_name)\n self.emitter.emitPrologue(code.co_nlocals)\n \n # Copy args into slot\n for i in range(code.co_argcount):\n self.emitter.emitStoreSlot(REGS[i], i)\n\n for ins in btc:\n if ins.opname == \"MAKE_FUNCTION\":\n name = env.popEvent().value\n code = env.popEvent().value\n\n if not isinstance(code, type(self.compileBytecode.__code__)):\n raise Exception(\"MAKE_FUNCTION instruction with no code object\")\n\n self.compileBytecode(code)\n if ins.opname == \"CALL_FUNCTION\":\n arg_count = ins.argval\n\n if arg_count >= len(REGS)-1:\n raise Exception(\"Functions must have at most {} arguments\".format(len(REGS)-1))\n \n # TODO: Emit movs of variables into regs\n env.setupArgs(arg_count, self.emitter)\n\n func = env.popEvent().value\n self.emitter.emitRaw(\"call #{}\".format(func))\n \n env.pushEvent(StackEvent(StackEvent.MAKE_FUNCTION_DUMMY, 0, 0))\n\n if ins.opname == \"LOAD_FAST\":\n env.pushEvent(StackEvent(StackEvent.LOAD_FAST, ins.argval, ins.arg))\n if ins.opname == \"LOAD_CONST\":\n env.pushEvent(StackEvent(StackEvent.LOAD_CONST, ins.argval, ins.arg))\n if ins.opname == \"LOAD_GLOBAL\":\n env.pushEvent(StackEvent(StackEvent.LOAD_GLOBAL, ins.argval, ins.arg))\n if ins.opname == \"STORE_FAST\":\n evt = env.popEvent()\n \n # We returned from a function\n if evt.type == StackEvent.MAKE_FUNCTION_DUMMY:\n self.emitter.emitStoreSlot(REGS[0], evt.index)\n if evt.type == StackEvent.LOAD_CONST:\n cstval = env.getConsts()[evt.index]\n\n if cstval.type == ConstVal.Imm:\n self.emitter.emitMovImm(REGS[0], cstval.value)\n if cstval.type == ConstVal.Addr:\n self.emitter.emitMovRef(REGS[0], cstval.value)\n\n self.emitter.emitStoreSlot(REGS[0], ins.arg)\n\n if ins.opname == \"RETURN_VALUE\":\n evt = env.popEvent()\n\n if evt.type == StackEvent.LOAD_FAST:\n self.emitter.emitLoadSlot(REGS[0], evt.index)\n if evt.type == StackEvent.LOAD_CONST:\n cstval = env.getConsts()[evt.index]\n\n if cstval.type == ConstVal.Imm:\n self.emitter.emitMovImm(REGS[0], cstval.value)\n if cstval.type == 
ConstVal.Addr:\n self.emitter.emitMovAddr(REGS[0], env.getStringRef(evt.index))\n\n if ins.opname.startswith(\"BINARY\") or ins.opname.startswith(\"INPLACE\"):\n env.setupArgs(2, self.emitter)\n\n if ins.opname == \"BINARY_ADD\" or ins.opname == \"INPLACE_ADD\":\n self.emitter.emitRaw(\"add $A $B\")\n if ins.opname == \"BINARY_MULTIPLY\" or ins.opname == \"INPLACE_MULTIPLY\":\n self.emitter.emitRaw(\"mul $A $B\")\n if ins.opname == \"BINARY_SUBSTRACT\" or ins.opname == \"INPLACE_SUBSTRACT\":\n self.emitter.emitRaw(\"sub $A $B\")\n if ins.opname == \"BINARY_LSHIFT\":\n self.emitter.emitRaw(\"shl $A $B\")\n if ins.opname == \"BINARY_RSHIFT\":\n self.emitter.emitRaw(\"shr $A $B\")\n if ins.opname == \"BINARY_AND\":\n self.emitter.emitRaw(\"and $A $B\")\n if ins.opname == \"BINARY_XOR\":\n self.emitter.emitRaw(\"xor $A $B\")\n if ins.opname == \"BINARY_OR\":\n self.emitter.emitRaw(\"or $A $B\")\n\n env.pushEvent(StackEvent(StackEvent.MAKE_FUNCTION_DUMMY, 0, 0))\n if ins.opname == \"SETUP_LOOP\":\n self.emitter.emitLabel(env.addLoop())\n if ins.opname == \"JUMP_ABSOLUTE\":\n self.emitter.emitRaw(\"jmp #{}\".format(env.getLoopTop()))\n if ins.opname == \"POP_BLOCK\":\n self.emitter.emitRaw(env.popLoop())\n\n if ins.opname == \"COMPARE_OP\":\n env.setupArgs(2, self.emitter)\n env.addComparison(ins.argval)\n self.emitter.emitRaw(\"cmp $A $B\")\n env.pushEvent(StackEvent(StackEvent.MAKE_FUNCTION_DUMMY, 0, 0))\n \n if ins.opname == \"POP_JUMP_IF_TRUE\":\n cmp = env.popComparison()\n dest = env.getLoopTop() + \"_end\"\n\n if cmp == '>':\n self.emitter.emitRaw(\"jbe #{}\".format(dest))\n if cmp == '<':\n self.emitter.emitRaw(\"jle #{}\".format(dest))\n if cmp == \"==\":\n self.emitter.emitRaw(\"je #{}\".format(dest))\n if cmp == \"!=\":\n self.emitter.emitRaw(\"jne #{}\".format(dest))\n\n if ins.opname == \"POP_JUMP_IF_FALSE\":\n cmp = env.popComparison()\n dest = env.getLoopTop() + \"_end\"\n\n if cmp == '>':\n self.emitter.emitRaw(\"jle #{}\".format(dest))\n if cmp == '<':\n self.emitter.emitRaw(\"jbe #{}\".format(dest))\n if cmp == \"==\":\n self.emitter.emitRaw(\"jne #{}\".format(dest))\n if cmp == \"!=\":\n self.emitter.emitRaw(\"je #{}\".format(dest))\n\n\n if level_name != \"<module>\":\n self.emitter.emitEpilogue()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints 'args' and 'kwargs' in human-readable form.
def inspect_args(*args, **kwargs): args_string = ', '.join(['{0!r}'.format(i) for i in args]) print('Positional arguments:') print(args_string) print() kwargs_string = ', '.join( '{0}={1!r}'.format(k, v) for k, v in kwargs.items()) print('Keyword arguments:') print(kwargs_string)
[ "def print_arg_summary(args: dict) -> None:\n print(\"Arguments object:\")\n print(args)", "def dump_args(func):\n argnames = func.func_code.co_varnames[:func.func_code.co_argcount]\n fname = func.func_name\n\n def echo_func(*args, **kwargs):\n print fname, \":\", ', '.join('%s=%r' % entry\n for entry in zip(argnames,args) + kwargs.items())\n return func(*args, **kwargs)\n\n return echo_func", "def print_arguments_given(args):\r\n\tprint('=' * 100)\r\n\tprint('Uai file : {}'.format(args.uai_file))\r\n\tprint('Task number : {}'.format(args.task_id))\r\n\tprint('Training data file : {}'.format(args.training_data))\r\n\tprint('Test data file : {}'.format(args.test_data))\r\n\tprint('=' * 100)", "def Print(self,*args,**kwargs):\n print(*args,**kwargs)", "def show_parameters(self):\n for p in self.parameters:\n print p", "def __str__(self):\n return \", \".join(a for a in self.args)", "def print_args(options):\n print(\"--Arguments given to the spider--\")\n for info, value in list(vars(options).items()):\n if value:\n print(\"{info}: {value}\".format(info=info, value=value))\n else:\n print(\"%s: Not set. The process might fail without this argument.\"\n % info)\n print(\"---------------------------------\")", "def __repr__(self,prefix=''):\n str_out = [self.show_search_parameters_values(prefix)]\n str_out.append(self.show_chains_info(prefix))\n # print transforms\n str_out = '\\n'.join(str_out)\n return str_out", "def special_print(*args):\n args = \"\".join(str(a) for a in args)\n indent = special_trace.level * special_trace.indent\n print indent + args.replace(\"\\n\", \"\\n\" + indent)", "def print(self, *args, **kwargs):\n def _is_mv(arg):\n return isinstance(arg, tf.Tensor) and arg.shape.ndims > 0 and arg.shape[-1] == self.num_blades\n new_args = [self.mv_repr(arg) if _is_mv(arg) else arg for arg in args]\n\n print(*new_args, **kwargs)", "def dump_args(self) -> None:\n x = self\n table = (\n (x.old_sent_lines, 'old private lines'),\n (x.a, 'old public lines'),\n (x.b, 'new public lines'),\n )\n for lines, title in table:\n x.dump_lines(lines, title)\n g.pr()", "def printr(obj: Any, *args, **kwargs) -> None:\n\n\tprint(repr(obj), *args, **kwargs)", "def ansiprint(self, *args: str, **kwargs):\n\n new_args = (str(i) if not isinstance(i, str) else i for i in args)\n parts = self.parse(*new_args, aslist=True)\n builtins.print(*parts, **kwargs)", "def print_help():\r\n\r\n print \"Usage: [train data path] [test data path] [classifier type] [degree] [alpha/gamma]\"\r\n print \"\\ttype = 'Lasso', 'Ridge', 'SVC-RBF', 'SVC-Linear'\"", "def kwargs_to_string(kwargs):\n outstr = ''\n for arg in kwargs:\n outstr += ' --{} {}'.format(arg, kwargs[arg])\n return outstr", "def eprint(*args, **kwargs):\r\n print(*args, file=stderr, **kwargs)", "def print(self, *args):\n print(*args, file=self.dump_file)", "def log_output(self):\n\t\tpretty_output = json.dumps(self.nested_params, sort_keys=False, indent=4, separators=(',', ': '))\n\t\tprint(pretty_output)", "def print_debug_arguments(args):\n logger = logging.getLogger(__name__)\n counter_mac = 0\n logger.debug('')\n logger.debug('CLI Arguments, %s', args)\n for macs in args.mac:\n counter_mac += 1\n logger.debug('CLI Arguments, mac %s %s', counter_mac, macs)\n logger.debug('')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a CNF that is satisfied exactly when at most one literal in the list is satisfied
def max_1(lits: List[Literal]): cnf: Formula = [] for i in range(len(lits)): for j in range(i + 1, len(lits)): cnf.append([negate(lits[i]), negate(lits[j])]) return cnf
[ "def atMostOne(literals) :\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Never exist two expr is true\n for example literals=[A,B,C], we return (~A|~B)&(~A|~C)&(~B|~C)\n \"\"\"\n symbols=[]\n results=[]\n for symbol in literals:\n symbols.append(~symbol)\n for i in range(0,len(literals)-1):\n for j in range(i+1,len(literals)):\n results.append((symbols[i]|symbols[j]))\n return logic.conjoin(results)", "def exactlyOne(literals) :\n final_literals = [logic.disjoin(literals)]\n non_literals = []\n for i in range(len(literals)):\n non_literals.append(~literals[i])\n for i in range(len(non_literals)):\n for j in range(i+1, len(non_literals)):\n clause = logic.disjoin(non_literals[i], non_literals[j])\n final_literals.append(clause)\n return logic.conjoin(final_literals)", "def exactlyOne(literals):\n if len(literals) > 1:\n return atMostOne(literals) & atLeastOne(literals)\n else:\n return atLeastOne(literals)\n # if len(literals) == 0:\n # return False\n # if len(literals) == 1:\n # return literals\n # exprs = []\n # for expr in literals:\n # tempExprs = list(literals)\n # tempExprs.remove(expr)\n # tempExprs.append(logic.Expr('~', expr))\n # tempExprsExpr = logic.Expr('|', *tempExprs)\n # exprs.append(tempExprsExpr)\n # exprsExpr = logic.Expr('&', *exprs)\n # return logic.Expr('~', exprsExpr)", "def atMostOne(literals):\n l = [(~literals[i] | ~literals[j]) for i in range(0, len(literals)) for j in range(0, len(literals)) if i != j]\n return logic.conjoin(l)", "def cnf(self):\n\n if self.connective != \"and\"\\\n or len(self.list) != 1\\\n or self.list[0].connective != \"or\"\\\n or len(self.list[0].list) != 1:\n raise Exception(\"Wrong type of generalization\")\n\n # < [ ( X | !X ) , () ] , [ ... ] >\n # conj < ... >\n # clause [ ... ]\n # member ( ... )\n # subformula X, !X\n\n # breadth-first\n # first beta\n # then alpha\n\n # deep-first\n # resolve a non literal deeply\n # take a clause with non-literal and return a list of clauses in cnf\n clause = copy.deepcopy(self.list[0])\n return Generalization(\"and\", clause.cnf_action())", "def has_non_literal(self):\n if len(self.list) == 0:\n #raise Exception(\"Empty list of formulas\")\n # an empty generalization has a meaning\n # empty clause false, empty dual clause true\n return False\n for item in self.list:\n if isinstance(item, Formula):\n if not item.is_literal():\n return True\n # else: ignore it\n elif isinstance(item, Generalization):\n if item.has_non_literal():\n return True\n return False", "def test_already_cnf_exprs(self):\n self.assert_to_cnf_transformation(\n '(A or B) and (C or D) and E',\n '(A or B) and (C or D) and E')\n self.assert_to_cnf_transformation(\n 'A or B or C or D or E',\n 'A or B or C or D or E')\n self.assert_to_cnf_transformation(\n 'A and 1 and B',\n 'A and 1 and B')\n self.assert_to_cnf_transformation(\n '(A or B or C or D or E) and (A or B) and 0 and (A or E)',\n '(A or B or C or D or E) and (A or B) and 0 and (A or E)')", "def assign_pure_literals(clauses):\n\n\tliterals = set()\n\n\tfor clause in clauses:\n\t\tliterals.update(clause)\n\t\tyield clause\n\n\tfor literal in literals:\n\t\tif -literal in literals:\n\t\t\tcontinue\n\t\tyield set([literal])", "def add_distinct(self, literal, elems):\n if self.cc.assignment.is_false(literal):\n return\n\n if len(elems) > 2:\n self._propagator.add_constraint(self.cc, DistinctConstraint(literal, elems))\n return\n\n for i, (rhs_i, elems_i) in enumerate(elems):\n for rhs_j, elems_j in elems[i+1:]:\n rhs = rhs_i - rhs_j\n\n celems = []\n celems.extend(elems_i)\n 
celems.extend((-co_j, var_j) for co_j, var_j in elems_j)\n\n if not celems:\n if rhs == 0:\n self.cc.add_clause([-literal])\n return\n continue\n\n a = self.cc.add_literal()\n b = self.cc.add_literal()\n\n self.cc.add_clause([a, b, -literal])\n self.cc.add_clause([-a, -b])\n\n self.add_constraint(a, celems, rhs-1, False)\n self.add_constraint(b, [(-co, var) for co, var in celems], -rhs-1, False)", "def network_rule_C1(self):\n\n amount_qualifiers = ['ACRU', 'ANTO', 'CHAR', 'COUN', 'DEAL', 'EXEC', 'ISDI', 'LADT', 'LEVY', 'LOCL', 'LOCO',\n 'MARG', 'OTHR', \\\n 'REGF', 'SETT', 'SHIP', 'SPCN', 'STAM', 'STEX', 'TRAN', 'TRAX', 'VATA', 'WITH', 'COAX',\n 'ACCA']\n qualifier_repeated = []\n\n if self.swift_message_obj.SequenceE_SettlementDetails.SubSequenceE3_Amounts:\n for amounts in self.swift_message_obj.SequenceE_SettlementDetails.SubSequenceE3_Amounts:\n for amount in amounts.Amount:\n if amount.swiftTag == '19A' and amount.value() and amount.value()[1:5] in amount_qualifiers:\n qualifier_repeated.append(amount.value()[1:5])\n if len(qualifier_repeated) != len(list(set(qualifier_repeated))):\n return 'The following amount fields cannot appear in more than one occurrence of subsequence E3 Amounts %s' % str(\n qualifier_repeated)\n\n return ''", "def conjunctionOf(factlist) :\n if len(factlist) == 0 :\n ans = [\"True\"]\n elif len(factlist) == 1 :\n ans = factlist[0]\n else : \n ans = [\"and\", factlist[0], conjunctionOf(factlist[1:])]\n return ans", "def atLeastOne(expressions) :\n \"*** YOUR CODE HERE ***\"\n if len(expressions) == 1 :\n return expressions[0]\n a = expressions[0]\n for place in range(1, len(expressions)) :\n a = logic.Expr('|', a, expressions[place])\n return a", "def exactlyOne(expressions) :\n \"*** YOUR CODE HERE ***\"\n if len(expressions) == 1:\n return expressions[0]\n return logic.Expr(\"&\", *(exactlyOneAsList(expressions)))", "def atmost_one_VNF(self):\r\n self.sum_o = 0\r\n for f in self.VNFS:\r\n for u in G.nodes():\r\n name_o = self.o_template.format(f, u)\r\n RHS = self.model.getVarByName(name_o)\r\n self.sum_o += RHS\r\n self.model.addConstr(self.sum_o, GRB.LESS_EQUAL, 1)", "def atLeastOne(expressions) :\n \"*** YOUR CODE HERE ***\"\n if len(expressions) == 1:\n return expressions[0]\n return logic.Expr(\"|\", *expressions)", "def is_satisfy_all_consts(self, item):\n if type(item) is not set:\n item = set([item])\n for it in item:\n for const in self.constraints:\n if not const.match(it):\n return False\n return True", "def ground_partially_grounded_terms_exhaustively(terms: List[CompoundTerm]) -> SymbolIndex:\n variables = SymbolIndex()\n\n for term in terms:\n domains = [s.domain() for s in term.symbol.domain]\n for i in range(len(domains)):\n if isinstance(term.subterms[i], Constant):\n domains[i] = [term.subterms[i]]\n for binding in itertools.product(*domains):\n variables.add(StateVariableLite(term.symbol, binding))\n\n return variables", "def satisfiable(self):\n self.literals_values = [x.assigned_val() for x in self.terms]\n if True in self.literals_values:\n clause_sat = True\n elif None in self.literals_values:\n clause_sat = None\n else:\n clause_sat = False\n self.satisfied = clause_sat\n\n tracer(f\"Clause {str(self.index)} evaluates to {clause_sat}, details: {self.literals_values}, \"\n f\"{[x.short_str() for x in self.terms]}\", TRACE_LVL, 6)\n return clause_sat", "def generateClause(kb, cl1, cl2, atom, root):\n\n\t# adds non-redundant literals to clause\n\tclause = cl1.copy()\n\tclause.remove(atom)\n\tfor literal in cl2:\n\t\tif literal not in 
clause:\n\t\t\tclause.append(literal)\n\t\t# check if clause evaluates to true\n\t\tif negateAtom(literal) in clause:\n\t\t\treturn []\n\tclause.remove(negateAtom(atom))\n\n\t# clause valid, but is it in kb?\n\tif root.containsClause(sorted(clause)):\n\t\treturn []\n\telse:\n\t\troot.insert(sorted(clause))\n\t\treturn clause" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a BayesGraph object from an input file in .uai format
def __init__(self: BayesGraph, file_object: TextIO): graph_type = file_object.readline() if graph_type != "BAYES\n": raise Exception("File does not contain a Bayes network in .uai format") num_vars = file_object.readline() self.cardinalities = [ int(n) for n in file_object.readline().split() ] num_factors = int(file_object.readline()) # handle factors in preamble self.factors: List[List[int]] = [] for i in range(num_factors): factor_description = [ int(n) for n in file_object.readline().split() ] if factor_description[0] != len(factor_description) - 1: raise Exception("Number given for number of variables for factor does not match number of variables given for factor") self.factors.append(factor_description[1:]) # handle function tables ft_description = file_object.read().split() self.tables: List[Dict[Tuple, str]] = [] i = 0 # for each factor for factor_i in range(num_factors): table: Dict[Tuple, str] = {} num_entries = int(ft_description[i]) i += 1 # iterate through each assignment to local random variables assignment = [0 for i in self.factors[factor_i]] in_range = True while in_range: # and record that assignment with its probability in the map table[tuple(assignment)] = ft_description[i] i += 1 # then update the assignment to the next iterand in_range = False for j in range(len(assignment) -1, -1, -1): if assignment[j] < self.cardinalities[self.factors[factor_i][j]] - 1: assignment[j] += 1 in_range = True break else: assignment[j] = 0 self.tables.append(table) self._memo_to_formula: Optional[Tuple[List[str], Formula]] = None
[ "def from_graphML(self, in_file):\n pass", "def __init__(self, gfile):\n # open the file\n f = open(gfile, \"r\")\n # read the file\n file = f.readlines()\n\n line_count = 0\n for line in file:\n if line_count == 0:\n # initialise all vertices in graph\n num_vertices = int(line.strip())\n self.vertices = []\n for i in range(num_vertices):\n self.vertices.append(Vertex(i))\n else:\n # add edges\n edge = line.split()\n # convert to integers\n for i in range(len(edge)):\n edge[i] = int(edge[i])\n self.add_directed_edge(edge[0], edge[1], edge[2])\n self.add_directed_edge(edge[1], edge[0], edge[2])\n line_count += 1\n\n # close the file\n f.close()", "def load_data(filename):\n\tud_graph = grew.graph(filename)\n\treturn ud_graph", "def __init__(self, label, filename):\n self.nodes = []\n self.label = label\n self.relationships = []\n self.paths = 0\n if (filename): self.load(filename)", "def load_data(file):\n\n start=set() # start states\n transitions=list() # transitions: [input, start, end] \n accept=set() # accept states\n alphabet=set() # set of all input symbols\n states=set() # set of all states\n\n beginning=True \n \n for line in file:\n # start states\n if re.search(r\"^\\[(.)+\\]\\n$\", line) and beginning:\n start.add(line[1:-2])\n states.add(line[1:-2])\n\n else:\n match=re.search(r\"^((.)+),\\[((.)+)\\]->\\[((.)+)\\]\\n$\", line)\n # transitions\n if match:\n beginning=False\n transitions.append([match.group(3), match.group(1), match.group(5)]) \n alphabet.add(match.group(1))\n states.add(match.group(3))\n states.add(match.group(5))\n \n # accept states\n elif re.search(r\"^\\[(.)+\\]\\n$\", line) and not beginning:\n accept.add(line[1:-2])\n states.add(line[1:-2])\n \n # wrong file format\n else:\n raise SyntaxError(\"Wrong format!\")\n\n return Automaton(states,alphabet,transitions,start,accept)", "def __init__(self, gaf_file):\r\n super(GAFParser, self).__init__()\r\n self.gaf_file = gaf_file\r\n self.tell(f'Loading GAF file: {self.gaf_file}')\r\n self.goa = pd.read_csv(self.gaf_file,\r\n sep='\\t',\r\n names=self.gaf_columns,\r\n header=None)\r\n self.tell(f'Processing GOA tax IDs')\r\n self.goa['datetime'] = pd.to_datetime(self.goa['Date'],\r\n format='%Y%m%d')\r\n self.goa['taxa'] = self.goa['Taxon(Itaxon)'].str.split('|')\r\n self.goa = self.goa.explode('taxa')\r\n self.goa['Tax ID'] = self.goa['taxa'].str.split(':').str[1]", "def __init__(self, file_name=None):\n \n # Initialize instance variables\n self.vertices = {}\n \n # Read graph\n if file_name != None:\n self.read_file(file_name)", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(infile), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def createGraph(file):\n\n graph = Graph()\n\n with open(file) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split(',')\n\n # Ensures input file is only three entries per line\n if len(line) != 3:\n print \"Input file corrupted!\"\n sys.exit(1)\n\n graph.addEdge(str(line[0]),str(line[1]), str(line[2]))\n\n return graph", "def import_from_file(self, filepath):\n with open(filepath, \"r\") as input_file:\n self.graph = Flow(vertex_count=int(input_file.readline()))\n for i in range(self.graph.vertex_count):\n self.graph.vertexes[i] = BalanceVertex(value=i, balance=float(input_file.readline()))\n for knot in input_file:\n s, e, cost, cap = knot.split(\"\\t\")\n self.graph.add_edge(int(s), int(e), weight=float(cost.replace('\\n', 
'')), capacity=float(cap))", "def __init__(self, label_file=None):\n self._encoded_features = {}\n self._len_phones = {}\n self._original_matrix = {}\n if label_file:\n self.label = Label(label_file)\n self.label_file = label_file\n else:\n self.label = None\n self.label_file = None", "def load_graph(fname):\n g = nx.Graph()\n with open(fname) as fl:\n for line in fl:\n u, v = line.split(\" \")\n g.add_edge(int(u), int(v))\n print(\"Loaded graph with {} nodes\".format(len(g.nodes)))\n return g", "def __init__(self, onnx_model, onnx_type=\"pb\"):\n super(OnnxGraph,self).__init__()\n self._onnx_model = onnx_pb2.ModelProto()\n if onnx_type == \"pb\":\n with open(onnx_model, \"rb\") as onnx_stream:\n self._onnx_model.ParseFromString(onnx_stream.read())\n else:\n raise NotImplementedError(\"Onnx file type {} doesn't support yet\".format(onnx_type))\n # a map from the node to version, while the node name is unique\n self._nodes_version = {}\n # a map from the name to nodes\n self.tb_nodes = {}", "def load_trained_model(self, graph):\n\n self.bonsai_.graph = graph", "def readInput(filename):\r\n\r\n parsedGraph = Graph.Graph()\r\n isHeuristicSection = False # True if processing the heuristic values for the graph. False otherwise.\r\n sectionDivider = \"#####\"\r\n minCharsInLine = 3 # Each line with data must have at least 3 characters\r\n with open(filename, 'r') as input:\r\n for line in input.readlines():\r\n if (len(line) > minCharsInLine):\r\n line = line.strip()\r\n if(sectionDivider in line):\r\n isHeuristicSection = True\r\n elif(isHeuristicSection):\r\n state, heurStr = line.split(' ')\r\n heuristic = float(heurStr)\r\n parsedGraph.setHeuristic(state, heuristic)\r\n else:\r\n state1, state2, costStr = line.split(' ')\r\n cost = float(costStr)\r\n parsedGraph.addStatesAndEdge(state1,state2, cost)\r\n for state_key in parsedGraph.states:\r\n state = parsedGraph.states[state_key]\r\n state.edges = OrderedDict(sorted(state.edges.items()))\r\n return parsedGraph", "def load(self, filename):\n\n # reinitialize the object\n self.__init__()\n # fill in the object\n o = open(filename)\n s = o.read()\n a = ArffFile.parse(s)\n self.relation = a.relation\n self.attributes = a.attributes\n self.attribute_types = a.attribute_types\n self.attribute_data = a.attribute_data\n self.comment = a.comment\n self.data = a.data\n o.close()", "def from_file(cls, filename):\n with open(filename, \"r\") as f:\n vocab = json.load(f)\n return KoBpeTokenizer(vocab)", "def load_graph_users(u1, u2, path):\n u1_string = \"U:\" + str(u1)\n u2_string = \"U:\" + str(u2)\n if u1 >= u2:\n max_val = u1\n else:\n max_val = u2\n with open(path) as file:\n next(file)\n graph = nx.Graph()\n for lines in file:\n file_line_split = lines.split(\",\")\n if file_line_split[1] == u1_string:\n graph.add_edge(u1_string, file_line_split[0], weight=file_line_split[2])\n elif file_line_split[1] == u2_string:\n graph.add_edge(u2_string, file_line_split[0], weight=file_line_split[2])\n else:\n if max_val < int(file_line_split[1].split(\":\")[1]):\n break\n continue\n return graph", "def load_graph(g_file=None, g_type=None, g_nodes=None, g_new_edges=None, g_seed=None):\n\tif g_file is not None:\n\t\tG = read_graph(g_file)\n\telse:\n\t\tdatasets_dir = \"datasets/\"\n\t\tif g_type == \"barabasi_albert\":\n\t\t\tG = nx.generators.barabasi_albert_graph(g_nodes, g_new_edges, seed=g_seed)\n\t\telif g_type == \"wiki\":\n\t\t\tG = read_graph(datasets_dir + \"wiki-Vote.txt\", directed=True)\n\t\telif g_type == \"amazon\":\n\t\t\tG = 
read_graph(datasets_dir + \"amazon0302.txt\", directed=True)\n\t\telif g_type == \"twitter\":\n\t\t\tG = read_graph(datasets_dir + \"twitter_combined.txt\", directed=True)\n\t\telif g_type == \"facebook\":\n\t\t\tG = read_graph(datasets_dir + \"facebook_combined.txt\", directed=False)\n\t\telif g_type == \"CA-GrQc\":\n\t\t\tG = read_graph(datasets_dir + \"CA-GrQc.txt\", directed=True)\n\t\telif g_type == \"epinions\":\n\t\t\tG = read_graph(datasets_dir + \"soc-Epinions1.txt\", directed=True)\n\t\telif g_type == \"tiny_wiki\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_wiki_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_amazon\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_amazon_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_CA-GrQc\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_CA-GrQc_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_wiki_community\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_wiki_community_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_amazon_community\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_amazon_community_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\t\telif g_type == \"tiny_CA-GrQc_community\":\n\t\t\tG = read_graph(datasets_dir + \"Tiny_CA-GrQc_community_{}nodes_seed0.txt\".format(g_nodes), directed=True)\n\treturn G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a CNF representing evidence (i.e. observed vertices and their values) on the graph from a .uai.evid file representation. Note that in our representation, the indicator variable indicating that random variable i is set to its jth value is the nth indicator variable, where n is the sum of the cardinalities of all the random variables before i, plus j.
def evidence_to_formula(self: BayesGraph, file_object: TextIO): evidence_description = [ int(i) for i in file_object.read().split()] if evidence_description[0] != (len(evidence_description) - 1) / 2: raise Exception("evidence file is improperly formatted") indicators_map = [0] for cardinality in self.cardinalities: indicators_map.append(indicators_map[-1] + cardinality) indicators_map.pop() cnf: Formula = [] i = 0 for i in range(1, len(evidence_description), 2): cnf.append([(1, indicators_map[evidence_description[i]] + evidence_description[i + 1])]) return cnf
[ "def to_formula_file_with_evidence(self: BayesGraph, evidence: TextIO, ffile: TextIO, wfile: TextIO):\n weights, cnf = self.to_formula()\n cnf.extend(self.evidence_to_formula(evidence))\n ffile.write(\"p cnf {} {}\\n\".format(len(weights), len(cnf)))\n for clause in cnf:\n clause_str = \" \".join([ (\"\" if sign else \"-\") + str(var) for sign, var in clause ]) + \" 0\\n\"\n ffile.write(clause_str)\n wfile.write(\"p {}\\n\".format(len(weights)))\n for i in range(len(weights)):\n wfile.write(\"w {} {} 0\\nw -{} 1.0 0\\n\".format(i, weights[i], i))", "def from_endf(cls, ev_or_filename, covariance=False):\n if isinstance(ev_or_filename, Evaluation):\n ev = ev_or_filename\n else:\n ev = Evaluation(ev_or_filename)\n\n atomic_number = ev.target['atomic_number']\n mass_number = ev.target['mass_number']\n metastable = ev.target['isomeric_state']\n atomic_weight_ratio = ev.target['mass']\n temperature = ev.target['temperature']\n\n # Determine name\n element = ATOMIC_SYMBOL[atomic_number]\n if metastable > 0:\n name = '{}{}_m{}'.format(element, mass_number, metastable)\n else:\n name = '{}{}'.format(element, mass_number)\n\n # Instantiate incident neutron data\n data = cls(name, atomic_number, mass_number, metastable,\n atomic_weight_ratio, [temperature])\n\n if (2, 151) in ev.section:\n data.resonances = res.Resonances.from_endf(ev)\n\n if (32, 151) in ev.section and covariance:\n data.resonance_covariance = (\n res_cov.ResonanceCovariances.from_endf(ev, data.resonances)\n )\n\n # Read each reaction\n for mf, mt, nc, mod in ev.reaction_list:\n if mf == 3:\n data.reactions[mt] = Reaction.from_endf(ev, mt)\n\n # Replace cross sections for elastic, capture, fission\n try:\n if any(isinstance(r, res._RESOLVED) for r in data.resonances):\n for mt in (2, 102, 18):\n if mt in data.reactions:\n rx = data.reactions[mt]\n rx.xs['0K'] = ResonancesWithBackground(\n data.resonances, rx.xs['0K'], mt)\n except ValueError:\n # Thrown if multiple resolved ranges (e.g. Pu239 in ENDF/B-VII.1)\n pass\n\n # If first-chance, second-chance, etc. fission are present, check\n # whether energy distributions were specified in MF=5. If not, copy the\n # energy distribution from MT=18.\n for mt, rx in data.reactions.items():\n if mt in (19, 20, 21, 38):\n if (5, mt) not in ev.section:\n if rx.products:\n neutron = data.reactions[18].products[0]\n rx.products[0].applicability = neutron.applicability\n rx.products[0].distribution = neutron.distribution\n\n # Read fission energy release (requires that we already know nu for\n # fission)\n if (1, 458) in ev.section:\n data.fission_energy = FissionEnergyRelease.from_endf(ev, data)\n\n data._evaluation = ev\n return data", "def meu(self, evidence):\n # TODO: Implement the above!\n\n # okay so we figure out the values of the Dvars ... and then use combos from itertools? I think that would be a good idea.\n # so we want EU(D=0) = P(Y=y | D= 0) * U(Y=1)\n # so in code this looks like\n \"\"\"\n We can create an array of tuples based on values of the dec_var\n [(0,we loop through each value of y(P(Y=y| D=0) * U(Y=y)) Do i need to figure out parents??\n (1, P(Y=y|D=1)*U(Y=y))\n and then we just find the max...\n lets see, so taking a first spin at this. get the arr of values from self.val using arr=self.val[\"D\"] (will need to expand for multiple dec_vars)\n arr=[0,1]\n given util_var, I guess rn we will loop through and check which key exists... 
Make note of the key\n\n if it exists, put the resulting dict in a variable\n tuple_list = okay convert the dictionary to a list of tuples.\n for val in arr (will need to switch to combo list soon)\n for tup in tuple_list\n \n result = helper_method(bn.predict_proba(dec_var: arr[0]),cols)\n tuples_result = result[noted_symbol]\n lets just assume for rn\n x =tuples_result[tup[0]][1] <-- this is the answer for y of P(D=0) (really P(Y=y |D=0) (saying y is 0)\n sum += x * tup[1]\n \n \"\"\"\n # create a list of tuples with each of the dec_vars values\n list_tuple_dec_vars = []\n for var in self.dec_vars:\n values = self.vals[var]\n for v in values:\n list_tuple_dec_vars.append((var, v))\n # add in evidence varibles.\n evidence_count = 0\n if evidence != {}:\n for item in evidence.items():\n evidence_count += 1\n list_tuple_dec_vars.append(item)\n\n # now create a combonation list of evidence and dec_vars\n combo_list = it.combinations(\n list_tuple_dec_vars, len(self.dec_vars) + evidence_count)\n # so now we have a giant list of combos, note, some stuff is repeats\n # now create a list of dictionarys formatted with dec_var1 = val, dec_var2 = val, evidence =e\n combo_dict_list = []\n # item is the a list element\n for item in combo_list:\n dict_to_be_listed = {}\n # content is tuple (\"symbol\" :value)\n for content in item:\n # if symbol already in dictionary, then this is a combo with a repeat\n if content[0] in dict_to_be_listed:\n break\n dict_to_be_listed[content[0]] = content[1]\n\n else:\n # if inner loop finished without a break, this means that the combo dict should be added to the list\n combo_dict_list.append(dict_to_be_listed)\n continue\n\n # we now have a list of dictionaries ready to be inserted into predict_proba.\n # now we loop through each\n # we also need to loop through values of the utility node and recover the value of this\n # we have the util map in format {Symbol : {val1 : util1 , val2 :util2}\n # to obtain the symbol in a brute force fashion, I will cycle through the cols until I find the key\n # ^ needs improvement\n symbol = \"\"\n for s in self.cols:\n if s in self.util_map:\n symbol = s\n break\n # we can also get the values we need to loop through for the util_node\n poss_util_vals = self.vals[symbol]\n # now we can easily call util_map in a loop with self.util_map[symbol][loop_val_it]\n # we also now which value to extract.\n # will attempt to store these results in a dictionary, though a list of tuples may be the only proper format...\n result_tuple_list = []\n for combo in combo_dict_list:\n sum = 0\n for val in poss_util_vals:\n # get resulting dictionary given combo dec vars and evidence. 
Symbol match is used to order the dictionary\n prob_dict = self.symbolMatch(\n self.bn.predict_proba(combo), self.cols)\n # now we will get the tuple list from this dictionary.\n util_var_tuples = prob_dict[symbol]\n # now we must loop through these tuples until we find the value\n prob_answer = 0\n for tup in util_var_tuples:\n if tup[0] == val:\n prob_answer = tup[1]\n break\n sum += prob_answer * self.util_map[symbol][val]\n result_tuple_list.append((combo.copy(), sum))\n # we have now stored the combo and sum in a list of tuples.\n # now loop through and find best.\n best_combo = {}\n best_util = 0\n for item in result_tuple_list:\n if item[1] > best_util:\n best_util = item[1]\n best_combo = item[0].copy()\n # now before we return the best combo, we need to remove the evidence\n for key in evidence.keys():\n best_combo.pop(key)\n return (best_combo, best_util)\n\n # We need to submit a query like P(Y=y| Dec_var=dec_val, evidence", "def read_vee(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = np.zeros((size1, size1, size2, size2), dtype=np.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, np.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def addEvidence(self, evidence):\n\t\tfor key, value in evidence.items():\n\t\t\tnode = self.fg.bn.idFromName(key)\n\t\t\tneighbors = self.fg.neighbors[(node, 'children')] + self.fg.neighbors[(node, 'parents')]\n\t\t\tif len(neighbors) >= 2 and len(self.fg.neighbors[(node, 'parents')]) == 1:\n\t\t\t\tif value == 1:\n\t\t\t\t\tself.fg.node_cpt[self.fg.neighbors[(node, 'parents')][0]].fillWith([0, 1])\n\t\t\t\telse:\n\t\t\t\t\tself.fg.node_cpt[self.fg.neighbors[(node, 'parents')][0]].fillWith([1, 0])\n\t\t\t\t# print(self.fg.node_cpt[self.fg.neighbors[(node, 'parents')][0]])\n\t\t\telse:\n\t\t\t\tfactor = self.addFactor(node, key, value)\n\t\t\t\tself.fg.addEdge(factor, node)\t\t\t\t\n\t\t\t\tself.fg.messages[(node, factor)] = self.fg.node_cpt[factor]\n\t\t\t\tself.fg.messages[(factor, node)] = 1\n\t\t\t\tself.fg.neighbors[(factor, 'children')] = []\n\t\t\t\tself.fg.neighbors[(factor, 'parents')] = [node]\n\t\t\t\tself.fg.neighbors[(node, 'children')] = [factor]\n\t\t\t\t# print('node: {}, factor {}, key {}:'.format(node, factor, key))\n\t\t\t\t# print(self.fg.node_cpt[factor])", "def __init__(self: BayesGraph, file_object: TextIO):\n graph_type = file_object.readline()\n if graph_type != \"BAYES\\n\":\n raise Exception(\"File does not contain a Bayes network in .uai format\")\n num_vars = file_object.readline()\n self.cardinalities = [ int(n) for n in file_object.readline().split() ]\n num_factors = int(file_object.readline())\n # handle factors in preamble\n self.factors: List[List[int]] = []\n for i in range(num_factors):\n factor_description = [ int(n) for n in file_object.readline().split() ]\n if factor_description[0] != len(factor_description) - 1:\n raise Exception(\"Number given for number of variables for factor does not match number of variables given for factor\")\n self.factors.append(factor_description[1:])\n # handle function tables\n ft_description = file_object.read().split()\n self.tables: List[Dict[Tuple, 
str]] = []\n i = 0\n # for each factor\n for factor_i in range(num_factors):\n table: Dict[Tuple, str] = {}\n num_entries = int(ft_description[i])\n i += 1\n # iterate through each assignment to local random variables\n assignment = [0 for i in self.factors[factor_i]]\n in_range = True\n while in_range:\n # and record that assignment with its probability in the map\n table[tuple(assignment)] = ft_description[i]\n i += 1\n # then update the assignment to the next iterand\n in_range = False\n for j in range(len(assignment) -1, -1, -1):\n if assignment[j] < self.cardinalities[self.factors[factor_i][j]] - 1:\n assignment[j] += 1\n in_range = True\n break\n else:\n assignment[j] = 0\n self.tables.append(table)\n\n self._memo_to_formula: Optional[Tuple[List[str], Formula]] = None", "def read_aev(fname):\n\n try:\n f = open(fname, \"r\")\n except IOError:\n print(\"Could not open file:\" + fname)\n sys.exit()\n with f:\n aevd = f.readlines()\n\n n_line = len(aevd)\n npt = int(aevd[0])\n n_atom = int(aevd[1])\n dout = int(aevd[2])\n\n aev = [ [ [0]*dout for a in range(n_atom)] for p in range(npt)]\n line = 3\n for p in range(npt):\n \tfor a in range(n_atom):\n \t\tfor i in range(dout):\n \t\t\taev[p][a][i]=float(aevd[line])\n \t\t\tline += 1\n return npt, n_atom, dout, aev", "def add_variable_node(self, n):\n name = n.attr[\"cag_label\"]\n self.add_node(\n name,\n value=None,\n pred_fns=[],\n agraph_name=n,\n index=n.attr[\"index\"],\n node_type=n.attr[\"node_type\"],\n start=n.attr[\"start\"],\n end=n.attr[\"end\"],\n index_var=n.attr[\"index_var\"],\n visited=False,\n )\n\n # If the node is a loop index, set special initialization\n # and update functions.\n if n.attr[\"is_index\"] == \"True\":\n self.nodes[name][\"is_index\"] = True\n self.nodes[name][\"value\"] = int(n.attr[\"start\"])\n self.nodes[name][\"visited\"] = True\n self.nodes[name][\"update_fn\"] = (\n lambda **kwargs: int(kwargs.pop(list(kwargs.keys())[0])) + 1\n )\n self.add_edge(name, name)", "def gen_ei(self,\n size=23):\n self.ei = epsilon_index(self.am,\n self.bonds_i,\n size=size)", "def chooseFFI(na_dict):\n d = na_dict\n\n if d[\"NIV\"] > 4: # More than 4 independent variables not allowed\n raise Exception(\"NASA Ames cannot write more than 4 independent variables.\")\n\n elif d[\"NIV\"] == 4: # 4 independent variables\n return 4010\n\n elif d[\"NIV\"] == 3: # 3 independent variables\n return 3010\n\n elif d[\"NIV\"] == 2: # 2 independent variables\n if type(d[\"X\"][0][0]) == type(\"string\"):\n # 2160 - the independent unbounded variable is a character string\n return 2160\n elif type(d[\"X\"][0][1]) == type([1,2]) and len(d[\"X\"][0][1]) > 1:\n # 2110 - one independent variable changes length and the values are specified\n return 2110\n elif type(d[\"X\"][0][1]) == type([1,2]) and len(d[\"X\"][0][1]) == 1:\n # 2310 - one indepenent variable changes length but only the first value is specifically stated\n return 2310\n else: \n # 2010 - Straightforward 2-D variables\n return 2010\n\n elif d[\"NIV\"] == 1: # 1 independent variable \n if \"NAUXV\" not in d:\n # 1001 - No auxiliary variables\n return 1001\n elif \"NVPM\" in d:\n # 1020 - Implied values for independent variable\n return 1020\n else:\n # 1010 - Auxiliary variables included\n return 1010\n else:\n raise Exception(\"Could not resolve the dictionary object to create a suitable NASA Ames File Format Index (FFI). 
Please modify the contents and try again.\")", "def generateExampleData3():\n\n\n theta = np.linspace(0,np.pi,101)\n x = np.power((1-np.cos(theta))/2, 2)\n return create4DigitNacaAerofoil(9,5,16,x)", "def read_vee_pack(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = np.zeros((size1, size1, size2, size2), dtype=np.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, np.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def model_ifu(self):\n xc = int(0.8 * self.pixels_per_arcsec)\n ifu = np.full((2*xc+1, 2*xc+1), -1, dtype=int)\n pitch = self.fibre_pitch * self.pixels_per_arcsec\n size = 0.5 * 0.94 * pitch # reproduces Fig.2.2.3a of GHOSD-09\n S3 = np.sqrt(3)\n self.add_hexagon(ifu, 0, 0, size, 9)\n for i, (fibre1, fibre2, fibre3) in enumerate(zip((6, 12, 10, 7, 11, 8),\n (3, 14, 5, 13, 15, 4),\n (0, 18, 16, 2, 17, 1))):\n self.add_hexagon(ifu, pitch * np.sin(i * np.pi / 3),\n pitch * np.cos(i * np.pi / 3), size, fibre1)\n self.add_hexagon(ifu, S3 * pitch * np.sin((i + 0.5) * np.pi / 3),\n S3 * pitch * np.cos((i + 0.5) * np.pi / 3), size, fibre2)\n self.add_hexagon(ifu, 2 * pitch * np.sin(i * np.pi / 3),\n 2 * pitch * np.cos(i * np.pi / 3), size, fibre3)\n return ifu", "def load_data(filename):\n \n evidence = []\n labels = []\n months = {'Jan': 1, 'Feb' : 2, 'Mar': 3, 'May' : 5, 'June' : 6, 'Jul' : 7, 'Aug' : 8, 'Sep' : 9, 'Oct' : 10, 'Nov' : 11, 'Dec' : 12}\n \n \n \n with open(filename, newline='') as csvfile:\n reader = csv.reader(csvfile)\n next(reader)\n for line in reader:\n evidence.append([int(line[0]), float(line[1]), int(line[2]), float(line[3]), int(line[4]), float(line[5]), \n float(line[6]), float(line[7]), float(line[8]), \n float(line[9]), months[line[10]], int(line[11]), int(line[12]), int(line[13]), int(line[14]), \n 0 if line[15] == 'New_Visitor' else 1, 0 if line[16] == 'FALSE' else 1 ])\n labels.append(0 if line[17] == 'FALSE' else 1)\n \n return (evidence, labels)", "def vesuvius_graph(**kwargs):\n target_graph = dnx.generators.chimera_graph(8, 8, 4, **kwargs)\n target_graph.graph['chip_id'] = 'Vesuvius'\n return target_graph", "def import_interactions_form_DIP(dip_file, edgelist):\n inp = open(dip_file)\n inp.readline() #header\n for line in inp:\n line=line.strip()\n line = line.split(\"\\t\")\n uniprot_ID1 = search_uniprot_id(line[0])\n uniprot_ID2 = search_uniprot_id(line[1])\n if uniprot_ID1 and uniprot_ID2:\n edgelist.add_edge(uniprot_ID1, uniprot_ID2)\n return edgelist", "def model_ifu(self):\n xc = int(0.8 * self.pixels_per_arcsec)\n ifu = np.full((2*xc+1, 2*xc+1), -1, dtype=int)\n pitch = self.fibre_pitch * self.pixels_per_arcsec\n size = 0.5 * 0.97 * pitch # reproduces Fig.2.2.3a of GHOSD-09\n self.add_hexagon(ifu, 0, 0, size, 3)\n for i, fibre in enumerate((0, 6, 4, 1, 5, 2)):\n self.add_hexagon(ifu, pitch * np.sin(i * np.pi / 3),\n pitch * np.cos(i * np.pi / 3), size, fibre)\n return ifu", "def test_parse_evidence(self):\n evidence = [\n ('--', ''),\n ('SN', '1'),\n ('ID', 'Aferr subtype specific proteins'),\n ('DN', 'Crispy Proteins'),\n ('RQ', '0'),\n ('EV', 'IPR017545; TIGR03114; sufficient;'),\n ('TG', 'GO:0043571;')\n ]\n\n 
parsed_evidence = parse_evidences(evidence)\n\n self.assertEqual(len(parsed_evidence), 1)\n first_evidence = parsed_evidence[0]\n\n self.assertEqual(first_evidence.evidence_identifiers, ['IPR017545', 'TIGR03114'])\n self.assertEqual(first_evidence.gene_ontology_terms, ['GO:0043571'])\n self.assertEqual(first_evidence.sufficient, True)", "def main(filename):\n out_dir = os.path.dirname(filename)\n datafile = np.genfromtxt(filename, names=True, delimiter='\\t', dtype=None)\n \n subject_col = datafile[\"Subject\"]\n subjects = np.unique(subject_col)\n \n for subject in subjects:\n subject_rows = np.where(subject_col==subject)[0]\n subject_data = datafile[subject_rows]\n exp_col = subject_data[\"ExperimentName\"]\n experiments = np.unique(exp_col)\n for exp in experiments:\n exp_rows = np.where(exp_col==exp)[0]\n exp_data = subject_data[exp_rows]\n sess_col = np.array(exp_data[\"Session\"], dtype=str)\n session = np.unique(sess_col)[0]\n\t session = str(int(session) - 1)\n \n column_names = np.array(exp_data.dtype.names, dtype=str)\n header_string = \"\\t\".join(column_names)\n out_list = [str(subject), session, str(exp), \"eprime.txt\"]\n out_file = os.path.join(out_dir, \"-\".join(out_list))\n with open(out_file, \"w\") as fo:\n np.savetxt(fo, exp_data, delimiter=\"\\t\", newline=\"\\n\",\n fmt=\"%s\", header=header_string, comments=\"\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write a CNF encoding of the graph represented by this object to a file in DIMACS format, together with an associated weights file.
def to_formula_file_with_evidence(self: BayesGraph, evidence: TextIO, ffile: TextIO, wfile: TextIO): weights, cnf = self.to_formula() cnf.extend(self.evidence_to_formula(evidence)) ffile.write("p cnf {} {}\n".format(len(weights), len(cnf))) for clause in cnf: clause_str = " ".join([ ("" if sign else "-") + str(var) for sign, var in clause ]) + " 0\n" ffile.write(clause_str) wfile.write("p {}\n".format(len(weights))) for i in range(len(weights)): wfile.write("w {} {} 0\nw -{} 1.0 0\n".format(i, weights[i], i))
[ "def write_graph(self, filename):\n pass", "def to_file(self, fileout):\n dirout =os.path.split(fileout)[0]\n pathlib.Path(dirout).mkdir(parents=True, exist_ok=True)\n\n jout = {'constraints': {}, 'agents': {}, 'variables': {}}\n for a in self.agents:\n agt = self.agents[a]\n jout['agents'][a] = {'vars': [v.name for v in agt.variables]}\n\n for i, v in enumerate(self.variables):\n var = self.variables[v]\n jout['variables'][v] = {'id': i, 'cons': [c.name for c in var.constraints],\n 'domain': var.domain, 'type': 1, 'value': None,\n 'agent': var.controlled_by.name}\n\n for c in self.constraints:\n con = self.constraints[c]\n jout['constraints'][c] = {'vals': [int(v) for v in con.values.values()],\n 'scope': [v.name for v in con.scope]}\n\n print('Writing dcop instance on file', fileout)\n with open(fileout, 'w') as fp:\n json.dump(jout, fp, sort_keys=True, indent=4)", "def saveCNF( name, cnf ):\n f = open( name, \"w\" )\n nbvars = max( [max(C) for C in cnf] )\n nbclauses = len(cnf)\n\n # header\n f.write(\"p cnf %d %d\\n\" % (nbvars, nbclauses))\n \n # clauses\n for C in cnf: \n s = \"\" \n for x in C:\n s += str(x) + \" \"\n s += \"0\\n\"\n f.write(s)\n\n f.close()", "def write(self, path):\n\n self.find_nodes()\n self.nodes = self.input + self.additional_nodes\n self.build_edges()\n with open(path+\".nodes.tsv\", \"w\") as f:\n f.write(\"\\n\".join(\n [\"id\\tlabel\\ttype\"] + [\n \"{}\\t{}\\t{}\".format(\n str(self.nodes.index(node)), node, str(int(node in self.input))\n ) for node in self.nodes\n ]\n ))\n\n with open(path+\".edges.tsv\", \"w\") as f:\n f.write(\"\\n\".join(\n [\"source\\ttarget\\tweight\"] + [\n \"\\t\".join(edge) for edge in self.edges\n ]\n ))", "def _write_conv(name_pfx, kernel_size, state_dict, path):\n with open(path, 'wb') as fout:\n weight = state_dict['{}.{}.weight'.format(name_pfx, kernel_size - 2)]\n weight_t = weight.transpose(1, 2) # [output chan] * [input chan] * kernel\n # => [output chan] * kernel * [input chan]\n for ch_out in weight_t:\n array('f', ch_out.contiguous().view(-1)).tofile(fout)\n bias_name = '{}.{}.bias'.format(name_pfx, kernel_size - 2)\n if bias_name in state_dict:\n bias = state_dict[bias_name]\n array('f', bias).tofile(fout) # [output chan] * 4", "def write_nx_graph(graph, filename):\n fx = open(filename, \"w\")\n fx.write(\"digraph grn\\n{\\n\")\n for edge in graph.edges():\n fx.write(\" %s -> %s [label=%d]\\n\" % edge)\n \n fx.write(\"}\")\n fx.close()", "def write_rdf(self, graph, filename, format='turtle'):\n logging.info(\"Writing %s triples to %s\" %\n (len(graph), filename))\n graph.bind('bib', BIB, override=True)\n graph.bind('dct', DCT, override=True)\n graph.bind('vivo', VIVO, override=True)\n graph.bind('bf', BF, override=True)\n graph.bind('madsrdf', MADSRDF, override=True)\n with open(filename, 'wb') as fh:\n fh.write(graph.serialize(format=format))", "def create_cnf_file(self):\n # Update the CNF string with all the Sudoku constraints in CNF form\n self.get_Sudoku_cnf_constraints()\n\n # Update the CNF string with the generated board\n self.get_generated_data()\n\n # Update the header value from info generated\n cnf_header = self.create_cnf_header()\n\n # print all values to a .CNF format file\n with open(self.path+'.CNF', 'w') as f:\n f.write(cnf_header)\n f.write(self.cnf)\n return", "def writeGraphs(self, path):\n f = open(path, 'w')\n writer = nx.readwrite.GraphMLWriter()\n writer.add_graphs(self.inputFrames)\n writer.dump(f)", "def graph_to_file( g, output_filepath = None ):\n if not output_filepath:\n _outfn = 
'output/workflows_output.rdf'\n else: _outfn = output_filepath\n g.serialize( _outfn )\n print(\"Written \"+str(len(g))+\" triples to \" + _outfn)", "def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n file.write(self.molecule_name + '\\n\\n')\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in self.mol_coords))\n file.write('\\n\\n')", "def _write_dot(self):\n if self.dot_file:\n write_dot(self.graph, self.dot_file)", "def write_dot_file(self, out_file_path):\n nx.nx_agraph.write_dot(self, out_file_path)", "def to_struct_file(self, f):\n if isinstance(f, str):\n f = open(f,'w')\n f.write(\"STRUCTURE {0}\\n\".format(self.name))\n f.write(\" NUGGET {0}\\n\".format(self.nugget))\n f.write(\" NUMVARIOGRAM {0}\\n\".format(len(self.variograms)))\n for v in self.variograms:\n f.write(\" VARIOGRAM {0} {1}\\n\".format(v.name,v.contribution))\n f.write(\" TRANSFORM {0}\\n\".format(self.transform))\n f.write(\"END STRUCTURE\\n\\n\")\n for v in self.variograms:\n v.to_struct_file(f)", "def compress(self):\n with open(self.in_path) as f, open(self.out_path, \"wb\") as o, open(self.g_path, \"wb\") as g:\n text = f.read().rstrip()\n freq = self.freq_dict(text)\n self.heap_list(freq)\n self.create_graph()\n self.make_code()\n encoded_text = self.encode_text(text)\n padded_encoded_text = self.pad_text(encoded_text)\n b = self.byte_array(padded_encoded_text)\n o.write(bytes(b))\n pickle.dump(self.node, g)\n print(\"Compressed\")", "def write_conll(self, fname):\n if 'label' not in self.fields:\n raise InvalidFieldsException(\"dataset is not in CONLL format: missing label field\")\n\n def instance_to_conll(inst):\n tab = [v for k, v in inst.items() if k != 'label']\n return '{}\\n{}'.format(inst['label'], '\\n'.join(['\\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)]))\n\n with open(fname, 'wb') as f:\n f.write('# {}'.format('\\t'.join([k for k in self.fields if k != 'label'])))\n for i, d in enumerate(self):\n f.write('\\n{}'.format(instance_to_conll(d)))\n if i != len(self) - 1:\n f.write('\\n')", "def write_SWC_tree_to_file(self,file_n) :\n raise Exception(\"Not yet implemented\")\n writer = open(file_n,'w')\n nodes = self.get_nodes()\n nodes.sort()\n for node in nodes :\n p3d = node.get_content()['p3d'] # update 2013-03-08\n p3d_string = p3d.swc_str()\n print 'p3d_string: ', p3d_string\n writer.write( p3d_string + '\\n' )\n writer.flush()\n writer.close() \n #print 'STree::writeSWCTreeToFile -> finished. Tree in >',fileN,'<'", "def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with open('bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))", "def write_to_file(self, filename):\n\t\twrite_network(self.network, filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the cvss_score of this VulnerabilityVulnerability.
def cvss_score(self, cvss_score): self._cvss_score = cvss_score
[ "def risk_score(self, risk_score: Union[float, PaillierCiphertext]) -> None:\n self._risk_score = risk_score", "def __setScore(self, score):\n\t\tself.score = score\n\t\treturn self.score", "def setNodeScore(self, score):\n self.score = score", "def set_input_score(self, score):\n pass", "def set_score(self,new_score):\n self.__fitness = new_score", "def qm_score(self, qm_score):\n self._qm_score = qm_score", "def change_score(self, new_score):\n raise NotImplementedError", "def cvss_v3(self, cvss_v3):\n\n self._cvss_v3 = cvss_v3", "def setMinScore(self, value) -> None:\n ...", "def update_score(self, score):\n change_text(self.score_text, \"SCORE: % 4d\" % score)", "def positive_score(self, positive_score):\n\n self._positive_score = positive_score", "def negative_score(self, negative_score):\n\n self._negative_score = negative_score", "def ss_score(self):\n ss_score = 0\n for i in range(len(self.query_ss)):\n # if, for one given residue i, the secondary structure of the query\n # is the same than the ss of the template, add the query\n # confidence score\n if (self.query_ss[i] == self.tpl_ss[i] and\n self.query_ss_conf[i] != \"-\"):\n ss_score += 1\n\n # divide score by number of included residues to normalize\n if ss_score: # If ss_score different from 0\n ss_score = ss_score/sum(map(lambda res: res != '-', self.tpl_ss))\n self.score = ss_score", "def update_score(self, score: float):\n if self.score == score:\n return\n self.score = score\n for edge in self._in_edges:\n edge.top.taint()", "def system_health_score(self, system_health_score):\n\n self._system_health_score = system_health_score", "def set_high_score(self):\n with shelve.open(c.high_score_file) as current_scores:\n if '1' in current_scores:\n self.high_score = current_scores['1']", "def reset_score(self):\n\n self.score = 0", "def sub_score (self, value):\n self._cvar.set(\n self._bind_low(\n self._cvar.get() - abs(int(value))\n )\n )", "def reset_score(self):\n self.score = 0\n self._set_score()", "def risk_score(self) -> Union[float, PaillierCiphertext]:\n if self._risk_score is None:\n raise AttributeError(\"risk score is undefined\")\n return self._risk_score" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the severity of this VulnerabilityVulnerability.
def severity(self, severity): self._severity = severity
[ "def set_severity(self, severity, operator):\n if severity not in VulnerabilityQuery.VALID_SEVERITY:\n raise ApiError(\"Invalid severity\")\n self._update_criteria(\"severity\", severity, operator)\n return self", "def severity(self, severity):\n\n if not severity:\n severity = \"info\"\n\n sev = [\"info\", \"normal\", \"warning\", \"error\", \"fatal\"]\n\n if severity in sev:\n self.__severity = severity\n else:\n raise SeverityError(f'\"{severity}\" not allowed as severity')", "def _set_severity(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'EMERGENCY': {}, 'ALERT': {}, 'CRITICAL': {}, 'ERROR': {}, 'WARNING': {}, 'NOTICE': {}, 'INFORMATIONAL': {}, 'DEBUG': {}},), is_leaf=True, yang_name=\"severity\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='syslog-severity', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"severity must be of a type compatible with syslog-severity\"\"\",\n 'defined-type': \"openconfig-access-points:syslog-severity\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'EMERGENCY': {}, 'ALERT': {}, 'CRITICAL': {}, 'ERROR': {}, 'WARNING': {}, 'NOTICE': {}, 'INFORMATIONAL': {}, 'DEBUG': {}},), is_leaf=True, yang_name=\"severity\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='syslog-severity', is_config=True)\"\"\",\n })\n\n self.__severity = t\n if hasattr(self, '_set'):\n self._set()", "def severity_filter(self, severity_filter):\n\n self._severity_filter = severity_filter", "def SetLogSeverity(self, log_level='info'):\n cond = self._SetLogSeverityFunc(self.getInstance(), _char_pt(log_level))\n self._check(cond, \"Failed to set log level\")", "def severity_in(self, severity_in):\n\n self._severity_in = severity_in", "def severity_gte(self, severity_gte):\n\n self._severity_gte = severity_gte", "def severity_gt(self, severity_gt):\n\n self._severity_gt = severity_gt", "def severity_lte(self, severity_lte):\n\n self._severity_lte = severity_lte", "def severity_lt(self, severity_lt):\n\n self._severity_lt = severity_lt", "def vulnerability_type(self, vulnerability_type):\n allowed_values = [\"Critical\", \"High\", \"Medium\", \"Low\"]\n if vulnerability_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `vulnerability_type` ({0}), must be one of {1}\"\n .format(vulnerability_type, allowed_values)\n )\n\n self._vulnerability_type = vulnerability_type", "def _set_alarm_severity(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'MINOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'UNKNOWN': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:UNKNOWN': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:CRITICAL': {'@namespace': 
'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'CRITICAL': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'WARNING': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'MAJOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:MAJOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:WARNING': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:MINOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}},), is_leaf=True, yang_name=\"alarm-severity\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform', defining_module='openconfig-platform', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"alarm_severity must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-platform:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'MINOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'UNKNOWN': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:UNKNOWN': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:CRITICAL': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'CRITICAL': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'WARNING': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'MAJOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:MAJOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:WARNING': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}, 'oc-alarm-types:MINOR': {'@namespace': 'http://openconfig.net/yang/alarms/types', '@module': 'openconfig-alarm-types'}},), is_leaf=True, yang_name=\"alarm-severity\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform', defining_module='openconfig-platform', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__alarm_severity = t\n if hasattr(self, '_set'):\n self._set()", "def severity_contains(self, severity_contains):\n\n self._severity_contains = severity_contains", "def monitor_set_verbosity(self, verbosity):\n if verbosity < 0:\n raise ValueError('Verbosity level can not be negative.')\n self.monitor_verbosity = verbosity", "def set_viability(self, viability):\n self.viable = viability", "def severity_not(self, severity_not):\n\n self._severity_not = severity_not", "def allowed(self, severity):\n return self.verbosity >= self.LEVELS[severity]", "def setVerbosity( verbosity_level):\n \n global verbosity\n verbosity = verbosity_level", "def severity_starts_with(self, severity_starts_with):\n\n self._severity_starts_with = 
severity_starts_with", "def setLevel(self, logLevel):\n\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the details of this VulnerabilityVulnerability.
def details(self, details): self._details = details
[ "def details(self, value):\n\t\tself._details = value", "def vat_details(self, vat_details):\n\n self._vat_details = vat_details", "def details(self, details: List[Details]):\n\n self._details = details", "def recruitment_details(self, recruitment_details):\n\n self._recruitment_details = recruitment_details", "def trade_details(self, trade_details):\n\n self._trade_details = trade_details", "def holder_detail(self, holder_detail):\n\n self._holder_detail = holder_detail", "def location_details(self, location_details):\n\n self._location_details = location_details", "def add_vulnerability(self, package_name, vuln_details=None):\n self.report[self.key_vulnerable][package_name] = vuln_details", "def pickup_details(self, pickup_details):\n\n self._pickup_details = pickup_details", "def tracking_number_details(self, tracking_number_details):\n\n self._tracking_number_details = tracking_number_details", "def set_viability(self, viability):\n self.viable = viability", "def set_info_details(self, value):\n self.gui.lbl_infodetails.setText(value)", "def transfer_detail(self, transfer_detail):\n\n self._transfer_detail = transfer_detail", "def ensureDetails(self):\n if not self.has_details:\n self.getDetails()", "def vulnerability_patch(self, vulnerability_patch):\n self._vulnerability_patch = vulnerability_patch", "def package_details(self, package_details):\n\n self._package_details = package_details", "def config_change_details(self, config_change_details):\n\n self._config_change_details = config_change_details", "def parse_vulnerability_details(self, element, entity):\n cve_id_list = self.parse_by_xpath_with_ns(element, True, True, 'CVE')\n if len(cve_id_list) == 1 and is_correct_cve_id(cve_id_list[0]):\n entity.cve_id = cve_id_list[0]\n else:\n return False\n\n summary_list = self.parse_by_xpath_with_ns(element, True, True, 'Notes',\n 'Note[@Title=\"Summary\"]')\n summary = concat_strings(summary_list, '')\n entity.summary = summary\n\n base_score_list = self.parse_by_xpath_with_ns(element, True, True, 'BaseScoreV3')\n base_score = ''\n for score in base_score_list:\n score = get_number_from_string(normalize_string(score))\n if is_correct_score(score):\n base_score = score\n break\n\n temporal_score_list = self.parse_by_xpath_with_ns(element, True, True, 'TemporalScoreV3')\n temp_score = ''\n for score in temporal_score_list:\n score = get_number_from_string(normalize_string(score))\n if is_correct_score(score):\n temp_score = score\n break\n\n if base_score != '' or temp_score != '':\n cvss_v3 = CvssV3(base_sc=base_score, temp_sc=temp_score)\n entity.cvss_v3 = cvss_v3\n\n vector_list = self.parse_by_xpath_with_ns(element, True, True, 'VectorV3')\n vector = ''\n for vector_item in vector_list:\n vector_item = normalize_string(vector_item)\n if is_correct_vector_v3(vector_item):\n vector = vector_item\n break\n entity.attack_vector = vector\n\n details_list = self.parse_by_xpath_with_ns(element, True, True, 'Notes', 'Note')\n details = ''\n for detail in details_list:\n details += normalize_string(detail)\n entity.details = details\n return True", "def windows_details(self, windows_details):\n\n self._windows_details = windows_details", "def setProductInfo(self, uid, **data):\n facade = self._getFacade()\n facade.setProductInfo(uid, **data)\n audit('UI.Device.Edit', uid, data_=data)\n return DirectResponse()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the cvss_v3 of this VulnerabilityVulnerability.
def cvss_v3(self, cvss_v3): self._cvss_v3 = cvss_v3
[ "def v3_data(self, v3_data):\n\n self._v3_data = v3_data", "def _3(self, _3):\n\n self.__3 = _3", "def box3(self, box3):\n\n self._box3 = box3", "def SetVariance(self, *args) -> \"void\":\n return _itkDiscreteGaussianDerivativeImageFilterPython.itkDiscreteGaussianDerivativeImageFilterIUC3IUC3_SetVariance(self, *args)", "def voltage_3_3(self):\n return self._voltage_3_3", "def setView3D( self ):\n\t\t(vup,vpn,vrp,d,b,du,f,C,R) = \\\n\t\t\tself.config('vup','vpn','vrp','d','b','basis','f','cols','rows')\n\t\t\n\t\tdv = du * R / C\n\t\tU = vup.cross(vpn)\n\t\tvup = vpn.cross(U) # vrc needs to be orthogonal\n\t\tvtm = Mtx()\n\t\tvtm.translate(-vrp[0],-vrp[1],-vrp[2])\n\t\t\n\t\tU.normalize()\n\t\tvup.normalize()\n\t\tvpn.normalize()\n\t\tvtm.rotateXYZ( U, vup,vpn )\n\t\t\n\t\tvtm.translate(0,0,d)\n\t\t\n\t\t# scale to cvv\n\t\tvrp = vtm.form_vector( vrp )\n\t\tb += d\n\n\t\tvtm.scale(2*d/(b*du),2*d/(b*dv),1/b)\n\t\tvtm.transform[3,2] /= b\n\t\tf = ( vrp[2] + f ) / b\n\t\t\t\t\n\t\td /= b\n\t\tvtm.perspective( d )\n\n\t\tvtm.scale2D( -C/(2*d), -R/(2*d) )\n\t\tvtm.translate2D( C/2, R/2 )\n\t\t\n\t\tself._camera['vtm'] = vtm\n\t\tself.config(vup=vup,vrp=vrp,b=b,f=f,d=d)\n\n\t\tvtm.camera = self # tricksy cyclical hack, done with care\n\t\treturn vtm", "def option3_ol(self, option3_ol):\n\n self._option3_ol = option3_ol", "def SetVariance(self, *args) -> \"void\":\n return _itkDiscreteGaussianDerivativeImageFilterPython.itkDiscreteGaussianDerivativeImageFilterIF3IF3_SetVariance(self, *args)", "def __init__(self, v3store):\n self._v3store = v3store", "def SetVariance(self, *args) -> \"void\":\n return _itkDiscreteGaussianDerivativeImageFilterPython.itkDiscreteGaussianDerivativeImageFilterIUS3IUS3_SetVariance(self, *args)", "def set_attr_3(self, value):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.attr3\", self._object._eco_id, value)\r\n p2e._app.Exec(arg_str)", "def code3(self, code3):\n\n self._code3 = code3", "def third_vec_vec(self, x, v):\n raise NotImplementedError('Third derivative oracle is not implemented.')", "def set_3dn(self, o3dn):\n self._set_3dn(o3dn)", "def swap(self, v: 'vectoritkImageCF3') -> \"void\":\n return _itkImagePython.vectoritkImageCF3_swap(self, v)", "def setValue(self, *args):\n return _coin.SoSFVec3f_setValue(self, *args)", "def setValue(self, *args) -> \"SbVec3f &\":\n return _coin.SbVec3f_setValue(self, *args)", "def setValues(self, *args):\n return _coin.SoMFVec3s_setValues(self, *args)", "def setValue(self, *args) -> \"void\":\n return _coin.SoMFVec3s_setValue(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the windows_details of this VulnerabilityVulnerability.
def windows_details(self, windows_details): self._windows_details = windows_details
[ "def __update_os_details(self):\n self.os_details['os_name'] = self.os_details.get('ProductName',\n 'Windows')\n self.os_details['distro'] = 'Windows'\n self.os_details['str_os_kernel_bld'] = self.os_details.get('BuildLab',\n \"\")\n self.os_details['installed_apps_list'] = \\\n self.os_details['installed_app'].split(',')", "def details(self, value):\n\t\tself._details = value", "def details(self, details: List[Details]):\n\n self._details = details", "def location_details(self, location_details):\n\n self._location_details = location_details", "def tracking_number_details(self, tracking_number_details):\n\n self._tracking_number_details = tracking_number_details", "def _save_system_info(self, system_info):\n super(Firewall, self)._save_system_info(system_info)\n self.multi_vsys = system_info['system']['multi-vsys'] == 'on'", "def recruitment_details(self, recruitment_details):\n\n self._recruitment_details = recruitment_details", "def vat_details(self, vat_details):\n\n self._vat_details = vat_details", "def config_change_details(self, config_change_details):\n\n self._config_change_details = config_change_details", "def trade_details(self, trade_details):\n\n self._trade_details = trade_details", "def windows_configuration(self) -> Optional[pulumi.Input['OrchestratedVirtualMachineScaleSetOsProfileWindowsConfigurationArgs']]:\n return pulumi.get(self, \"windows_configuration\")", "def ignore_details(self, ignore_details):\n\n self._ignore_details = ignore_details", "def platform_info(self, platform_info):\n\n self._platform_info = platform_info", "def platform_windows(mocker: MockerFixture) -> None:\n mocker.patch(\"platform.system\", return_value=\"Windows\")", "def save_case_details(self, case_details_tuple, file_name):\n\n\t\t(case_number, parties_involved,\n\t\tcase_filed_date, case_closed_date,\n\t\tpacer_case_id, additional_info_json) = case_details_tuple\n\t\tMETADATA = 1\n\t\tDEFAULT = 2\n\t\tpage_value_json = {}\n\n\t\t#Save details into the database\n\t\tcase_filed_date = case_filed_date.strip('\\n')\n\t\tcase_closed_date = case_filed_date.strip('\\n')\n\t\tcase_filed_date = case_filed_date if case_filed_date != '' else None\n\t\tcase_closed_date = case_closed_date if case_closed_date != '' else None\n\t\tif case_filed_date is not None:\n\t\t\tsplit_case_filed_date = case_filed_date.split('/')\n\t\t\tcase_filed_date = split_case_filed_date[2] + '/' + split_case_filed_date[0] + '/' + split_case_filed_date[1]\n\t\tif case_closed_date is not None:\n\t\t\tsplit_case_closed_date = case_closed_date.split('/')\n\t\t\tcase_closed_date = split_case_closed_date[2] + '/' + split_case_closed_date[0] + '/' + split_case_closed_date[1]\n\n\t\t#Check for the pacer_case_id\n\t\tself.connection_cursor.execute(\"\"\"SELECT pacer_case_id from courtcase\n\t\t\t\t\t\t\t\t\t\t WHERE pacer_case_id = %s\"\"\",\n\t\t\t\t\t\t\t\t\t\t (pacer_case_id,))\n\t\texisting_pacer_case_id = self.connection_cursor.fetchone()\n\n\t\tself.connection_cursor.execute(\"SELECT id from download_tracker ORDER BY id DESC LIMIT 1\")\n\t\tdownload_tracker_id = self.connection_cursor.fetchall()\n\t\tcourtcase_insert_query = \"\"\"INSERT INTO courtcase(download_tracker_id, courtcase_source_value, pacer_case_id, case_number,\n\t\t\t\t\t\t\t\t\t parties_involved, case_filed_date, case_closed_date)\n\t\t\t\t\t\t\t\t\t VALUES(%s, %s, %s, %s, %s, %s, %s)\"\"\"\n\t\tcourtcase_source_value = METADATA\n\t\tself.connection_cursor.execute(courtcase_insert_query,\n\t\t\t\t\t\t\t\t(download_tracker_id, courtcase_source_value, 
pacer_case_id,\n\t\t\t\t\t\t\t\tcase_number, parties_involved,\n\t\t\t\t\t\t\t\tcase_filed_date, case_closed_date,))\n\t\tself.database_connection.commit()\n\n\t\t#Save contents into the addional_info table\n\t\tself.connection_cursor.execute(\"SELECT id from courtcase ORDER BY id DESC LIMIT 1\")\n\t\tcourtcase_id = self.connection_cursor.fetchall()\n\t\tadditional_info_insert_query = \"\"\"INSERT INTO additional_info(courtcase_id, additional_info_json)\n\t\t\t\t\t\t\t\t\tVALUES(%s, %s)\"\"\"\n\t\tself.connection_cursor.execute(additional_info_insert_query, (courtcase_id, additional_info_json,))\n\t\tself.database_connection.commit()\n\n\t\t#Save into the courtcase_source_data_path table\n\t\tpage_value_json['CASE'] = '/home/mis/DjangoProject/cso_login/extractor/contents/case/' + file_name\n\t\tpage_value_json = json.dumps(page_value_json)\n\t\tself.connection_cursor.execute(\"SELECT id from courtcase ORDER BY id DESC LIMIT 1\")\n\t\tcourtcase_id = self.connection_cursor.fetchall()\n\t\tcourtcase_source_data_path_insert_query = \"\"\"INSERT INTO courtcase_source_data_path(courtcase_id, page_value_json)\n\t\t\t\t\t\t\t\t\tVALUES(%s, %s)\"\"\"\n\t\tself.connection_cursor.execute(courtcase_source_data_path_insert_query, (courtcase_id, page_value_json,))\n\t\tself.database_connection.commit()", "def legacy_nwinfo(self, *args, **kwargs):\n return False", "def add_browser_screen_width(self, width=None):\n\n self.__data_structure_util.add_to_dict(\n source_dict=self.__system_data_set,\n working_dict=self.__system_data_structure,\n new_key=self.__SYSTEM_DATA_KEY,\n new_dict={self.__data_sets.SYSTEM_BROWSER_SCREEN_WIDTH: width}\n )", "def advapi32_TraceSetInformation(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"SessionHandle\", \"InformationClass\", \"TraceInformation\", \"InformationLength\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_win2012r2_winrm(self):\n self.override_profile_config(\n \"ec2-win2012r2-test\",\n {\n \"userdata_file\": self.copy_file(\"windows-firewall.ps1\"),\n \"win_installer\": self.copy_file(self.installer),\n \"winrm_ssl_verify\": False,\n \"use_winrm\": True,\n },\n )\n self._test_instance(\"ec2-win2012r2-test\", debug=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the source_update_time of this VulnerabilityVulnerability.
def source_update_time(self, source_update_time): self._source_update_time = source_update_time
[ "def local_update_time(self, local_update_time):\n\n self._local_update_time = local_update_time", "def set_source_path(self, source_path):\n\n self.source_path = source_path", "def set_last_update_time(self, time):\n self.last_updated = time", "def update_venue_timestamps(\n self, venue: Venue, update_time: datetime.datetime = None\n ) -> None:\n LOG.debug(\n \"Setting check time for %s to %s and update time to %s\",\n venue,\n self.check_timestamp,\n update_time,\n )\n venue.tap_list_last_check_time = self.check_timestamp\n if update_time:\n venue.tap_list_last_update_time = update_time\n venue.save()", "def update_auth_source_on_device(self, source):\n params = dict(\n type=source\n )\n uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(\n self.client.provider['server'],\n self.client.provider['server_port']\n )\n resp = self.client.api.patch(uri, json=params)\n try:\n response = resp.json()\n except ValueError as ex:\n raise F5ModuleError(str(ex))\n\n if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:\n return True\n raise F5ModuleError(resp.content)", "def update(source):", "def _update_source(self, params: ParamsMap):", "def local_update_time_lt(self, local_update_time_lt):\n\n self._local_update_time_lt = local_update_time_lt", "def local_update_time_lte(self, local_update_time_lte):\n\n self._local_update_time_lte = local_update_time_lte", "def set_timestamp(self, timeval):\n _ldns.ldns_pkt_set_timestamp(self, timeval)\n #parameters: ldns_pkt *,struct timeval,\n #retvals: ", "def __update_time(self) -> None:\n self._last_checked_time = time.time()", "def local_update_time_in(self, local_update_time_in):\n\n self._local_update_time_in = local_update_time_in", "def updated_at_lt(self, updated_at_lt):\n\n self._updated_at_lt = updated_at_lt", "def local_update_time_gt(self, local_update_time_gt):\n\n self._local_update_time_gt = local_update_time_gt", "def setTimeStamp(self, ts):\r\n \tself.timeStamp = ts", "def refresh_update_date(self):\n self.last_updated = datetime.datetime.now()", "def local_update_time_not(self, local_update_time_not):\n\n self._local_update_time_not = local_update_time_not", "def source_tags_count(self, source_tags_count):\n\n self._source_tags_count = source_tags_count", "def set_timestamp(self, timestamp):\n self.timestamp = LogEntry.normalize_timestamp(timestamp)", "def local_update_time_starts_with(self, local_update_time_starts_with):\n\n self._local_update_time_starts_with = local_update_time_starts_with" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads information from the .uio file format with relations. Returns a nested list where sublists contain the following token-level information.
def read_relations(path_to_file): relation_info = [] with open(path_to_file) as f: lines = f.readlines() for line in lines: if line.startswith("# sent_id = "): continue elif line == "\n": continue elif line.startswith("# text = "): sent = line[10:-1] else: line_elem = line.strip().split("\t") relation = line_elem[1] orig_tag, l_ix_orig, r_ix_orig = line_elem[2].split(",") orig_token = sent[int(l_ix_orig):int(r_ix_orig)] target_tag, l_ix_target, r_ix_target = line_elem[3].split(",") target_token = sent[int(l_ix_target):int(r_ix_target)] relation_info.append([relation, orig_tag, orig_token, target_tag, target_token]) return relation_info
[ "def read_level(opt: Config):\n # Multi-Input not implemented, but if we wanted to use it, we would need to sync the tokens\n\n # with World Files, we need the coords of our actual level\n if not opt.coords:\n # Default coords: Ruins\n opt.coords = ((1044, 1060), (64, 80), (1104, 1120)) # y, z, x\n\n level, uniques, props = read_level_from_file(opt.input_dir, opt.input_name, opt.coords,\n opt.block2repr, opt.repr_type)\n # Adjust token list depending on representation\n opt.token_list = uniques\n if opt.repr_type == \"autoencoder\":\n opt.token_list = torch.load('input/minecraft/simple_autoencoder_token_list.pt')\n if uniques != opt.token_list:\n raise AssertionError(\"Tokens were read in a different order than before\")\n\n opt.props = props # Properties need to be saved for later for rendering\n logger.info(\"Tokens in level {}\", opt.token_list)\n opt.nc_current = level.shape[1] # nc = number of channels\n return level", "def parse_file(self, level_file):\n # TODO: Add in the ability to have sensible whitespace\n levels = []\n current_level = []\n lines = self.open_and_read_file(level_file)\n\n for line in lines:\n\n # Ignore comment lines\n if self.is_comment(line):\n continue\n\n # Split up the string into a proper list\n line = [l for l in line]\n\n # If the line is real, we want it\n if line:\n current_level.append(line)\n\n # At blank line, finalize the current level and add\n # it to ``this.levels``\n else:\n if current_level:\n self.add_to_levels(levels, current_level)\n current_level = []\n\n # Don't forget trailing content if the file didn't end\n # with an empty newline\n if current_level:\n self.add_to_levels(levels, current_level)\n\n return levels", "def parse(self):\n lines = self.data.splitlines()\n level = 1\n bounds = []\n for i, x in enumerate(lines):\n if re.search(r'^\\*{' + str(level) + '} ', x):\n bounds.append(i)\n bounds.append(len(lines)) # To get the last heading and its content\n\n trees = []\n for i in range(len(bounds) - 1):\n trees.append(lines[bounds[i]:bounds[i+1]])\n\n for tree in trees:\n self.children.append(OrgNode('\\n'.join(tree), **self.properties))", "def read_hier_references(jams_file, annotation_id=0, exclude_levels=[]):\n hier_bounds = []\n hier_labels = []\n hier_levels = []\n jam = jams.load(jams_file)\n namespaces = [\"segment_salami_upper\", \"segment_salami_function\",\n \"segment_open\", \"segment_tut\", \"segment_salami_lower\"]\n\n # Remove levels if needed\n for exclude in exclude_levels:\n if exclude in namespaces:\n namespaces.remove(exclude)\n\n # Build hierarchy references\n for ns in namespaces:\n ann = jam.search(namespace=ns)\n if not ann:\n continue\n ref_inters, ref_labels = ann[annotation_id].to_interval_values()\n hier_bounds.append(utils.intervals_to_times(ref_inters))\n hier_labels.append(ref_labels)\n hier_levels.append(ns)\n\n return hier_bounds, hier_labels, hier_levels", "def generate(self):\n # File opening\n with open(self.file) as file:\n level_structure = []\n # We map the file line by line\n for line in file:\n level_line = []\n # We map all the line's sprites (letters)\n for sprite in line:\n # We ignore the end of line sprites\n if sprite != '\\n':\n # If not end of line we add the sprite to the line\n level_line.append(sprite)\n # We then add the line to the level list level_structure\n level_structure.append(level_line)\n # We save the level_structure\n self.structure = level_structure", "def import_ability_tree(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n def 
parse_line(line):\n name_list = re.findall(\"(?<=]).+(?=:)\", line)\n label_list = re.findall(\"\\[.+\\]\", line)\n level_list = re.findall(\"(?<=:)[\\d\\s-]+\", line)\n if any(len(l)!=1 for l in [name_list,label_list,level_list]):\n raise Exception(\"Could not parse the following line:\\n\"+line)\n return name_list[0].strip(), label_list[0].strip(), level_list[0].strip()\n def indent_level(line):\n initial_spaces_list = re.findall(\"^[ ]+\", line)\n if not initial_spaces_list:\n return 0\n assert(len(initial_spaces_list) == 1)\n n = len(initial_spaces_list[0])\n if n%2==1:\n raise Exception(\"Encountered indentation issue while parsing the following line:\\n\"+line)\n return n/2\n def node_from_lines(lines):\n assert(lines)\n i = indent_level(lines[0])\n assert(all(indent_level(line)>i for line in lines[1:]))\n name, labels_str, level = parse_line(lines[0])\n try:\n labels = eval(labels_str)\n assert(len(labels)==2)\n except:\n raise Exception(\"Could not parse the labels list as a list with two numbers in the following line:\\n\"+lines[0])\n try:\n level_int = int(level)\n except ValueError:\n raise Exception(\"Could not parse ability level from the following line:\\n\"+lines[0])\n node = Node(name=name, level=level_int, labels=labels)\n child_lines = []\n for m in range(1,len(lines)):\n child_lines.append(lines[m])\n if m==len(lines)-1 or indent_level(lines[m+1])==i+1:\n node.add_child(node_from_lines(child_lines))\n child_lines=[]\n assert(not child_lines)\n return node\n return node_from_lines(lines)", "def read_graph(self):\n\n with open(self.filename,'r') as file:\n G = []\n for line in file:\n l = [] \n for number in line.split():\n # nodes are numbered from 1 to n\n # substract 1 to get a 0 to n-1 range\n l.append(int(number)-1)\n G.append(l)\n return G", "def read_puml_file(filename: str) -> List[Line]:\n with open(filename, 'r') as f:\n content = f.read()\n\n return [Line(filename, i + 1, x, x) for i, x in enumerate(content.split('\\n'))]", "def load_file(self):\n\n verbose('Reading file', self.filename)\n with open(os.path.join('assets', 'levels', self.filename)) as level_file:\n # prefers that to readlines() as there is not trailing \"\\n\"\n rows = level_file.read().splitlines()\n\n num = 0\n lev = []\n current = []\n title = None\n\n for r in rows:\n if r == '':\n # end of level\n if current != []:\n lev.append((title, current))\n current = []\n title = None\n continue\n\n if r[0] == ';':\n continue\n\n if r.startswith('Title: '):\n title = r[7:]\n\n # check if this is a valid line:\n if not valid_soko_line(r):\n continue\n\n current.append(r) # row belongs to level\n\n self.level_lines = lev", "def readData():\n\n\ttempList=[]\n\n\tfin = open(\"map\",\"r\")\n\tmapping={}\n\tfor line in fin:\n\t\tline=line.rstrip()\n\t\ttempList=line.split(\" \")\n\t\tmapping[int(tempList[1])]=int(tempList[0])\n\tfin.close()\n\n\tfin = open(\"fieldProfile\",\"r\")\n\n\ti=0\n\tfor line in fin:\n\t\tline=line.rstrip()\n\t\ttempList=line.split(\" \")\n\t\tfields[tempList[0]]=i\n\t\ti+=1\n\tfin.close()\n\n\tfin = open(\"graph\",\"r\")\n\tfor line in fin:\n\t\tline=line.rstrip()\n\t\ttempList=line.split(\" \")\n\t\tif int(tempList[0]) not in edges:\n\t\t\tedges[int(tempList[0])]=set()\n\t\tedges[int(tempList[0])].add(int(tempList[1]))\n\n\t\tif int(tempList[1]) not in edges:\n\t\t\tedges[int(tempList[1])]=set()\n\t\tedges[int(tempList[1])].add(int(tempList[0]))\n\n\t\t# if int(tempList[1])<int(tempList[0]):\n\t\t# \tprint \"Error\"\n\tfin.close()\n\n\n\tfin = 
open(\"belongingnessVectorDict\",\"r\")\n\n\ttempDict={}\n\tfor line in fin:\n\t\tline=line.rstrip()\n\t\ttempList=line.split(\" \",1)\n\n\t\tif int(tempList[0]) not in mapping:\n\t\t\tcontinue\n\t\tauthId=mapping[int(tempList[0])]\n\t\tbelongingnessVectorDict[authId]=[]\n\t\tfor f in fields:\n\t\t\tbelongingnessVectorDict[authId].append(0.0)\n\t\ttempDict=eval(tempList[1])\n\t\tfor f in tempDict:\n\t\t\t(belongingnessVectorDict[authId])[fields[f]]=tempDict[f]\n\n\tfin.close()", "def ReadObjectHierarchy(ifile):\n\n Objects[:] = []\n ObjectLevels[:] = []\n\n if not os.path.isfile(ifile):\n logging.debug('no *-hierarchy.tx')\n return\n\n INPUT = open(ifile, 'r', encoding='utf-8')\n\n # Only emit objects if they are supposed to be documented, or if\n # they have documented children. To implement this, we maintain a\n # stack of pending objects which will be emitted if a documented\n # child turns up.\n pending_objects = []\n pending_levels = []\n root = None\n tree = []\n for line in INPUT:\n m1 = re.search(r'\\S+', line)\n if not m1:\n continue\n\n gobject = m1.group(0)\n level = len(line[:m1.start()]) // 2 + 1\n\n if level == 1:\n root = gobject\n\n while pending_levels and pending_levels[-1] >= level:\n pending_objects.pop()\n pending_levels.pop()\n\n pending_objects.append(gobject)\n pending_levels.append(level)\n\n if gobject in KnownSymbols:\n while len(pending_levels) > 0:\n gobject = pending_objects.pop(0)\n level = pending_levels.pop(0)\n xref = MakeXRef(gobject)\n\n tree.append(' ' * (level * 4) + xref)\n Objects.append(gobject)\n ObjectLevels.append(level)\n ObjectRoots[gobject] = root\n # else\n # common.LogWarning(ifile, line_number, \"unknown type %s\" % object)\n #\n\n INPUT.close()\n\n # FIXME: use xml\n # my $old_tree_index = \"$DB_OUTPUT_DIR/tree_index.$xml\"\n old_tree_index = os.path.join(DB_OUTPUT_DIR, \"tree_index.sgml\")\n new_tree_index = os.path.join(DB_OUTPUT_DIR, \"tree_index.new\")\n\n logging.debug('got %d entries for hierarchy', len(tree))\n\n with open(new_tree_index, 'w', encoding='utf-8') as out:\n out.write(MakeDocHeader(\"screen\"))\n out.write(\"\\n<screen>\\n\")\n out.write(AddTreeLineArt(tree))\n out.write(\"\\n</screen>\\n\")\n\n common.UpdateFileIfChanged(old_tree_index, new_tree_index, 0)\n\n OutputObjectList()", "def LoadLevels():\n tree = ElementTree.parse(STANDARD_LEVELS_FILENAME)\n root = tree.getroot()\n levels = []\n \n for levelElement in root.findall('level'):\n levels.append(LoadLevel(levelElement))\n return levels", "def sokoban_load_levels(filename):\n level_list = []\n board = []\n with open(filename) as level_file: # open file\n y_axis = 0 # current y_axis we are on.\n for line in level_file: # for every line in file\n if line == \"\\n\": # if the current line is a \"new line\", we reached a new level. 
Save current level to level list and reset the temp lists.\n level_list.append(board)\n board = []\n y_axis = 0 # reset y_axis\n objects_in_line = list(line) #convert the current line in to a list\n for x, obj in enumerate(objects_in_line): # go through each object in the list\n if obj not in \"\\n \": #if the object is not a new line, add it to correct temp list\n add_to_objectlist(obj, x, y_axis,board)\n\n y_axis += 1 #increment y_axis because we are moving down a line in the file\n return level_list", "def conllu2list():\n data_file_1 = os.path.join('data','corpora','UD_Swedish-Talbanken','sv_talbanken-ud-train.conllu')\n data_file_2 = os.path.join('data','corpora','UD_Swedish-Talbanken','sv_talbanken-ud-test.conllu')\n data_file_3 = os.path.join('data','corpora','UD_Swedish-Talbanken','sv_talbanken-ud-dev.conllu')\n sentences = []\n corpus = []\n \n # Read conllu files\n with open(data_file_1, 'r', encoding='utf8') as f:\n data = f.read()\n sentences.extend(parse(data))\n with open(data_file_2, 'r', encoding='utf8') as f:\n data = f.read()\n sentences.extend(parse(data))\n with open(data_file_3, 'r', encoding='utf8') as f:\n data = f.read()\n sentences.extend(parse(data))\n \n # Extract tokens and POS tags\n for sentence in sentences:\n sent = []\n for token in sentence:\n sent.append((token['form'], token['upostag']))\n corpus.append(sent)\n \n # Save the corpus\n with open(os.path.join('data','corpora','UD_Swedish-Talbanken.pkl'), 'wb') as f:\n pickle.dump(corpus, f, 4)", "def parse_info(self):\n msg(\"parsing u.info\")\n lines = file('/'.join((self.datadir,\"u.info\"))).read().split(\"\\n\")\n # users\n pair = lines[0].split()\n self.user_num = int(pair[0])\n \n # items\n pair = lines[1].split()\n self.item_num = int(pair[0])\n\n # ratings\n pair = lines[2].split()\n self.rating_num = int(pair[0])", "def _read_structure_attributes(f):\n\n line = ''\n variogram_info = {}\n while \"end structure\" not in line:\n line = f.readline()\n if line == '':\n raise Exception(\"EOF while reading structure\")\n line = line.strip().lower().split()\n if line[0].startswith('#'):\n continue\n if line[0] == \"nugget\":\n nugget = float(line[1])\n elif line[0] == \"transform\":\n transform = line[1]\n elif line[0] == \"numvariogram\":\n numvariograms = int(line[1])\n elif line[0] == \"variogram\":\n variogram_info[line[1]] = float(line[2])\n elif line[0] == \"end\":\n break\n elif line[0] == \"mean\":\n warning.warn(\"'mean' attribute not supported, skipping\",PyemuWarningF)\n else:\n raise Exception(\"unrecognized line in structure definition:{0}\".\\\n format(line[0]))\n assert numvariograms == len(variogram_info)\n return nugget,transform,variogram_info", "def get_levels(self):\n filename = 'levels.json'\n with open(filename, 'r') as read_file:\n data = json.load(read_file)\n return data", "def structure_representation(self):\n lines = []\n for token in self.tokens:\n head = token.head.id if token.head is not None else 0\n lemma = token.lemma if token.lemma is not None else '_'\n line = '{token.id}\\t{token.text}\\t{lemma}\\t{token.pos}\\t_\\t_\\t' \\\n '{head}\\t{token.dependency_relation}' \\\n '' \\\n ''\n line = line.format(token=token, lemma=lemma, head=head)\n lines.append(line)\n\n return '\\n'.join(lines)", "def read_data(path): \n print(\"Model will be tested on the following files {:}...\".format(path))\n\n list_l_tokens = []\n list_l_labels = []\n\n # Read file content\n with open(path, 'r', encoding='utf-8') as f:\n f_content_1 = f.readlines()\n for line in f_content_1:\n \n # All 
sentences start with the '#' token \n if line[0] == \"#\":\n \n list_l_tokens.append(line[1:].split())\n \n # Labels for each line = number of words in the line\n line_length = len(line[1:].split())\n\n\n list_labels = []\n list_tokens = []\n \n # Line for each label starts with a digit \n # and has the following form '23\tMediterranean\tLOC' \n # Add all labels for sentence to list of labels\n elif line[0].isdigit():\n num = int(line.split()[0])\n \n token = line.split()[1]\n label = line.split()[-1]\n\n list_labels.append(label)\n\n if num == (line_length-1):\n list_l_labels.append(list_labels)\n\n return list_l_tokens, list_l_labels" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if get_reply_delay() returns the value previously set with set_reply_delay().
def test(device): old_value = device.get_reply_delay() assert type(old_value) is int result = device.set_reply_delay(old_value + 100) assert result is None result = device.get_reply_delay() assert type(result) is int assert result == old_value + 100 # restore old value device.set_reply_delay(old_value)
[ "def delay(self, delay):\n delay = int(delay)\n if self.__handler.delay != int(delay):\n self.__handler.delay = delay\n debug('ReplyServer.delay: set to %d ms', delay)", "def trigger_checkDELAY(self):\n self.open.write('TRIGGER:DELAY?')\n reply = self.open.read() \n return('Trigger Delay: ' + str(reply))", "def is_delayed(self):\n return self.message_type == WisepillMessage.DELAYED_EVENT", "def delayedTextEnabled(self):\n return self._dtimer is not None", "def test_tweet_is_reply_nonreply(self):\n msg = {\n 'id_str': '12345',\n 'in_reply_to_status_id_str': None,\n 'text': '@fakeuser This is a reply.',\n 'user': {},\n }\n self.assertEqual(False, self.messagetools.tweet_is_reply(msg))", "def test_disable_if_replied(self):\n job = self.create_job(only_if_noreply=True, state='checking')\n with mock_instance('sndlatr.gmail.Mailman') as mailman:\n mailman.get_thread.return_value = [\n create_thread_mail(message_id='reply_id',\n from_name='Sender',\n from_email='x@y.com')]\n job.disable_if_replied('token')\n self.assertEqual(job.state, 'disabled')\n self.assertIsNotNone(job.disabled_reply)\n self.assertEqual(job.disabled_reply.message_id, 'reply_id')\n self.assertEqual(job.disabled_reply.from_name, 'Sender')\n self.assertEqual(job.disabled_reply.from_email, 'x@y.com')", "def test_tweet_is_reply(self):\n msg = {\n 'id_str': '12345',\n 'in_reply_to_status_id_str': '12344',\n 'text': '@fakeuser This is a reply.',\n 'user': {},\n }\n self.assertEqual(True, self.messagetools.tweet_is_reply(msg))", "def wait_for_response(self, timeout, msg_type=None):\n start = time.time()\n while (time.time() - start) < timeout and not self.has_msg_type(msg_type):\n time.sleep(self.wait_delay)\n return self.has_msg_type(msg_type)", "def delay_phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"delay_phone\")", "def _get_default_delay(self):\n\n return self._default_delay", "def test_14_get_reply_pr_ready(self):\n self.fake_sfile.set_reply_buf('300 PR Ready\\r\\nprl1\\nprl2\\nprl3\\n.\\r\\n')\n self.assertEquals(self.conn._get_reply(False), ['prl1', 'prl2', 'prl3'])", "def is_default_not_only_reply_option(self, task: Dict) -> bool:\n is_default_not_only_reply_option_res: bool = True\n\n reply_options = set(\n map(str.upper, task.get(\"message\", {}).get(\"replyOptions\", []))\n )\n\n if len(reply_options) == 1 and \"#default#\".upper() in reply_options:\n error_message, error_code = Errors.playbook_only_default_reply_option(\n task.get(\"id\")\n )\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n self.is_valid = is_default_not_only_reply_option_res = False\n\n return is_default_not_only_reply_option_res", "def static_check_sent_message_response() -> None:\n # Reply and non-reply sets should not overlap: This check should be static\n overlap = set(NO_REPLY_EXPECTED).intersection(set(VALID_REPLY_MESSAGE_MAP.keys()))\n if len(overlap) != 0:\n raise AssertionError(f\"Overlapping NO_REPLY_EXPECTED and VALID_REPLY_MESSAGE_MAP values: {overlap}\")", "def is_returned(self):\n\t\treturn self.return_time is not None", "def get_notification_delay(self) -> int:\n return self._settings[NOTIFICATION_DELAY_KEY].get_value()", "def queuing_delay(self):\n return self._queuing_delay", "def process_timeouts(self) -> bool:\n if self.timeout is not None:\n return self._update_timeouts()\n return False", "def get_rc_delay(self):\n pass", "def _valid_reply(self, header, sender, dest_replicas):\n return self.replies_manager.expects_seq_num(header.req_seq_num) and sender in dest_replicas" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function creates a new temporary pdf file with the same content, assigns the given password to the pdf and renames it to the original file.
def set_password(input_file, user_pass): # temporary output file with name same as input file but prepended # by "temp_", inside same direcory as input file. owner_pass=user_pass path, filename = os.path.split(input_file) output_file = os.path.join(path, "temp_" + filename) output = PdfFileWriter() input_stream = PdfFileReader(open(input_file, "rb")) for i in range(0, input_stream.getNumPages()): output.addPage(input_stream.getPage(i)) outputStream = open(output_file, "wb") # Set user and owner password to pdf file output.encrypt(user_pass, owner_pass, use_128bit=True) output.write(outputStream) outputStream.close() try: os.unlink(input_file) except (OSError, IOError): _logger.error('Error when trying to remove file %s' % input_file) # Rename temporary output file with original filename, this # will automatically delete temporary fileư #os.rename(output_file, input_file) return output_file
[ "def set_password(input_file, user_pass, owner_pass):\n # temporary output file with name same as input file but prepended\n # by \"temp_\", inside same direcory as input file.\n path, filename = os.path.split(input_file)\n output_file = os.path.join(path, \"temp_\" + filename)\n\n output = PyPDF2.PdfFileWriter()\n\n input_stream = PyPDF2.PdfFileReader(open(input_file, \"rb\"))\n\n for i in range(0, input_stream.getNumPages()):\n output.addPage(input_stream.getPage(i))\n\n outputStream = open(output_file, \"wb\")\n\n # Set user and owner password to pdf file\n output.encrypt(user_pass, owner_pass, use_128bit=True)\n output.write(outputStream)\n outputStream.close()\n\n # Rename temporary output file with original filename, this\n # will automatically delete temporary file\n os.rename(output_file, input_file)", "def protect_pdf(update, password):\n if pdf_to_protect:\n output_dir = pdf_to_protect.split('.pdf')[0]\n love_protect(pdf_to_protect, password, output_dir)\n protected_file = result_file(output_dir)\n if protected_file:\n update.effective_message.chat.send_action(\n ChatAction.UPLOAD_DOCUMENT)\n update.effective_message.reply_document(\n document=open(f\"{output_dir}/{protected_file}\", \"rb\"),\n caption=\"✨ Here is your protected PDF file\",\n )\n else:\n usr_msg(update)\n bye(update)\n del_tmp()\n return ConversationHandler.END", "def rotate_pdf(update, angle):\n if pdf_to_rotate:\n output_dir = pdf_to_rotate.split('.pdf')[0]\n love_rotate(pdf_to_rotate, output_dir, int(angle))\n rotated_file = result_file(output_dir)\n if rotated_file:\n update.effective_message.chat.send_action(\n ChatAction.UPLOAD_DOCUMENT)\n update.effective_message.reply_document(\n document=open(f\"{output_dir}/{rotated_file}\", \"rb\"),\n caption=f\"✨ Here is your {angle} rotated PDF file\",\n )\n else:\n usr_msg(update)\n bye(update)\n del_tmp()\n return ConversationHandler.END", "def check_pdf_protect(update, context):\n doc = update.message.document\n if file_ok(update=update, usr_file=doc):\n file_id = doc.file_id\n file_path = f\"./tmp/{file_id}\"\n os.mkdir(file_path)\n usr_file = context.bot.getFile(file_id)\n usr_file.download(f\"{file_path}.pdf\")\n global pdf_to_protect\n pdf_to_protect = f\"{file_path}.pdf\"\n msg = \"Send me the password to protect the PDF file, please 🔑\"\n return ask_file(update, msg, WAIT_PROTECT)", "def _create_temp_password_file(user, password, filename):\n\n with open(filename) as f:\n file_data = f.readlines()\n stat_info = os.stat(filename)\n tmpfile = '%s.tmp.%d' % (filename, os.getpid())\n\n # We have to use os.open() so that we can create the file with\n # the appropriate modes. If we create it and set modes later,\n # there's a small point of time where a non-root user could\n # potentially open the file and wait for data to be written.\n fd = os.open(tmpfile,\n os.O_CREAT | os.O_TRUNC | os.O_WRONLY,\n stat_info.st_mode)\n f = None\n success = False\n try:\n os.chown(tmpfile, stat_info.st_uid, stat_info.st_gid)\n f = os.fdopen(fd, 'w')\n for line in file_data:\n if line.startswith('#'):\n f.write(line)\n continue\n try:\n (s_user, s_password, s_rest) = line.split(':', 2)\n except ValueError as exc:\n f.write(line)\n continue\n if s_user != user:\n f.write(line)\n continue\n if s_password.startswith('$'):\n # Format is '$ID$SALT$HASH' where ID defines the\n # ecnryption type. 
We'll re-use that, and make a salt\n # that's the same size as the old\n salt_data = s_password[1:].split('$')\n salt = '$%s$%s$' % (salt_data[0],\n _make_salt(len(salt_data[1])))\n else:\n # Default to MD5 as a minimum level of compatibility\n salt = '$1$%s$' % _make_salt(8)\n enc_pass = crypt.crypt(password, salt)\n f.write(\"%s:%s:%s\" % (s_user, enc_pass, s_rest))\n f.close()\n f = None\n success = True\n except Exception as exc:\n logging.error(\"Couldn't create temporary password file: %s\" % str(e))\n raise\n finally:\n if not success:\n # Close the file if it's open\n if f:\n try:\n os.unlink(tmpfile)\n except Exception as exc:\n pass\n # Make sure to unlink the tmpfile\n try:\n os.unlink(tmpfile)\n except Exception as exc:\n pass\n\n return tmpfile", "def makePdf(self):\n name = tkSimpleDialog.askstring('Input','Enter the desired name for the PDF, without suffix')\n if name is not None:\n proxymaker.writeData(self.cardmodel, self.safeHome+\"/\"+name+\".pdf\")", "def make_pdf_filename(paper, pdfcontent=None):\n if paper.title in [\"\", None]:\n if pdfcontent:\n paper.title = make_hash(pdfcontent)\n else:\n paper.title = make_random_string()\n\n pdf_filename = \"{}.pdf\".format(paper.title)\n\n # don't create directories\n pdf_filename = pdf_filename.replace(\"/\", \"_\")\n\n return pdf_filename", "def check_pdf_rotate(update, context):\n doc = update.message.document\n if file_ok(update=update, usr_file=doc):\n file_id = doc.file_id\n file_path = f\"./tmp/{file_id}\"\n os.mkdir(file_path)\n usr_file = context.bot.getFile(file_id)\n usr_file.download(f\"{file_path}.pdf\")\n global pdf_to_rotate\n pdf_to_rotate = f\"{file_path}.pdf\"\n msg = \"Send me the rotation angle you want, please ↩️. \" \\\n f\"*Allowed angles are: {' ,'.join(allowed_rot)}.*\"\n return ask_file(update, msg, WAIT_ANGLE)", "def copy_pdf(pdf_name: str) -> None:\n problem_folder = Paths().get_problem_dir()\n output_folder = Paths().get_output_dir()\n info_log(\"Copying problem PDF file.\")\n pdf_name += '.pdf'\n verify_path(os.path.join(problem_folder, pdf_name))\n shutil.copy2(os.path.join(problem_folder, pdf_name),\n os.path.join(output_folder, pdf_name))", "def unlock_pdf(update, context):\n\n doc = update.message.document\n if file_ok(update=update, usr_file=doc):\n msg = \"please wait a moment while I unlock it for you...\"\n usr_msg(update=update, msg=msg, error=False)\n file_id = doc.file_id\n usr_file = context.bot.getFile(file_id)\n file_path = f\"./tmp/{file_id}\"\n usr_file.download(f\"{file_path}.pdf\")\n # a file_id folder is created to know where the file is\n os.mkdir(file_path)\n # unlock the file\n love_unlock(f\"{file_path}.pdf\", file_path)\n # get the path of the compressed file\n unlocked_file = result_file(file_path)\n if unlocked_file:\n update.effective_message.chat.send_action(\n ChatAction.UPLOAD_DOCUMENT)\n update.effective_message.reply_document(\n document=open(f\"{file_path}/{unlocked_file}\", \"rb\"),\n caption=\"✨ Here is your unlocked file\",\n )\n else:\n usr_msg(update)\n bye(update)\n del_tmp()\n return ConversationHandler.END", "def creaPdf(ruta):\n\tglobal pdf\n\texiste = path.exists(ruta)\t\n\tif not existe:\n\t\tprint \"La ruta no existe, se guardara en el directorio actual\"\n\t\truta = \".\"\n\tarchivo = ruta +\"/Report-\" + time.strftime(\"%d%m%y-%H%M%S\")+\".pdf\"\t\n\tpdf = PDFDocument(archivo)\n\tpdf.init_report()\n\tprint \"Guardando en \" + archivo", "def pdf_for_users(output_fname, usernames, year, comp_date_str,\n link, private_key_file=None):\n\n merged_fd, 
merged_ps_file = tempfile.mkstemp()\n generate_and_merge_ps(merged_ps_file, usernames, year, comp_date_str,\n link, private_key_file)\n ps2pdf(merged_ps_file, output_fname)\n os.close(merged_fd)\n os.remove(merged_ps_file)", "def render_pdf(template, context, pwd=None):\n\n # from organization.apps import get_company_config\n # context['company_logo'] = get_company_config('logo')\n # context['company_name'] = get_company_config('name', 'Add Company Name in Configuration')\n\n outfile = BytesIO()\n pdf = pisa.CreatePDF(template.render(context), outfile, link_callback=resolve_links)\n\n if pdf.err:\n outfile = StringIO('Error generating PDF:<br />\\n<pre>%s</pre>' % pdf.err)\n elif pwd:\n # If `pwd` was specified, use it to encrypt the PDF:\n wr, rdr = PdfFileWriter(), PdfFileReader(outfile)\n for page in rdr.pages:\n wr.addPage(page)\n wr.encrypt(pwd, use_128bit=True)\n outfile = StringIO()\n wr.write(outfile)\n return outfile.getvalue()", "def _make_password_file(password):\n try:\n fd, path = tempfile.mkstemp()\n os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)\n with os.fdopen(fd, 'w') as f:\n f.write(password)\n yield path\n utils.delete_if_exists(path)\n except Exception as exc:\n with excutils.save_and_reraise_exception():\n utils.delete_if_exists(path)", "def _create_temp_file(self, content=None):\n\n file = tempfile.NamedTemporaryFile(mode='w+b', delete=False)\n if content is not None:\n if isinstance(content, str):\n content = content.encode(self.encoding)\n file.write(content)\n file.close()\n return file.name", "def clean_temp_files():\r\n delete_files('tmp/decrypted_pdf')\r\n delete_files('tmp/txt')\r\n return 1", "def compress_pdf(update, context):\n\n doc = update.message.document\n if file_ok(update=update, usr_file=doc):\n usr_msg(update=update,\n msg=\"please wait a moment while I compress it for you...\",\n error=False)\n file_id = doc.file_id\n usr_file = context.bot.getFile(file_id)\n file_path = f\"./tmp/{file_id}\"\n usr_file.download(f\"{file_path}.pdf\")\n # a file_id folder is created to know where the file is\n os.mkdir(file_path)\n # compress the file\n love_compress(file_path)\n # get the path of the compressed file\n compressed_file = result_file(file_path)\n if compressed_file:\n update.effective_message.chat.send_action(\n ChatAction.UPLOAD_DOCUMENT)\n update.effective_message.reply_document(\n document=open(f\"{file_path}/{compressed_file}\", \"rb\"),\n caption=\"✨ Here is your compressed file\",\n )\n else:\n usr_msg(update)\n bye(update)\n else:\n compress(update, context)\n del_tmp()\n return ConversationHandler.END", "def _store_temp_files(self):\n self.cert_temp = tempfile.NamedTemporaryFile(delete=False)\n self.cert_temp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, self.p12.get_certificate()))\n self.cert_temp.flush()\n\n self.pkey_temp = tempfile.NamedTemporaryFile(delete=False)\n self.pkey_temp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, self.p12.get_privatekey()))\n self.pkey_temp.flush()", "def _write_new_temp(self, d):\n if d:\n file_utils.safe_create_dir(d)\n ext = MIMETYPES.guess_extension(self.content_type() or '')\n # Exceptions because mimetypes is apparently REALLY OLD\n if ext in {'.jpe', '.jfif'}:\n ext = '.jpg'\n fd, fp = tempfile.mkstemp(\n suffix=ext or '',\n dir=d\n )\n os.close(fd)\n with open(fp, 'wb') as f:\n f.write(self.get_bytes())\n return fp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute wkhtmltopdf as a subprocess in order to convert the html given as input into a pdf document.
def _run_wkhtmltopdf_enscript(self, headers, footers, bodies, landscape, paperformat, spec_paperformat_args=None, save_in_attachment=None, set_viewport_size=False, password=False ): if not password: return self._run_wkhtmltopdf(headers, footers, bodies, landscape, paperformat, spec_paperformat_args=spec_paperformat_args, save_in_attachment=save_in_attachment, set_viewport_size=set_viewport_size ) if not save_in_attachment: save_in_attachment = {} command_args = [] if set_viewport_size: command_args.extend(['--viewport-size', landscape and '1024x1280' or '1280x1024']) # Passing the cookie to wkhtmltopdf in order to resolve internal links. try: if request: command_args.extend(['--cookie', 'session_id', request.session.sid]) except AttributeError: pass # Wkhtmltopdf arguments command_args.extend(['--quiet']) # Less verbose error messages if paperformat: # Convert the paperformat record into arguments command_args.extend(self._build_wkhtmltopdf_args(paperformat, spec_paperformat_args)) # Force the landscape orientation if necessary if landscape and '--orientation' in command_args: command_args_copy = list(command_args) for index, elem in enumerate(command_args_copy): if elem == '--orientation': del command_args[index] del command_args[index] command_args.extend(['--orientation', 'landscape']) elif landscape and '--orientation' not in command_args: command_args.extend(['--orientation', 'landscape']) # Execute WKhtmltopdf pdfdocuments = [] temporary_files = [] for index, reporthtml in enumerate(bodies): local_command_args = [] pdfreport_fd, pdfreport_path = tempfile.mkstemp(suffix='.pdf', prefix='report.tmp.') temporary_files.append(pdfreport_path) # Directly load the document if we already have it if save_in_attachment and save_in_attachment['loaded_documents'].get(reporthtml[0]): with closing(os.fdopen(pdfreport_fd, 'w')) as pdfreport: pdfreport.write(save_in_attachment['loaded_documents'][reporthtml[0]]) pdfdocuments.append(pdfreport_path) continue else: os.close(pdfreport_fd) # Wkhtmltopdf handles header/footer as separate pages. Create them if necessary. if headers: head_file_fd, head_file_path = tempfile.mkstemp(suffix='.html', prefix='report.header.tmp.') temporary_files.append(head_file_path) with closing(os.fdopen(head_file_fd, 'w')) as head_file: head_file.write(headers[index]) local_command_args.extend(['--header-html', head_file_path]) if footers: foot_file_fd, foot_file_path = tempfile.mkstemp(suffix='.html', prefix='report.footer.tmp.') temporary_files.append(foot_file_path) with closing(os.fdopen(foot_file_fd, 'w')) as foot_file: foot_file.write(footers[index]) local_command_args.extend(['--footer-html', foot_file_path]) # Body stuff content_file_fd, content_file_path = tempfile.mkstemp(suffix='.html', prefix='report.body.tmp.') temporary_files.append(content_file_path) with closing(os.fdopen(content_file_fd, 'w')) as content_file: content_file.write(reporthtml[1]) try: wkhtmltopdf = [_get_wkhtmltopdf_bin()] + command_args + local_command_args wkhtmltopdf += [content_file_path] + [pdfreport_path] process = subprocess.Popen(wkhtmltopdf, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() if process.returncode not in [0, 1]: raise UserError(_('Wkhtmltopdf failed (error code: %s). 
' 'Message: %s') % (str(process.returncode), err)) # Save the pdf in attachment if marked if reporthtml[0] is not False and save_in_attachment.get(reporthtml[0]): with open(pdfreport_path, 'rb') as pdfreport: attachment = { 'name': save_in_attachment.get(reporthtml[0]), 'datas': base64.encodestring(pdfreport.read()), 'datas_fname': save_in_attachment.get(reporthtml[0]), 'res_model': save_in_attachment.get('model'), 'res_id': reporthtml[0], } try: self.env['ir.attachment'].create(attachment) except AccessError: _logger.info("Cannot save PDF report %r as attachment", attachment['name']) else: _logger.info('The PDF document %s is now saved in the database', attachment['name']) pdfdocuments.append(pdfreport_path) except: raise # Return the entire document if len(pdfdocuments) == 1: entire_report_path = pdfdocuments[0] else: entire_report_path = self._merge_pdf(pdfdocuments) temporary_files.append(entire_report_path) #enscript. entire_report_path = set_password(entire_report_path, password ) with open(entire_report_path, 'rb') as pdfdocument: content = pdfdocument.read() # Manual cleanup of the temporary files for temporary_file in temporary_files: try: os.unlink(temporary_file) except (OSError, IOError): _logger.error('Error when trying to remove file %s' % temporary_file) return content
[ "def xhtml2pdf(xhtml_file, files, temp_dir, print_style, pdfgen, output_pdf, verbose=False):\n\n CSS_FILE = os.path.join(BASE_PATH, 'css', '%s.css' % print_style)\n\n # Run Prince (or an Opensource) to generate an abstract tree 1st\n strCmd = [pdfgen, '-v', '--style=%s' % CSS_FILE, '--output=%s' % output_pdf, xhtml_file]\n if verbose:\n print >> sys.stderr, \"Executing PDF generation: \" + ' '.join(strCmd)\n\n env = { }\n\n # run the program with subprocess and pipe the input and output to variables\n p = subprocess.Popen(strCmd, close_fds=True, env=env)\n # set STDIN and STDOUT and wait untill the program finishes\n _, stdErr = p.communicate()\n\n return stdErr", "def application(request):\n if request.method != 'POST':\n return\n\n request_is_json = request.content_type.endswith('json')\n\n source_files = []\n # source_file = tempfile.NamedTemporaryFile(suffix='.html')\n \n payload = json.loads(request.data)\n\n pages = payload['contents']\n\n for page in pages:\n ptf = tempfile.NamedTemporaryFile(suffix='.html', delete=False)\n ptf.write(page.decode('base64'))\n ptf.flush()\n source_files.append(ptf.name)\n\n # source_file.write(payload['contents'].decode('base64'))\n options = payload.get('options', {})\n\n if \"header-html\" in options:\n htf = tempfile.NamedTemporaryFile(suffix='.html')\n htf.write(options['header-html'].decode('base64'))\n htf.flush()\n options['header-html'] = htf.name\n\n if \"footer-html\" in options:\n ftf = tempfile.NamedTemporaryFile(suffix='.html')\n ftf.write(options['footer-html'].decode('base64'))\n ftf.flush()\n options['footer-html'] = ftf.name\n\n # source_file.flush()\n\n # Evaluate argument to run with subprocess\n args = ['wkhtmltopdf']\n\n # Add Global Options\n options['load-error-handling'] = 'ignore'\n if options:\n for option, value in options.items():\n args.append('--%s' % option)\n if value:\n args.append('\"%s\"' % value)\n\n # Add source file name and output file name\n # file_name = source_file.name\n file_name = source_files[0]\n args += source_files + [file_name + \".pdf\"]\n\n # Execute the command using executor\n execute(' '.join(args))\n\n return Response(\n wrap_file(request.environ, open(file_name + '.pdf')),\n mimetype='application/pdf',\n )", "def convert_to_pdf(htmlfolder: str, filenames: List[str], outfolder: str = \"./pdf/\", cmd: str = \"wkhtmltopdf\") -> None:\n def _convert_file_parallel(filename: str):\n infile = htmlfolder + filename.replace(\".Rmd\", \".html\")\n outfile = outfolder + filename.replace(\".Rmd\", \".pdf\")\n # Return if provided file path does not exist (also ignores symlinks)\n if not os.path.exists(infile): return\n os.system(\n \"xvfb-run --auto-servernum --server-args='-screen 0, 1920x1080x24' {} --use-xserver --javascript-delay 4000 ./{} ./{}\"\n .format(cmd, infile, outfile)\n )\n\n def _convert_file(filename: str):\n infile = htmlfolder + filename.replace(\".Rmd\", \".html\")\n outfile = outfolder + filename.replace(\".Rmd\", \".pdf\")\n # Return if provided file path does not exist (also ignores symlinks)\n if not os.path.exists(infile): return\n os.system(\"{} --javascript-delay 4000 ./{} ./{}\".format(cmd, infile, outfile))\n\n pool = ThreadPool(cli_args.jobs)\n Log.info(\"Converting {} files to PDF\", len(filenames))\n\n # Use xvfb-run if installed only on Linux, to convert files concurrently\n if which(\"xvfb-run\") and sys.platform.startswith(\"linux\"):\n Log.info(\"Detected xfvb-run. 
Using {} threads\", cli_args.jobs)\n try:\n pool.map(_convert_file_parallel, filenames)\n except KeyboardInterrupt:\n Log.error(\"Terminating prematurely\")\n Log.info(\"Finishing pending conversions\")\n # Wait for all conversions to finish\n pool.terminate()\n pool.join()\n sys.exit(1)\n return\n pool.close()\n pool.join()\n else:\n for fn in filenames:\n _convert_file(fn)\n\n Log.success(\"Finished converting files to PDF\")", "def test_html_to_pdf_file():\n\n test_content = \"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>A demo html page</title>\n </head>\n <body>\n <p>Hello world!</p>\n </body>\n </html>\n \"\"\"\n\n # GIVEN an HTML report to be converted to PDF:\n bytes_file = html_to_pdf_file(test_content, \"landscape\", 300)\n assert isinstance(bytes_file, BytesIO)", "def convert_html_to_pdf(html_file,pdf_file):\n options = {\n 'page-size': 'A4',\n 'margin-top': '0.1in',\n 'margin-right': '0.1in',\n 'margin-bottom': '0.1in',\n 'margin-left': '0.1in',\n 'encoding': \"UTF-8\",\n 'no-outline': None\n }\n pdfkit.from_file(html_file,pdf_file,options)\n BuiltIn().log(\"Converted `%s` to `%s`\" % (html_file,pdf_file))", "def generate_pdf(self, comm_path, report_xml, header, footer, html_list, webkit_header=False):\n if not webkit_header:\n webkit_header = report_xml.webkit_header\n tmp_dir = tempfile.gettempdir()\n out_filename = tempfile.mktemp(suffix=\".pdf\", prefix=\"webkit.tmp.\")\n file_to_del = [out_filename]\n if comm_path:\n command = [comm_path]\n else:\n command = ['wkhtmltopdf']\n\n command.append('--quiet')\n # default to UTF-8 encoding. Use <meta charset=\"latin-1\"> to override.\n command.extend(['--encoding', 'utf-8'])\n if header :\n head_file = file( os.path.join(\n tmp_dir,\n str(time.time()) + '.head.html'\n ),\n 'w'\n )\n head_file.write(header.encode('utf-8'))\n head_file.close()\n file_to_del.append(head_file.name)\n command.extend(['--header-html', head_file.name])\n if footer :\n foot_file = file( os.path.join(\n tmp_dir,\n str(time.time()) + '.foot.html'\n ),\n 'w'\n )\n foot_file.write(footer.encode('utf-8'))\n foot_file.close()\n file_to_del.append(foot_file.name)\n command.extend(['--footer-html', foot_file.name])\n\n if webkit_header.margin_top :\n command.extend(['--margin-top', str(webkit_header.margin_top).replace(',', '.')])\n if webkit_header.margin_bottom :\n command.extend(['--margin-bottom', str(webkit_header.margin_bottom).replace(',', '.')])\n if webkit_header.margin_left :\n command.extend(['--margin-left', str(webkit_header.margin_left).replace(',', '.')])\n if webkit_header.margin_right :\n command.extend(['--margin-right', str(webkit_header.margin_right).replace(',', '.')])\n if webkit_header.orientation :\n command.extend(['--orientation', str(webkit_header.orientation).replace(',', '.')])\n if webkit_header.format :\n command.extend(['--page-size', str(webkit_header.format).replace(',', '.')])\n count = 0\n for html in html_list :\n html_file = file(os.path.join(tmp_dir, str(time.time()) + str(count) +'.body.html'), 'w')\n count += 1\n html_file.write(html.encode('utf-8'))\n html_file.close()\n file_to_del.append(html_file.name)\n command.append(html_file.name)\n command.append(out_filename)\n stderr_fd, stderr_path = tempfile.mkstemp(text=True)\n file_to_del.append(stderr_path)\n try:\n status = subprocess.call(command, stderr=stderr_fd)\n os.close(stderr_fd) # ensure flush before reading\n stderr_fd = None # avoid closing again in finally block\n fobj = open(stderr_path, 'r')\n error_message = fobj.read()\n fobj.close()\n if not 
error_message:\n error_message = _('No diagnosis message was provided')\n else:\n error_message = _('The following diagnosis message was provided:\\n') + error_message\n if status :\n raise except_osv(_('Webkit error' ),\n _(\"The command 'wkhtmltopdf' failed with error code = %s. Message: %s\") % (status, error_message))\n pdf_file = open(out_filename, 'rb')\n pdf = pdf_file.read()\n pdf_file.close()\n finally:\n if stderr_fd is not None:\n os.close(stderr_fd)\n for f_to_del in file_to_del:\n try:\n os.unlink(f_to_del)\n except (OSError, IOError), exc:\n _logger.error('cannot remove file %s: %s', f_to_del, exc)\n return pdf", "def html_to_pdf():\n\thtml = flask.request.form.get('html', '')\n\ttitle = 'html2pdf'\n\n\ttry:\n\t\tpdf_data = phantom.html_to_pdf(html)\n\texcept OSError as e:\n\t\t# Reraise the error so flask can log it\n\t\traise e\n\n\tif pdf_data:\n\t\tresponse = flask.make_response(pdf_data)\n\t\tresponse.headers['Cache-Control'] = 'no-cache'\n\t\tresponse.headers['Content-Type'] = 'application/pdf'\n\t\tresponse.headers['Content-Disposition'] = \"attachment;filename=%s.pdf\"%title\n\t\treturn response\n\n\telse:\n\t\tflask.abort(400)", "def render_pdf_cover(input_md_path, output_pdf_path):\n\n call([\"pandoc\", \"--from\", \"markdown\", \"--output\", output_pdf_path, input_md_path], cwd=tempfile.gettempdir())", "def markdown_to_docx(path):\n\n \"\"\"pandoc titanic_transform.html -s -o titanic.docx\"\"\"\n\n \"\"\"titanic_transform.html -f markdown -t html | pandoc -f html -t docx -o titanic.docx\"\"\"\n args = ['pandoc', path, '-s', '-o', './jupyter/titanic.docx']\n child = subprocess.call(args)", "def txt_to_pdf(self):\n #path = '%s/%s.pdf' % (os.path.dirname(self.filepath), self.document)\n path = os.path.join(os.path.dirname(self.filepath), self.document) + '.pdf'\n p = Popen('a2ps --quiet --portrait --columns=1 --rows=1 -L 100 --no-header --borders=off -o - %s | ps2pdf -sPAPERSIZE=a4 - %s' % (self.filepath, path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n content = open(path, 'rb').read()\n p = Popen('rm -rf %s' % path, shell=True,stdout=PIPE, stderr=PIPE)\n return ['application/pdf', content]", "def latexpdf(argv):\r\n\t\tOPTIONS[\"TARGET\"] = \"latex\"\r\n\t\tlatex(argv)\r\n\t\tshow.info(\"Running LaTeX files through pdflatex...\")\r\n\t\tsubprocess.check_call((\"make -C %(TARGETDIR)s all-pdf\" % OPTIONS).split())\r\n\t\tshow.info(\"pdflatex finished; the PDF files are in %(TARGETDIR)s.\", OPTIONS)", "def pdf_to_pages(pdf_path):\n cmd = ['java', '-jar', PDF_BOX, 'ExtractText',\n '-html', '-console', pdf_path]\n retcode, stdout, stderr = run_command(cmd, raise_on_error=False)\n ok = retcode == 0\n if not ok:\n print('FAILURE: retcode=%d stderr=<%s>' % (retcode, stderr))\n return ok, '', []\n text = stdout.decode('utf-8')\n sep = '<div style=\"page-break-before:always; page-break-after:always\">'\n return ok, text, text.split(sep)[1:]", "def generate_output(output, out = \".dvi\"):\n print 'hi', output\n # Standard tex inputs required for compiling .tex file\n filename = os.path.join(\"c:\",\"output\")\n tex = \".tex\"; pdf = \".pdf\"; dvi = \".dvi\"; ps = \".ps\"\n begin = [\"\\documentclass[12pt]{article}\\n\",\n \"\\usepackage{amsmath,url}\\n\",\n \"\\\\begin{document}\\n\",\n \"\\section{Cross-Section}\\n\\n\"]\n end = [\"\\end{document}\"]\n \n pieces = []\n # Crappy method to find out the type of the input, and then LaTeXify it\n if not isinstance(output, str):\n \n # Input is a list. 
Break it up and try to LaTeXify each piece\n if isinstance(output, list):\n try:\n print 'list'\n for i in range(len(output)):\n pieces.append(sp.latex(output[i]))\n except: e\n # Input is probably just a sympy expression\n else:\n try:\n output = sp.latex(output)+\"\\n\"\n except: \n e\n print e\n \n # Input is a string\n else: output = output+\"\\n\\n\"\n\n # If the input was a list, join all the pieces into one string with 2 spaces between them. \n if pieces != []:\n output = '\\n\\n'.join(pieces)\n # If the LaTeXifed input has any commas in it, split the expression at those commas and put some blank lines in between\n else:\n if output.find(',') > 0:\n output = '\\n'.join(output.split(','))\n\n print output\n # Create file and write to it\n FILE = open(filename+tex, \"w\")\n FILE.writelines(begin)\n FILE.writelines(output)\n FILE.writelines(end)\n FILE.close()\n\n if 1:\n # Create commands\n compile = [\"latex\",filename+tex]\n disdvi = [\"yap\", filename+dvi]\n \n # Process commands\n a = sub.Popen(compile,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n a.communicate()\n a.wait()\n \n # BROKEN\n if out == \"pdf\":\n tops = [\"dvips\", filename+dvi]\n topdf = [\"ps2pdf\", filename+ps]\n dispdf = [\"C:/Program Files/Adobe/Reader 9.0/Reader/AcroRd32\", filename+pdf]\n c = sub.check_call(tops)\n # c = sub.Popen(tops,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n # c.communicate\n # c.wait()\n d = sub.Popen(topdf,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n d.communicate\n d.wait()\n e = sub.Popen(dispdf,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n e.communicate\n else:\n b = sub.Popen(disdvi,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n b.communicate()", "def testSimple(\n data=\"\"\"Hello <b>World</b><br/><img src=\"img/test.jpg\"/>\"\"\",\n dest=\"test.pdf\"):\n\n pdf = pisa.CreatePDF(\n cStringIO.StringIO(data),\n file(dest, \"wb\")\n )\n\n if pdf.err:\n dumpErrors(pdf)\n else:\n pisa.startViewer(dest)", "def generate_pdf_from_markdown(pdf_filepath, markdown_filepath,developer_mode):\n dir_name = os.path.dirname(pdf_filepath)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n \n latex_config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'latex_configuration')\n latex_code_sections_config_path = os.path.join(latex_config_dir, 'code_sections.tex')\n\n pandoc_options = [\"--template\", os.path.join(latex_config_dir, 'template.tex'), \"--latex-engine=xelatex\", \n \"--toc\", \"--toc-depth=3\", \"--listings\", \"-H\", latex_code_sections_config_path, \n \"--from\", MD2PDF_INNER_FORMAT, \"--filter\", \"md2pdf_pandoc_filter\", \"--number-sections\",\n \"-V\", 'papersize:\"letterpaper\"', \"-V\", 'fontsize:\"10pt\"', \"-V\", 'styfolder:{}'.format(latex_config_dir)]\n\n # If developer mode is on, convert temporal file to LaTeX.\n if developer_mode == True:\n latex_filepath = os.path.join(tempfile.gettempdir(),'markdown_to_pdf_temp.tex')\n print('Generating LaTeX (developer mode) ...')\n call([\"pandoc\"] + pandoc_options + [\"--output\", latex_filepath, markdown_filepath])\n print('LaTeX generated: [%s] (developer mode)' % latex_filepath)\n\n # Generate PDF.\n print('Generating PDF...')\n pandoc_call_return_value = call([\"pandoc\"] + pandoc_options + [\"--output\", pdf_filepath, markdown_filepath])\n\n if pandoc_call_return_value != 0:\n raise RuntimeError(\n ( \n 'Conversion to PDF failed - ' +\\\n 'Pandoc failed with code: (%d)'\n ) % pandoc_call_return_value\n )\n\n print('Generating PDF...OK')", "def RunPandoc(content, extra=[]):\n\n TempFileTeX = tempfile.NamedTemporaryFile(mode='w+t', 
encoding='utf-8')\n TempFileMD = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8')\n\n print(\"Writing temporary LaTeX file\")\n TempFileTeX.write(content)\n \n TeXContents = TempFileTeX.read()\n\n print(\"Running Pandoc\")\n subprocess.call(['pandoc', '-f', 'latex', '-t', 'markdown', TempFileTeX.name, '-o', TempFileMD.name] + extra)\n\n print(\"Reading temporary output file\")\n OutputData = TempFileMD.read()\n\n TempFileTeX.close()\n TempFileMD.close()\n\n return OutputData", "def latex2pdf(tex_in, run_dir=os.getcwd(), system=\"xelatex\"):\n\n pre, ext = os.path.splitext(os.path.basename(tex_in))\n log_out = os.path.join(run_dir, pre+\"_yaml2latex.log\")\n subprocess.run([system, pre], cwd=run_dir, stdout=open(log_out, 'wb'))\n\n return os.path.join(run_dir, pre+\".pdf\"), log_out", "def convert_pdf_to_xml(path):\n cmd = ['pdftohtml', '-xml', '-f', '1', '-l', '1',\n '-i', '-q', '-nodrm', '-hidden', '-stdout', path]\n # https://stackoverflow.com/questions/15374211/why-does-popen-communicate-return-bhi-n-instead-of-hi\n xml_string = subprocess.check_output(\n cmd, stderr=open(os.devnull, 'w'), universal_newlines=True)\n soup = BeautifulSoup(xml_string, 'xml')\n text = replaceAll(str(soup))\n return parse_xml(StringIO(remove_control_chars(text)))\n\n # return parse_xml(StringIO(xml_string))\n # return parse_xml(StringIO(remove_control_chars(xml_string)))", "def render(*html):\n\n if not PDF_URL:\n raise Exception('PDF_SERVICE_URL environment variable is not set.')\n\n if len(html) > 1:\n data = json.dumps(html)\n response = requests.post('{}/multiple'.format(PDF_URL), data=json.dumps(html), stream=True)\n elif html:\n response = requests.post('{}/pdf'.format(PDF_URL), data=html[0].encode(\"utf-8\"), stream=True)\n else:\n raise Exception('No HTML input provided')\n\n response.raise_for_status()\n return response.content" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves all games with their lengths.
def get_all_games(self): games = [] rows = self.session.query(Game, func.count(Action.id)).outerjoin( Action, and_(Game.id == Action.game_id, Action.code == ActionCodes.TURN)).group_by( Game.id).order_by(Game.id).all() for row in rows: game_data, game_length = row game = { 'idx': game_data.id, 'name': game_data.name, 'date': game_data.date.strftime(TIME_FORMAT), 'map': game_data.map_name, 'length': game_length, } games.append(game) return games
[ "def get(self):\n return list(Game.query.all()), 200", "def get_game_list(self):\n game_list = self.dal.get_games()\n return make_response(True, data=game_list)", "def get_all_games():\n return league.GameLog().overall()['GAME_ID'].unique()", "def get(self):\n return {'status': 'success', 'count': Games.query.count()}, 200", "def get_game_list():\n pass", "def games(self):\r\n\t\t# Do not send request on closed connection\r\n\t\tif not self.conn:\r\n\t\t\traise ValueError('operation on closed connection')\r\n\t\t# Send request and return response\r\n\t\ttry:\r\n\t\t\tself.conn.request('GAMES')\r\n\t\t\tresponse = self.conn.getresponse()\r\n\t\t\treturn response\r\n\t\texcept (NimException, ValueError) as e:\r\n\t\t\traise NimException(e.message)", "def get_all_game_squares(self):\n return GameSquare.objects.filter(game=self)", "def get_games():\n entries = feedparser.parse(FEED_URL)['entries']\n games = []\n\n for i in range(len(entries)):\n games.append(Game(\n title=entries[i]['title'],\n link=entries[i]['links'][0]['href']\n ))\n return games", "def games(self) -> Sequence[Game]:\n return list(self.store.games)", "def list_games(state, count=100, start=0):\n games = {}\n request_url = (str(API_ENDPOINT) +\n str('/games?count=') +\n str(count)+str('&start=') +\n str(start)+str('&state=') +\n str(state))\n\n response = requests.get(request_url)\n if response.status_code != 200:\n print(\"Fehler GET list_games\")\n print(response.content)\n return games\n\n games = response.json()\n\n print(\"games: \" + str(games))\n return games", "def all_games(self, limit=-1):\n self.query_empty = 0\n self.query_sequence = 0\n\n while self.query_empty < 5 and limit - self.query_sequence != 0:\n query = f'7001{self.query_sequence:08}'\n response = self.index.search(query).get('hits', [])\n self.query_sequence += 1\n\n if len(response) > 0:\n self.query_empty = 0\n for each in response:\n art = each.get('horizontalHeaderImage', None)\n price = each.get('lowestPrice')\n sale = True if each.get('salePrice') else False\n discount = round((\n 1 - float(price) / float(each.get('msrp'))\n ) * 100) if sale else 0\n\n self.games_list.append({\n 'nid': each['nsuid'],\n 'title': each['title'],\n 'desc': each['description'],\n 'url': f\"{NINTENDO_URL}{each['url']}\",\n 'img': f\"{art}\" if art else None,\n 'sale': sale,\n 'discount': discount,\n 'prices': {\n 'US': float(price) if price else None\n },\n })\n else:\n self.query_empty += 1", "def get_number_of_games(self):\n global games\n global waiting_games\n return len(games), len(waiting_games)", "def _get_top_games(self):\n _top_games = dict()\n for entry in self._client.games.get_top():\n _top_games[int(entry['game']['id'])] = entry['game']['name']\n logging.debug('>> Found the following games: ' + ', '.join(_top_games.values()))\n return _top_games", "def getVersions(cls, game):\n return cls._games[game]", "def num_games(self: \"BaseGamesRecommender\") -> int:\n return len(self.known_games)", "def by_game_state(cls, game_state, limit=10):\n games = (\n cls.query()\n .filter(cls.game_state == game_state)\n .order(-cls.last_update)\n .fetch(limit)\n )\n return games", "async def get_gameswithgold(self, ctx):\n url = f\"https://reco-public.rec.mp.microsoft.com/channels/Reco/V8.0/Lists/Collection/GamesWithGold?ItemTypes=Game&Market=US&deviceFamily=Windows.Xbox\"\n async with self.session.post(url=url) as res:\n async with ctx.typing():\n games_raw = await res.json(content_type=None)\n game_ids = []\n for game in games_raw[\"Items\"]:\n 
game_ids.append(game[\"Id\"])\n if len(game_ids) == 0:\n return await ctx.send(\"No games found!\")\n async with aiohttp.ClientSession() as session:\n xbl_client = await self.auth_manager(ctx, session)\n if not xbl_client:\n return\n game_data = json.loads((await xbl_client.catalog.get_products(game_ids)).json())\n products = game_data[\"products\"]\n pages = gwg_embeds(products)\n return await menu(ctx, pages, DEFAULT_CONTROLS)", "def get_games():\n rss = feedparser.parse(FEED_URL)\n list_ans = []\n for value in rss['entries']:\n list_ans.append(Game(value['title'], value['link']))\n return list_ans", "def games(self):\n games = []\n for season in self.seasons:\n games += season.games\n return games" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Closes and commits session.
def close(self): self.session.commit() self.session.close()
[ "async def __aexit__(self, *exc):\n await self.session.close()\n self.db_session.commit()", "def commit(self):\n SessionMemoryStore.sessions[self.token] = self.session", "def commit(self, session):\n session.commit()", "def session_commit(self, session):\n # this may happen when there's nothing to commit\n if not hasattr(session, 'meepo_unique_id'):\n self.logger.debug(\"skipped - session_commit\")\n return\n\n self._session_pub(session)\n self._session_del(session)", "def end_session(self):\n self.sess.close()", "def commit(self):\n self._check_closed()\n self._trans_id = self.__session.send_commit()", "def shutdown(self):\n if (self.session!=None):\n self.session.commit()\n self.session.close()\n self.session=None\n if (self.engine!=None):\n self.engine.dispose()\n self.engine=None\n if (self.connection!=None):\n self.connection.close()\n self.connection=None", "def _close_session(cls):\n cls.coord.request_stop()\n cls.coord.join(cls.thread)\n cls.sess.close()", "def commit(self):\n try:\n self.session.commit()\n except Exception: # pragma: no cover\n self.session.rollback()\n raise", "def commit_close_connection(self, connection):\n connection.commit()\n connection.close()", "def _safe_close(self, sess: session.Session):\n # pylint: disable=broad-except\n try:\n sess.close()\n except Exception:\n # Intentionally not logging to avoid user complaints that\n # they get cryptic errors. We really do not care that Close\n # fails.\n pass\n # pylint: enable=broad-except", "def session_scope():\n session = get_session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "async def close(self) -> None:\n await super().close()\n await self.eval_session.close()", "def session_closed(self):\n pass", "def test_end_session(self):\n iiq = insightiq_api.InsightiqApi(username='pat', password='a')\n iiq.end_session()\n\n self.fake_session.get.assert_called()\n self.fake_session.close.assert_called()", "def close_sessions(context):\n context.close_sessions()\n context.debug(\"CloseSessions\", \"All session are closed\")", "def test_db_session_ctx_close(fake_import: model.Import):\n with db.session_ctx() as session:\n session.add(fake_import)\n session.commit()\n session.close()\n session.close()\n\n # also, there will inevitably be a test that does a query to the db\n # after the application has closed the session, so make sure this works too\n # (it does work, because a new session-transaction is immediately created\n # on session close, see https://bit.ly/33S307r )\n all_imports = db.get_session().query(model.Import).all()\n assert len(all_imports) == 1", "async def close(self) -> None:\n await super().close()\n\n if self.http_session:\n await self.http_session.close()\n\n if self._connector:\n await self._connector.close()\n\n if self._resolver:\n await self._resolver.close()", "def save(self):\n self.session.add(self)\n self.commit_session()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses Client args from the arg string.
def parse_args(): parser = argparse.ArgumentParser(description='Parse Client args.') parser.add_argument('-p', '--port', type=int, default=8080, help='Set the port to talk to') parser.add_argument('-m', '--message', type=str, help='Message to send') return parser.parse_args()
[ "def parse(self, cli_args=str(_sys.argv)[1:-1]):\n if cli_args is not _sys.argv:\n cli_args = cli_args.split()\n for i in range(len(cli_args)):\n cli_args[i] = cli_args[i].split('=')\n cli_args = sum(cli_args, [])\n\n self._ensure_required(cli_args)\n self._ensure_exclusive(cli_args)\n self._ensure_and_assign_values(cli_args)", "def _parse_args(self, cmd_name, cmd_str):\n\n args = []\n scanner = Scanner(cmd_str)\n\n for kind in getattr(self, cmd_name)._sig:\n if kind is str:\n args.append(scanner.next())\n elif kind is Ellipsis:\n args.append(scanner.nextLine())\n\n return args", "def parse(argString, keys):\n keys = keys.keys\n\n args = []\n for a in argString.split():\n optional = False\n mat = re.search(r\"\\[([^]]+)\\]\", a)\n if mat:\n optional = True\n a = mat.group(1)\n\n needsValue = False\n mat = re.search(r\"<([^>]+)>\", a)\n if mat:\n needsValue = True\n a = mat.group(1)\n\n alternates = False\n mat = re.search(r\"@?\\(([^)]+)\\)\", a)\n if mat:\n alternates = True\n a = mat.group(1)\n\n args.append(Cmd(keys, a, optional, needsValue, alternates))\n\n return args", "def parse_cli():\n\n argv = sys.argv[:]\n argv.pop(0)\n options = {\"cmd\": argv.pop(0)}\n\n index = 0\n while index < len(argv):\n arg = argv[index]\n\n # check out options\n if \"-\" == arg[0]:\n k = re.sub(\"^-+\", \"\", argv.pop(index))\n\n # check out parameter\n if index < len(argv) and argv[index][0] != \"-\":\n options[k] = argv.pop(index)\n else:\n options[k] = True\n else:\n index += 1\n\n options[\"args\"] = argv\n return options", "def parse_message(self, message):\n try:\n args = shlex.split(message.content[1:].split('\\n', 1)[0])\n name = args.pop(0).lower()\n except (ValueError, IndexError):\n raise CommandError('invalid command format')\n return name, args", "def parseArgs (args):\n result = {}\n \n for arg in args:\n try:\n (var, val) = string.split (arg, '=', 1)\n except:\n raise (SyntaxError, '%s is in the wrond format' % (arg))\n \n if (var[:2] != '--'):\n raise (SyntaxError, 'variable names must start with a ' +\n 'double dash (%s)' % (var))\n \n result[var[2:]] = val\n return (result)", "def parse_args_string(val: str) -> TypeInspectionsArgs:\n out = {}\n\n for chunk in val.split(';'):\n args = {}\n\n alias, _, argstr = chunk.strip().partition(':')\n argstr = argstr.strip()\n\n for arg in argstr.split(','):\n name, _, val = arg.partition('=')\n val = val.strip()\n\n if val:\n args[name.strip()] = val\n\n if args:\n out[alias.strip()] = args\n\n return out", "def from_arguments(cls, argstring):\n\n obj = object.__new__(cls)\n obj.parse(argstring)\n return obj", "def test_string_argument_parsing():\n arguments = [\n {\n \"name\": \"firstname\",\n \"type\": \"str\",\n \"default\": \"Allysa P. 
Hacker\",\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--firstname\", \"john\"])\n assert values.firstname == \"john\"", "def parse_command_line_args():\n if len(sys.argv) < 2:\n sys.stderr.write(\"Usage : python {} MMDDYY_HHMMSS_cust_id (for example 051317_000001_000015)\\n\"\n .format(sys.argv[0]))\n raise SystemExit(1) \n mmddyy, hhmmss, raw_cust_id = sys.argv[1].split('_')\n our_run_date = datetime.strptime('{} {}'.format(mmddyy, hhmmss), '%m%d%y %H%M%S')\n param_cust_id = raw_cust_id.lstrip('0')\n return our_run_date, param_cust_id", "def parse_cli():\n description = \"example: ./usb-watch.py [-d] [-p <pid file>] [-s]\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"-p\",\n \"--pid_file\",\n help=\"Location of PID file\",\n default=PID_FILE,\n required=False)\n parser.add_argument(\"-d\",\n \"--daemonize\",\n help=\"Daemonize/fork to background\",\n action=\"store_true\")\n parser.add_argument(\"-s\",\n \"--sms\",\n help=\"Disable SMS messaging\",\n action=\"store_false\")\n\n args = parser.parse_args()\n return args", "def makeargrest(ievent):\n\n if not ievent.txt:\n return\n\n try:\n ievent.args = ievent.txt.split()[1:]\n except ValueError:\n ievent.args = [] \n\n try:\n cmnd, ievent.rest = ievent.txt.split(' ', 1)\n except ValueError: \n ievent.rest = \"\" \n\n ievent.usercmnd = ievent.txt.split()[0]", "def parseCommandLine(cls): \n win32serviceutil.HandleCommandLine(cls)", "def _parseClientUNIX(**kwargs):\n try:\n kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))\n except KeyError:\n pass\n try:\n kwargs['timeout'] = int(kwargs['timeout'])\n except KeyError:\n pass\n return kwargs", "def _parse_args(self):\n self._args = self.msg.strip().split()\n\n try:\n command_uc = self.args.pop(0)\n self._command = command_uc.lower()\n except IndexError:\n return\n\n # e.g. \"!command>user arg1 arg2\"\n if \">\" in self.command:\n command_uc, self._reply_nick = command_uc.split(\">\", 1)\n self._command = command_uc.lower()\n\n if self.command.startswith(\"!\") or self.command.startswith(\".\"):\n # e.g. \"!command arg1 arg2\"\n self._is_command = True\n self._trigger = self.command[0]\n self._command = self.command[1:] # Strip the \"!\" or \".\"\n elif re.match(r\"{0}\\W*?$\".format(re.escape(self.my_nick)),\n self.command, re.U):\n # e.g. \"EarwigBot, command arg1 arg2\"\n self._is_command = True\n self._trigger = self.my_nick\n try:\n self._command = self.args.pop(0).lower()\n except IndexError:\n self._command = \"\"\n else:\n try:\n if self.msg[-1] == \".\" and self.msg[-2] != \".\":\n if self.args:\n self.args[-1] = self.args[-1][:-1]\n else:\n self._command = self.command[:-1]\n except IndexError:\n pass\n\n # e.g. 
\"!command >user arg1 arg2\"\n if self.args and self.args[0].startswith(\">\"):\n self._reply_nick = self.args.pop(0)[1:]", "def parse_estimator_cli_args(\n clsname: str,\n hyperparams: Sequence[str],\n) -> Tuple[Type, Dict[str, Any]]:\n return cast(Type, locate(clsname)), smepu.argparse.to_kwargs(hyperparams)", "def test_arg() -> None:\n parser = arg_parser()\n parsed = parser.parse_args(\n [\"--test\", \"test_name\", \"-n\", \"52\", \"--tool\", \"cwltool\", \"-j\", \"4\"]\n )\n assert parsed.test == \"test_name\"\n assert parsed.n == \"52\"\n assert parsed.tool == \"cwltool\"\n assert parsed.j == 4", "def _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"experiment_config\",\n type=str,\n help='experiment json file (\\'{\"dataset\":\"EmnistDataset\"}\\'',\n )\n args = parser.parse_args()\n return args", "def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value in self.kwargs.items():\n # setattr(self, key, value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear all logging configs for the `mltk` package.
def clear_logging() -> None: logger = logging.getLogger('mltk') logger.propagate = True logger.setLevel(logging.NOTSET) logger.handlers.clear()
[ "def clear_loggers():\n import logging\n\n loggers = [logging.getLogger()] + list(logging.Logger.manager.loggerDict.values())\n for logger in loggers:\n handlers = getattr(logger, \"handlers\", [])\n for handler in handlers:\n logger.removeHandler(handler)", "def clear_logs():\n with cd('%(path)s' % env):\n run('rm ./logs/*.log')", "def Reset():\n _log_manager.Reset(sys.stdout, sys.stderr)", "def clear_old_log(self):\n if (self.config.step in [\"full\", \"antares\"]) and (os.path.isfile(self.antares() + '.log')):\n os.remove(self.antares() + '.log')\n if (self.config.step in [\"full\", \"lp\"])\\\n and (os.path.isfile(self.exe_path(self.config.LP_NAMER) + '.log')):\n os.remove(self.exe_path(self.config.LP_NAMER) + '.log')", "def silence_log_messages_by_default():\n logging.basicConfig(handlers=(logging.NullHandler(),))", "def disable_logging():\n logging.shutdown()", "def clearconfig(self):\n try:\n self.ui.clear_config()\n except Exception as err:\n message = \"Error clearing switch id:%s config: %s.\" % (self.id, err, )\n self.class_logger.error(message)\n pytest.fail(message)", "def unset_logger():\n raise NotImplementedError('Unset logger function is not implemented yet.')", "def clear_all_traces(self):\n self.write(\"CALC:MEAS:DEL:ALL\")", "def reset_level():\n ActionLogger.reset_level()", "def remove_all_subscriber():\n LogManager._LogManager__implementation.remove_all_subscriber()", "def clearconfig(self):\n self.class_logger.debug(\"Performing clearConfig on real switch.\")\n super(SwitchReal, self).clearconfig()\n\n self.setup_syslog()\n\n # Set initial ports speed\n self.speed_preconfig()", "def clearExistingLoggerInstance():\n global _logger\n _logger = None", "def ResetLogs (self) :\n\t\tself.optimization_log \t\t= {}\n\t\tself.reference_signal \t\t= []\n\t\tself.log_reference_signal\t= []", "def remove_handlers_root_logger_object():\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)", "def _clear_logs(self):\n logfile = \"%s/postmaster.log\" % self.checker._logs_dir\n if not os.path.exists(logfile):\n # here i didn't check it is a file or a dir\n return\n logger.info(\"Cleanup the existed postmaster log\")\n open(logfile, 'w').close()", "def clearOldLog():\n if LOG_FOLDER is not None:\n f = open(LOG_FOLDER + '/execution.log', 'w')\n f.write(\"\\n\")", "def end_logging():\n logger = logging.getLogger(\"TopLog\")\n logging.captureWarnings(False)\n all_handlers = [h for h in logger.handlers]\n for h in all_handlers:\n logger.removeHandler(h)", "def reset(self):\n for manager in self._event_managers:\n manager.reset()\n self.collection = None\n self.description = None\n self.logs_dir = None\n self.log_format = None\n for manager in self._event_managers:\n manager.collection_object = None\n self._event_managers = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the minimum element in an observable sequence according to the optional comparer, or a default greater-than/less-than check otherwise. Example: res = source.min()
def min(self, comparer=None): return self.min_by(identity, comparer).map(first_only)
[ "def min(sequence):\n return __builtin__.min(sequence)", "def min_by(collection, transform_function):\n if len(collection) == 0:\n return None\n\n min_value = transform_function(collection[0])\n min_item = collection[0]\n\n for item in collection[1:]:\n this_value = transform_function(item)\n if this_value < min_value:\n min_item = item\n min_value = this_value\n\n return min_item", "def min(*args, key=None): # known special case of min\n pass", "def get_min(a, b):\n return b if a > b else a", "def min(self) -> DataValue:\n return min(self.iterable)", "def min(self):\n minv = None\n for win in self._data:\n minv = win.min() if minv is None else min(minv, win.min())\n return minv", "def min_value(my_list):\n aux = ordered_values(my_list)\n return aux[0]", "def withmin(func, items):\n items = iter(items)\n minitem = items.next()\n minval = func(minitem)\n for it in items:\n curval = func(it)\n if minval > curval:\n minval = curval\n minitem = it\n return minitem", "def minimum_value(sequence):\r\n low = sequence[0] # need to start with some value\r\n for i in sequence:\r\n if i < low:\r\n low = i\r\n return low", "def mymin(items):\n smallest = items[0]\n for item in items[1:]:\n if item < smallest:\n smallest = item\n return smallest", "def min_by(f, x, y):\n return x if f(x) < f(y) else y", "def min(x):\n\treturn np.min(x)", "def find_min(self)->(any, any):\n #---- to do ----\n # complete this method by calling bst.find_min()\n # return the key and the value associated with the smallest key in the tree\n # raise ValueError if the tree is empty\n #---------------\n if self.num_items == 0:\n raise ValueError\n return bst.find_min(self.tree)", "def found_min(array_min):\n return min(array_min)", "def minGeneral(a, mi=0):\n if a is None or len(a) == 0:\n return mi\n return min(a)", "def argmin(arr, f):\n m = None\n i = None\n for idx, item in enumerate(arr):\n if item is not None:\n if m is None or f(item) < m:\n m = f(item)\n i = idx\n return i", "def min_element(l):\n \n \n if l != []:\n temp = int(l[0])\n for i in l:\n if int(i) < int(temp):\n temp = i\n return temp\n else:\n raise ValueError(\"List is empty\")", "def min(self):\n\n if len(self.regions) != 1:\n raise ClaripyVSAOperationError(\"'min()' onlly works on single-region value-sets.\")\n\n return self.get_si(next(iter(self.regions))).min", "def find_minimum(data):\n minimum_of_set = min(data)\n return minimum_of_set" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Case-insensitive `getattr` for enums.
def get_caseins_enum_attr(enum: tp.NamedTuple, attr: str) -> tp.Any: lower_attr_keys = list(map(lambda x: x.lower(), enum._fields)) attr_idx = lower_attr_keys.index(attr.lower()) orig_attr = enum._fields[attr_idx] return getattr(enum, orig_attr)
[ "def get_enum_name(self):\n return self.name.upper()", "def for_name(name='', enum_type=None, enum_description=''):\n found = None\n _name = name.upper()\n\n try:\n found = enum_type[_name]\n except Exception:\n raise ValueError(\"Unsupported {0} {1}\".format(enum_description, name))\n\n return found", "def get_enum(self, name):\n for e in self.enums:\n if e.name == name:\n return e\n return None", "def get_enum_value(e: EnumMeta, k) -> Enum:\n try:\n return e[str(k).split(\".\")[-1].upper()]\n except KeyError as err:\n raise lpipe.exceptions.InvalidPathError(\n \"Payload specified an invalid path.\"\n ) from err", "def testScalar_Enum(self):\n self.scalarGetAndCheck(\"snimpyEnum\", \"down\")", "def getEnumNameFromValue(value):\n for t, tobj in list(TypeDef.typeDict.items()):\n if tobj.objtype in {s_ENUM, s_KERNEL}:\n n = tobj.getLabel(value)\n if n is not None:\n return t, n\n return None, None", "def enum_value(enum_str):\n return enum_str.strip(' \\'').lower()", "def __getattr__(self, attr):\n if attr.startswith('_'):\n return DispatchBaseClass.__getattr__(self, attr) \n \n try:\n extendedPropMap = self._prop_map_get_ex_\n except AttributeError:\n extendedPropMap = {}\n \n if attr in extendedPropMap:\n return extendedPropMap[attr](self)\n \n value = DispatchBaseClass.__getattr__(self, attr)\n if attr.endswith('s') and hasattr(self.api, attr):\n try:\n value = getattr(self.api, attr)(value)\n except:\n pass\n return value", "def __getattr__(self, name):\n if name in self.key2field:\n return self.fields[self.key2field[name]]\n else:\n raise AttributeError(\"cannot find symbol {:s}\".format(name))", "def get_localized_attr(self, attr):\n#\n# if attr.startswith('gen'):\n# from nose.tools import set_trace; set_trace()\n\n if attr.startswith('_'):\n return object.__getattribute__(self, attr)\n try:\n return self.__getattribute__(attr)\n except AttributeError:\n translated = getattr(self, '%s_translated' % self.__class__.__name__)\n return getattr(translated, attr)", "def enum_name(name):\n assert name.startswith('GL_')\n return name[3:]", "def pascal_case(value):\n if isinstance(value, enum.Enum):\n value = value.name\n return value.title().replace('_', '')", "def __getattr__(self, name):\n if name == \"_dict_of_archs\":\n return object.getattr(self, \"_dict_of_archs\") #pylint: disable=E1101\n else:\n # search for method in the architectures\n for key in self._dict_of_archs:\n if hasattr(self._dict_of_archs[key], name):\n return getattr(self._dict_of_archs[key], name)\n\n raise AttributeError(\"%r object has no attribute %s\" % (self.__class__, name))", "def enum_value_for(name_to_enum_entry_map: Dict[str, Any], name: str):\n assert name_to_enum_entry_map is not None\n assert len(name_to_enum_entry_map) >= 1\n assert name is not None\n assert name.strip() == name\n\n if name == '':\n result = None\n else:\n try:\n result = name_to_enum_entry_map[name.lower()]\n except KeyError:\n enum_type = type(next(iter(name_to_enum_entry_map.values())))\n valid_names = sorted(name_to_enum_entry_map.keys())\n raise ValueError('name %r for enum %s must be one of: %s'\n % (name, enum_type.__name__, valid_names))\n return result", "def get_member(cls, arg):\n if isinstance(arg, cls):\n return arg\n if isinstance(arg, str):\n u_arg = arg.strip().upper()\n for member in cls:\n if member.name.upper() == u_arg:\n return member\n\n if u_arg == 'CURRENT' and issubclass(cls, HasCurrent):\n return cls.get_current()\n\n if u_arg == 'DEFAULT' and issubclass(cls, HasDefault):\n return cls.default()\n\n message = 
\"Unknown member '{}' of {} enum, valid values are: {}\"\n joined = ','.join(member.name.lower() for member in cls)\n joined += ',current' if issubclass(cls, HasCurrent) else ''\n joined += ',default' if issubclass(cls, HasDefault) else ''\n raise ValueError(message.format(arg, cls.__name__, joined))", "def coerce(cls, val):\n try:\n return cls[val]\n except KeyError:\n try:\n return cls(val)\n except ValueError:\n msg = '\"{0}\" not a valid enum value or name of {1}, possible' \\\n ' names are {2} and possible values are {3}.'\n raise ValueError(msg.format(val, cls.__name__,\n ','.join(cls.get_names()),\n str(cls.get_values())))", "def namedtuple_lower(t):\n return type(t)(*[s.lower() for s in t])", "def raw_enum_class(self: Fdef) -> Optional[Union[type[Enum], str]]:\n self._resolve_if_needed()\n return self._raw_enum_class", "def dict_to_enum_fn(d: Dict[str, Any], enum_class: Type[Enum]) -> Enum:\n return enum_class[d[\"name\"]]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare value of an enum. `enum` is expected to be an instance of `collections.namedtuple`. `value` can be a string of any case or a tuple/list of such; otherwise the value is returned unmodified.
def prepare_enum_value(enum: tp.NamedTuple, value: tp.Any) -> tp.Any: def _converter(x): if isinstance(x, str): return get_caseins_enum_attr(enum, str(x)) return x if isinstance(value, str): # Convert str to int value = _converter(value) elif isinstance(value, (tuple, list)): # Convert each in the list value_type = type(value) value = list(value) for i in range(len(value)): if isinstance(value[i], (tuple, list)): value[i] = prepare_enum_value(enum, value[i]) else: value[i] = _converter(value[i]) return value_type(value) return value
[ "def _parse_enum_value(enum_value_ast: dict) -> \"EnumValueNode\":\n return EnumValueNode(\n value=enum_value_ast[\"value\"],\n location=_parse_location(enum_value_ast[\"loc\"]),\n )", "def enum_converter(value: typing.Union[str, int]) -> enum.Enum:\n value = int_converter(value)\n try:\n return enum_class(value)\n except ValueError:\n raise utils.RequestError(3114)", "def one_of_enum(enum_type):\n class_name = enum_type.__class__.__name__\n\n def validator(node, value):\n if isinstance(value, string_types):\n expected = [str(et.name) for et in list(enum_type)]\n if value not in expected:\n err_msg_fmt = \"%r is not a valid %s\"\n raise Invalid(err_msg_fmt % (value, class_name))\n value = enum_type[value].value\n return value\n\n expected = [et.value for et in list(enum_type)]\n if value not in expected:\n err_msg_fmt = \"%r is not a valid value for %s\"\n raise Invalid(err_msg_fmt % (value, class_name))\n\n return value", "def parse(cls, v) -> 'EnumFieldDefinition':\n if isinstance(v, str):\n return cls(value=v, description=None)\n elif isinstance(v, tuple) and len(v) == 2:\n return cls(value=v[0], description=v[1])\n raise ValueError(\n f'Enum field should be a str or 2-value tuple, got {v}')", "def _transform_enum(self, val, field, value_identifier, suppress_invalid=False):\n data_type = field.type\n\n # Enum options are stored differently based on field type.\n if data_type == \"boolean\":\n enum_options = (\n field.descriptor[\"trueValues\"] + field.descriptor[\"falseValues\"]\n )\n # If the value is \"1.0\" or \"2.0\", make sure the decimals and 0 are stripped.\n if is_int(val):\n val = str(int(float(val)))\n elif data_type == \"integer\":\n # If the field is an integer enum and the value can be intepreted as an integer, return its integer value.\n if is_int(val):\n return (int(float(val)), True)\n enum_options = field.descriptor[\"enum_mapping\"]\n elif data_type == \"string\":\n val = str(val)\n enum_options = field.constraints[\"enum\"]\n\n if field.name in self.field_mappings:\n mapping = self.field_mappings[field.name].get_field_mapping_dict()\n else:\n mapping = {}\n\n if val in mapping:\n # Ignore the approval state, not needed here\n mapped_val, _ = mapping[val]\n\n # Return BLANK_VALUE if mapped value is empty.\n if is_blank(mapped_val):\n return (BLANK_VALUE, True)\n\n # For integer enums, the enum options are a dict mapping from string\n # values to integer values, so we use this dict to transform to int.\n return (\n (mapped_val, True)\n if data_type != \"integer\"\n else (enum_options[mapped_val], True)\n )\n elif data_type == \"integer\":\n case_insensitive_enum_options = {\n option.lower(): num for option, num in enum_options.items()\n }\n if val.lower() in case_insensitive_enum_options:\n enum_index = case_insensitive_enum_options[val.lower()]\n return (enum_index, True)\n else:\n case_insensitive_enum_options = [option.lower() for option in enum_options]\n if val.lower() in case_insensitive_enum_options:\n idx = case_insensitive_enum_options.index(val.lower())\n return (enum_options[idx], True)\n\n invalid_reason = f\"{val} is not in field mapping or valid value set\"\n\n # If field is boolean, include list of valid boolean values.\n if data_type == \"boolean\":\n invalid_reason += f\" ({str(enum_options)})\"\n\n return self._report_invalid_value(\n value_identifier, invalid_reason, suppress_invalid\n )", "def enum_value(enum_str):\n return enum_str.strip(' \\'').lower()", "def getEnumNameFromValue(value):\n for t, tobj in list(TypeDef.typeDict.items()):\n if 
tobj.objtype in {s_ENUM, s_KERNEL}:\n n = tobj.getLabel(value)\n if n is not None:\n return t, n\n return None, None", "def getEnumIdNameFromValue(value, default=None):\n return TypeDef.getValueNameFromType(\"vx_enum_e\", TypeDef.getEnumIdVal(value))", "def _get_name_by_value(value, enum):\n for member in enum:\n if member.value == value:\n return member.name", "def enum_value_for(name_to_enum_entry_map: Dict[str, Any], name: str):\n assert name_to_enum_entry_map is not None\n assert len(name_to_enum_entry_map) >= 1\n assert name is not None\n assert name.strip() == name\n\n if name == '':\n result = None\n else:\n try:\n result = name_to_enum_entry_map[name.lower()]\n except KeyError:\n enum_type = type(next(iter(name_to_enum_entry_map.values())))\n valid_names = sorted(name_to_enum_entry_map.keys())\n raise ValueError('name %r for enum %s must be one of: %s'\n % (name, enum_type.__name__, valid_names))\n return result", "def coerce(cls, val):\n try:\n return cls[val]\n except KeyError:\n try:\n return cls(val)\n except ValueError:\n msg = '\"{0}\" not a valid enum value or name of {1}, possible' \\\n ' names are {2} and possible values are {3}.'\n raise ValueError(msg.format(val, cls.__name__,\n ','.join(cls.get_names()),\n str(cls.get_values())))", "def _EnumValFromText(fdesc, enum_text_val, log):\n log.debug(\"converting enum val:\" + enum_text_val)\n log.debug(\"possible enum vals:\" + str(fdesc.enum_type.values_by_name.keys()))\n\n enum_val = fdesc.enum_type.values_by_name[enum_text_val.upper()].number\n log.debug(\"done enum vals\")\n return enum_val", "def resolve_enum_values(self, values):\n t = int\n i = 0\n names = [v[\"name\"] for v in values]\n for v in values:\n if \"value\" in v:\n a = v[\"value\"].strip()\n # Remove single quotes from single quoted chars (unless part of some expression\n if len(a) == 3 and a[0] == \"'\" and a[2] == \"'\":\n a = v[\"value\"] = a[1]\n if a.lower().startswith(\"0x\"):\n try:\n i = a = int(a, 16)\n except:\n pass\n elif a.isdigit():\n i = a = int(a)\n elif a in names:\n for other in values:\n if other[\"name\"] == a:\n v[\"value\"] = other[\"value\"]\n break\n\n elif '\"' in a or \"'\" in a:\n t = str # only if there are quotes it this a string enum\n else:\n try:\n a = i = ord(a)\n except:\n pass\n # Allow access of what is in the file pre-convert if converted\n if v[\"value\"] != str(a):\n v[\"raw_value\"] = v[\"value\"]\n v[\"value\"] = a\n else:\n v[\"value\"] = i\n try:\n v[\"value\"] = v[\"value\"].replace(\" < < \", \" << \").replace(\" >> \", \" >> \")\n except:\n pass\n i += 1\n return t", "def copy_value_to_enum(src: object,\n tgt: Enum,\n **kwargs) -> Enum:\n try:\n tgt = type(tgt)(src)\n except Exception as e:\n raise TypeError(\n \"Cannot construct Enum type {} from .value type {} with error {}\".format(type(tgt), type(src), str(e)))\n return tgt", "def addEnumValue(self, enumname: 'char const *', valuename: 'char const *', value: 'int') -> \"void\":\n return _coin.SoFieldData_addEnumValue(self, enumname, valuename, value)", "def __call__(cls, value=no_arg, names=None, module=None, type=None, start=1, boundary=None):\n if names is None: # simple value lookup\n return cls.__new__(cls, value)\n # otherwise, functional API: we're creating a new Enum type\n return cls._create_(value, names, module=module, type=type, start=start, boundary=boundary)", "def decode_enum(item_type: type[Enum]) -> Callable[[str], Enum]:\n\n def _decode_enum(val: str) -> Enum:\n return item_type[val]\n\n return _decode_enum", "def 
_make_enum_converter(enum_class: enum.Enum) -> typing.Callable:\n def enum_converter(value: typing.Union[str, int]) -> enum.Enum:\n \"\"\"Convert a number to the relevant value in an enum.\"\"\"\n value = int_converter(value)\n try:\n return enum_class(value)\n except ValueError:\n raise utils.RequestError(3114)\n return enum_converter", "def choices_from_enum(source: type) -> Tuple[Tuple[int, str], ...]:\n result = tuple((s.value, s.name.title()) for s in source)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create value map from enum.
def to_value_map(enum: tp.NamedTuple) -> dict: value_map = dict(zip(tuple(enum), enum._fields)) if -1 not in value_map: value_map[-1] = None return value_map
[ "def to_dict(self):\n d = asdict(self)\n for k, v in d.items():\n if isinstance(v, Enum):\n d[k] = v.value\n if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):\n d[k] = [x.value for x in v]\n return d", "def enum_to_dict_fn(e: Enum) -> Dict[str, Any]:\n return {\"name\": e.name}", "def _transform_enum(self, val, field, value_identifier, suppress_invalid=False):\n data_type = field.type\n\n # Enum options are stored differently based on field type.\n if data_type == \"boolean\":\n enum_options = (\n field.descriptor[\"trueValues\"] + field.descriptor[\"falseValues\"]\n )\n # If the value is \"1.0\" or \"2.0\", make sure the decimals and 0 are stripped.\n if is_int(val):\n val = str(int(float(val)))\n elif data_type == \"integer\":\n # If the field is an integer enum and the value can be intepreted as an integer, return its integer value.\n if is_int(val):\n return (int(float(val)), True)\n enum_options = field.descriptor[\"enum_mapping\"]\n elif data_type == \"string\":\n val = str(val)\n enum_options = field.constraints[\"enum\"]\n\n if field.name in self.field_mappings:\n mapping = self.field_mappings[field.name].get_field_mapping_dict()\n else:\n mapping = {}\n\n if val in mapping:\n # Ignore the approval state, not needed here\n mapped_val, _ = mapping[val]\n\n # Return BLANK_VALUE if mapped value is empty.\n if is_blank(mapped_val):\n return (BLANK_VALUE, True)\n\n # For integer enums, the enum options are a dict mapping from string\n # values to integer values, so we use this dict to transform to int.\n return (\n (mapped_val, True)\n if data_type != \"integer\"\n else (enum_options[mapped_val], True)\n )\n elif data_type == \"integer\":\n case_insensitive_enum_options = {\n option.lower(): num for option, num in enum_options.items()\n }\n if val.lower() in case_insensitive_enum_options:\n enum_index = case_insensitive_enum_options[val.lower()]\n return (enum_index, True)\n else:\n case_insensitive_enum_options = [option.lower() for option in enum_options]\n if val.lower() in case_insensitive_enum_options:\n idx = case_insensitive_enum_options.index(val.lower())\n return (enum_options[idx], True)\n\n invalid_reason = f\"{val} is not in field mapping or valid value set\"\n\n # If field is boolean, include list of valid boolean values.\n if data_type == \"boolean\":\n invalid_reason += f\" ({str(enum_options)})\"\n\n return self._report_invalid_value(\n value_identifier, invalid_reason, suppress_invalid\n )", "def generate_enum(d: dict):\n return Enum(\"Auto\", [k.upper() for k in d.keys()])", "def resolve_enum_values(self, values):\n t = int\n i = 0\n names = [v[\"name\"] for v in values]\n for v in values:\n if \"value\" in v:\n a = v[\"value\"].strip()\n # Remove single quotes from single quoted chars (unless part of some expression\n if len(a) == 3 and a[0] == \"'\" and a[2] == \"'\":\n a = v[\"value\"] = a[1]\n if a.lower().startswith(\"0x\"):\n try:\n i = a = int(a, 16)\n except:\n pass\n elif a.isdigit():\n i = a = int(a)\n elif a in names:\n for other in values:\n if other[\"name\"] == a:\n v[\"value\"] = other[\"value\"]\n break\n\n elif '\"' in a or \"'\" in a:\n t = str # only if there are quotes it this a string enum\n else:\n try:\n a = i = ord(a)\n except:\n pass\n # Allow access of what is in the file pre-convert if converted\n if v[\"value\"] != str(a):\n v[\"raw_value\"] = v[\"value\"]\n v[\"value\"] = a\n else:\n v[\"value\"] = i\n try:\n v[\"value\"] = v[\"value\"].replace(\" < < \", \" << \").replace(\" >> \", \" >> \")\n except:\n pass\n i += 1\n 
return t", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n # get the reverse mapping\n inv_map = {}\n for k, v in enums.iteritems():\n inv_map[v] = inv_map.get(v, [])\n inv_map[v].append(k)\n enums[\"inverse_map\"] = inv_map\n return type('Enum', (), enums)", "def dict_to_enum_fn(d: Dict[str, Any], enum_class: Type[Enum]) -> Enum:\n return enum_class[d[\"name\"]]", "def define_enum(arg_enumKeys, arg_enumFirstValue = 0):\n enumDict = {}\n for idx, tKey in enumerate(arg_enumKeys):\n enumDict[tKey] = idx + arg_enumFirstValue\n\n return enumDict", "def options_from(self):\n return [(x.name, x.value) for x in self.enum_type]", "def enum_value_for(name_to_enum_entry_map: Dict[str, Any], name: str):\n assert name_to_enum_entry_map is not None\n assert len(name_to_enum_entry_map) >= 1\n assert name is not None\n assert name.strip() == name\n\n if name == '':\n result = None\n else:\n try:\n result = name_to_enum_entry_map[name.lower()]\n except KeyError:\n enum_type = type(next(iter(name_to_enum_entry_map.values())))\n valid_names = sorted(name_to_enum_entry_map.keys())\n raise ValueError('name %r for enum %s must be one of: %s'\n % (name, enum_type.__name__, valid_names))\n return result", "def _get_python_to_field_type_map(self):\n\n result = Context()\n result[int] = FormFieldTypeEnum.INTEGER\n result[float] = FormFieldTypeEnum.FLOAT\n result[Decimal] = FormFieldTypeEnum.FLOAT\n result[(int, float, Decimal)] = FormFieldTypeEnum.NUMBER\n result[(float, Decimal)] = FormFieldTypeEnum.FLOAT\n result[(int, Decimal)] = FormFieldTypeEnum.NUMBER\n result[(int, float)] = FormFieldTypeEnum.NUMBER\n result[str] = FormFieldTypeEnum.STRING\n result[bool] = FormFieldTypeEnum.BOOLEAN\n result[dict] = FormFieldTypeEnum.OBJECT\n result[date] = FormFieldTypeEnum.DATE\n result[datetime] = FormFieldTypeEnum.DATETIME\n result[time] = FormFieldTypeEnum.TIME\n result[UUID] = FormFieldTypeEnum.UUID\n return result", "def prepare_enum_value(enum: tp.NamedTuple, value: tp.Any) -> tp.Any:\n\n def _converter(x):\n if isinstance(x, str):\n return get_caseins_enum_attr(enum, str(x))\n return x\n\n if isinstance(value, str):\n # Convert str to int\n value = _converter(value)\n elif isinstance(value, (tuple, list)):\n # Convert each in the list\n value_type = type(value)\n value = list(value)\n for i in range(len(value)):\n if isinstance(value[i], (tuple, list)):\n value[i] = prepare_enum_value(enum, value[i])\n else:\n value[i] = _converter(value[i])\n return value_type(value)\n return value", "def coerce(cls, val):\n try:\n return cls[val]\n except KeyError:\n try:\n return cls(val)\n except ValueError:\n msg = '\"{0}\" not a valid enum value or name of {1}, possible' \\\n ' names are {2} and possible values are {3}.'\n raise ValueError(msg.format(val, cls.__name__,\n ','.join(cls.get_names()),\n str(cls.get_values())))", "def _read_enums(self, node):\n gname = (node.attrib['namespace'], node.attrib.get('group', '-'))\n try:\n gdata = self.enums[gname]\n except KeyError:\n gdata = {}\n self.enums[gname] = gdata\n for child in node:\n if child.tag != 'enum':\n assert child.tag == 'unused'\n continue\n value = int(child.attrib['value'], 0)\n gdata[enum_name(child.attrib['name'])] = value\n self.enums.update(gdata)", "def create_enum(**enums):\n return type('Enum', (), enums)", "def inline_enum_constants(syntax_tree : ADT) -> ADT:\n\n repl = {\n name : EEnumEntry(name).with_type(t)\n for t in all_types(syntax_tree)\n if isinstance(t, TEnum)\n for name in t.cases }\n return 
subst(syntax_tree, repl)", "def _EnumValFromText(fdesc, enum_text_val, log):\n log.debug(\"converting enum val:\" + enum_text_val)\n log.debug(\"possible enum vals:\" + str(fdesc.enum_type.values_by_name.keys()))\n\n enum_val = fdesc.enum_type.values_by_name[enum_text_val.upper()].number\n log.debug(\"done enum vals\")\n return enum_val", "def enum_converter(value: typing.Union[str, int]) -> enum.Enum:\n value = int_converter(value)\n try:\n return enum_class(value)\n except ValueError:\n raise utils.RequestError(3114)", "def getEnumNameFromValue(value):\n for t, tobj in list(TypeDef.typeDict.items()):\n if tobj.objtype in {s_ENUM, s_KERNEL}:\n n = tobj.getLabel(value)\n if n is not None:\n return t, n\n return None, None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a transaction based on cart information.
def create_transaction_from_cart(cart: Cart) -> DbTransaction: res = DbTransaction(type=TransactionType.PURCHASE, currency=cart.currency, timestamp=now(), total_amount=cart.total_amount, state=TransactionState.INITIALIZED, id=ObjectId()) items = [] vat: Dict[Decimal, Vat] = {} for item in cart.items: ps = item.product_set.fetch() t_item = TransactionItem(amount=item.amount, count=item.count, product_set_id=ps.full_id, product_set_title=item.product_set_title) if item.manual_activation is not None: t_item.manual_activation = item.manual_activation if item.mtb_product_owner is not None: t_item.mtb_product_owner = item.mtb_product_owner if item.mtb_bearer is not None: t_item.mtb_bearer = item.mtb_bearer if item.start_of_validity is not None: t_item.start_of_validity = item.start_of_validity items.append(t_item) for percentage, amount in ps.vat().items(): amount *= item.count if percentage in vat: vat[percentage].amount += amount else: vat[percentage] = Vat(amount=amount, percentage=percentage) res.items = items res.vat = list(vat.values()) res.discount_codes = list(cart.discount_codes) res.description = cart.name return res
[ "def create(self, request):\n\n cart = Cart.objects.create(user=request.user)\n return Response({'cart': CartSerializer(cart).data})", "def create_cart_checkout(self,\r\n account_number,\r\n cart_id,\r\n cart_checkout_form):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/accounts/{account_number}/carts/{cart_id}/checkout\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"account_number\": account_number,\r\n \"cart_id\": cart_id\r\n })\r\n \r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\",\r\n \"content-type\": \"application/json; charset=utf-8\",\r\n \"X-Auth-Token\": Configuration.x_auth_token,\r\n \"X-Auth-Token\": Configuration.x_auth_token\r\n }\r\n\r\n # Prepare the API call.\r\n http_request = self.http_client.post(query_url, headers=headers, parameters=APIHelper.json_serialize(cart_checkout_form))\r\n\r\n # Invoke the API call to fetch the response.\r\n response = self.http_client.execute_as_string(http_request);\r\n\r\n # Endpoint error handling using HTTP status codes.\r\n if response.status_code == 403:\r\n raise APIException(\"User not authorized to perform the operation\", 403, response.raw_body)\r\n elif response.status_code == 404:\r\n raise APIException(\"Resource not found\", 404, response.raw_body)\r\n\r\n # Global error handling using HTTP status codes.\r\n self.validate_response(response) \r\n\r\n return response.raw_body", "def create_order(request):\n if request.method == 'POST':\n current_user = request.user\n payment_info = Payment.objects.get(user_id=current_user)\n contact_info = ContactInformation.objects.get(user_id=current_user)\n instance = Order.objects.create(\n user=current_user, payment_info=payment_info, contact_info=contact_info, processed=True)\n user_cart = Cart.objects.filter(user=current_user, order_id__exact='')\n\n for user in user_cart:\n instance.cart.add(user)\n user_cart.update(order_id=instance.id)\n\n return render(request, 'order/display_order.html')", "def shoppingcart_create(request):\n \n #initiate response\n faultstring = None\n resp_code = 201\n \n #create new cart\n try:\n new_cart_id = create(\"\"\"INSERT INTO webshop.shoppingcart DEFAULT VALUES RETURNING id\"\"\")[0][0]\n except Exception as e:\n faultstring = str(e)\n resp_code = 500\n\n #create responseobject\n resp = {}\n if faultstring:\n resp['faultstring'] = faultstring\n else:\n resp['shoppingcart'] = {\"id\": new_cart_id}\n\n respjson = json.dumps(resp)\n return Response(json_body=json.loads(respjson), status=resp_code)", "def test_create_cart(self):\n add_products([{'name':'A','price':5},\n {'name':'B','price':10},\n {'name':'C','price':15}\n ])\n with self.client:\n response = self.client.post(\n '/cart',\n data=json.dumps({\"items\":[\n {\"p_id\":1,\"quantity\":2},\n {\"p_id\":2,\"quantity\":5},]\n }),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data[\"cartid\"],'1')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 200)", "def create_transaction():\n request_dict = request.get_json()\n if not validate_transaction_dict(request_dict):\n return 'Missing parameters', 400\n if not 
user_handler.find_users_by_uid(request_dict['user_id']):\n return 'Unknown user', 400\n transaction = Transaction(request_dict)\n transaction_handler.insert_transaction(transaction)\n return 'Transaction inserted successfully', 200", "def create_transaction(info_and_status):\n # get info from the inventory\n list_list = list_inventory()\n for item in list_list:\n if item[0].lower() == info_and_status[0]:\n item_list = item\n # set info for use\n name = info_and_status[0]\n paid = str((int(item_list[3]) * .07) + int(item_list[3]))\n deposit = str((int(item_list[-1]) / 10))\n # get revenue\n with open(\"current_revenue.txt\", \"r\") as file:\n revenue = file.read()\n # convert revenue from string\n if float(revenue).is_integer():\n revenue = int(revenue)\n else:\n revenue = float(revenue)\n\n # create string\n if info_and_status[1] == \"renting\":\n if float(paid).is_integer():\n revenue += int(paid)\n else:\n revenue += float(paid)\n trans_string = \"Rented: \" + name + \"; Paid: \" + paid + \"; Deposit: \" + deposit\n trans_string += \"........Total Revenue: \" + str(revenue)\n\n\n elif info_and_status[1] == \"returning\":\n trans_string = \"Returned: \" + name\n trans_string += \"........Total Revenue: \" + str(revenue)\n\n elif info_and_status[1] == \"replacing\":\n if float(deposit).is_integer():\n revenue += int(float(deposit))\n else:\n revenue += float(deposit)\n trans_string = \"Replaced: \" + name + \"; Paid: \" + deposit\n trans_string += \"........Total Revenue: \" + str(revenue)\n\n else:\n return \"Error!\"\n\n # write revenue to file\n with open(\"current_revenue.txt\", \"w\") as file:\n file.write(str(revenue))\n\n return trans_string", "def createTransaction(self,\n allTransactions: Ledger,\n amount: tuple,\n OPType: tuple,\n OP_Parameters: tuple,\n isCoinBase=False,\n **kwargs):\n newTransaction = Transaction(isCoinBase=isCoinBase, **kwargs)\n \"\"\" Write Your Code Below \"\"\"\n # TODO: Implement Transaction Initialization Here\n\n return newTransaction # Do Not modify this line", "def create_items(self,\r\n account_number,\r\n cart_id,\r\n item_form):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/accounts/{account_number}/carts/{cart_id}/items\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"account_number\": account_number,\r\n \"cart_id\": cart_id\r\n })\r\n \r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\",\r\n \"content-type\": \"application/json; charset=utf-8\",\r\n \"X-Auth-Token\": Configuration.x_auth_token,\r\n \"X-Auth-Token\": Configuration.x_auth_token\r\n }\r\n\r\n # Prepare the API call.\r\n http_request = self.http_client.post(query_url, headers=headers, parameters=APIHelper.json_serialize(item_form))\r\n\r\n # Invoke the API call to fetch the response.\r\n response = self.http_client.execute_as_string(http_request);\r\n\r\n # Endpoint error handling using HTTP status codes.\r\n if response.status_code == 403:\r\n raise APIException(\"User not authorized to perform the operation\", 403, response.raw_body)\r\n elif response.status_code == 404:\r\n raise APIException(\"Resource\", 404, response.raw_body)\r\n\r\n # Global error handling using HTTP status codes.\r\n self.validate_response(response) \r\n\r\n return 
response.raw_body", "def create_purchase(request, book, quantity, is_gift, is_priest):\n p = Purchase()\n p.status = \"pending\"\n p.book = book\n p.quantity = quantity\n\n if is_priest:\n p.price = book.priest_price\n else:\n p.price = book.get_price_for_quantity(quantity)\n\n p.total_charge = p.price * p.quantity\n\n if is_gift:\n p.generate_gift_code()\n\n if request.user.is_authenticated():\n p.buyer_user = request.user\n p.buyer_email = request.user.email\n\n p.save()\n\n return p", "def create_carts(self,\r\n account_number):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/accounts/{account_number}/carts\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"account_number\": account_number\r\n })\r\n \r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\",\r\n \"X-Auth-Token\": Configuration.x_auth_token,\r\n \"X-Auth-Token\": Configuration.x_auth_token\r\n }\r\n\r\n # Prepare the API call.\r\n http_request = self.http_client.post(query_url, headers=headers)\r\n\r\n # Invoke the API call to fetch the response.\r\n response = self.http_client.execute_as_string(http_request);\r\n\r\n # Endpoint error handling using HTTP status codes.\r\n if response.status_code == 403:\r\n raise APIException(\"User not authorized to perform the operation\", 403, response.raw_body)\r\n elif response.status_code == 404:\r\n raise APIException(\"Resource not found\", 404, response.raw_body)\r\n\r\n # Global error handling using HTTP status codes.\r\n self.validate_response(response) \r\n\r\n return response.raw_body", "def add_to_cart(self, cart):\n if (self.is_assigned() or self.is_reserved()):\n raise Exception('Furniture peice assigned or reserved')\n cf = self.cartfurniture_set.create(cart=cart)\n cf.save()\n return self", "def cart(request):\n\n return {'cart': Cart(request)}", "def test_create_transaction(self):\n ta = self.transaction\n\n # Make sure the data fields have been filled\n self.assertEqual(len(ta.key), 32)\n self.assertNotEqual(ta.time_created, None)\n self.assertEqual(ta.firstname, \"Donald\")\n self.assertEqual(ta.lastname, \"Duck\")\n self.assertEqual(ta.company, \"None\")\n self.assertEqual(ta.email, \"donald.duck@duckburg.inv\")\n self.assertEqual(ta.telephone, \"991234567\")\n self.assertEqual(ta.mobile, \"+358991234567\")\n self.assertEqual(ta.street, \"1313 Webfoot Walk\")\n self.assertEqual(ta.postalcode, \"00000\")\n self.assertEqual(ta.city, \"Duckburg\")\n self.assertEqual(ta.country, \"US\")\n self.assertEqual(ta.information, \"Quack, damn you!\")\n self.assertEqual(ta.token, '')\n self.assertEqual(ta.time_pending, None)\n self.assertEqual(ta.time_cancelled, None)\n self.assertEqual(ta.time_paid, None)\n self.assertEqual(ta.payment_method_name, '')\n\n # Test properties\n self.assertEqual(ta.is_cancelled, False)\n self.assertEqual(ta.is_delivered, False)\n self.assertEqual(ta.is_pending, False)\n self.assertEqual(ta.is_paid, False)\n self.assertEqual(ta.full_name, \"Donald Duck\")\n\n # Make sure this doesn't crash\n self.assertEqual(ta.qr_code.startswith(\"http\"), True)\n\n # Check price functions\n self.assertEqual(ta.get_transaction_items().count(), 6)\n self.assertEqual(ta.get_total_price(), 70) # Note discounts\n 
self.assertEqual(ta.get_storeitem_count(self.items[0]), 1)\n self.assertEqual(ta.get_storeitem_count(self.items[2]), 5)\n self.assertEqual(ta.get_storeitem_count(self.items[1]), 0)\n\n # Make sure transaction items went through\n for item in ta.get_transaction_items():\n self.assertIn(item.item.id, [self.items[0].id, self.items[2].id])\n self.assertNotEqual(item.variant, None)\n self.assertEqual(item.time_delivered, None)\n self.assertEqual(len(item.key), 32)\n self.assertEqual(item.is_delivered, False)\n self.assertEqual(item.qr_code.startswith(\"http\"), True)\n\n # Check amounts (manually)\n self.assertEqual(TransactionItem.objects.filter(transaction=ta, item=self.items[0]).count(), 1)\n self.assertEqual(TransactionItem.objects.filter(transaction=ta, item=self.items[2]).count(), 5)\n\n # Check discount(s)\n discount_items = TransactionItem.objects.filter(transaction=ta, item=self.items[2])\n for item in discount_items:\n self.assertEqual(item.original_price, 20)\n self.assertEqual(item.purchase_price, 10)\n non_discount_item = TransactionItem.objects.get(transaction=ta, item=self.items[0])\n self.assertEqual(non_discount_item.original_price, 20)\n self.assertEqual(non_discount_item.purchase_price, 20)", "def create_products():", "def add_item(current_cart, items_to_add):\n\n pass", "def place_order():\n session = connect()\n try:\n user_id = current_user.id\n except AttributeError:\n return \"Error getting user ID\"\n # Query for cart contents\n items = session.query(Cart).filter_by(user_id=user_id).all()\n # Redirect user if no items in order\n if not items:\n flash(\"No items in order!\")\n return redirect(url_for('show_cart'))\n # Make sure customer's address is valid\n address = get_address(current_user.address_id)\n destination = get_address_string(address)\n if validate_address(destination) is False:\n flash(\"Address is invalid or outside delivery radius!\")\n return redirect(url_for('show_cart'))\n # Create new entry in order table\n order_time = datetime.datetime.now()\n delivery_time = order_time + datetime.timedelta(0, get_delivery_time())\n new_order = Order(user_id=user_id, order_time=order_time,\n delivery_time=delivery_time)\n session.add(new_order)\n order = session.query(Order).filter_by(order_time=order_time).one()\n # Add each item to order_item table and remove from cart\n for i in items:\n order_item = OrderItem(order_id=order.id, menu_item_id=i.menu_item_id,\n quantity=i.quantity)\n session.add(order_item)\n session.delete(i)\n session.commit()\n ordered_items = session.query(OrderView).filter_by(order_id=order.id).all()\n # Calculate totals\n subtotal = 0.0\n for item in ordered_items:\n subtotal += float(item.price) * item.quantity\n if subtotal > 0:\n fee = DELIVERY_FEE\n else:\n fee = 0\n tax = (subtotal + fee) * 0.07\n total = subtotal + fee + tax\n subtotal = \"{0:.2f}\".format(subtotal)\n fee = \"{0:.2f}\".format(fee)\n tax = \"{0:.2f}\".format(tax)\n total = \"{0:.2f}\".format(total)\n # Convert delivery time to EST and format for display\n delivery_time = delivery_time - datetime.timedelta(hours=4)\n delivery_time = delivery_time.strftime('%I:%M %p')\n # Form URL for delivery map\n origin = encode_string(RESTAURANT_ADDRESS)\n destination = encode_string(destination)\n map_url = 'https://www.google.com/maps/embed/v1/directions?origin='\n map_url += origin\n map_url += '&destination='\n map_url += destination\n map_url += '&key='\n map_url += APP_KEY\n return render_template('orderComplete.html', delivery_time=delivery_time,\n items=ordered_items, 
subtotal=subtotal, fee=fee,\n tax=tax, total=total, map_url=map_url,\n title=\"Order Complete\")", "def create_order(coin, quantity):\n return client.create_order(\n symbol = coin,\n side = 'BUY',\n type = 'MARKET',\n quantity = quantity\n )", "def clone(self, actor):\n\n try:\n c = get_cursor()\n\n access_id = new_access_id(16)\n c.execute(\"\"\"\n insert into cart\n (\n access_id,\n cart_status_id,\n shipping_id,\n cc_encrypt\n )\n select %s, %s, shipping_id, cc_encrypt from cart\n where cart_id = %s\"\"\",\n (access_id, STATUS_NEW, self.cart['cart_id']))\n new_cart_id = c.lastrowid\n\n for line_item in self.cart['line_items']:\n c.execute(\"\"\"\n insert into line_item (\n cart_id,\n product_id,\n price,\n quantity,\n seq\n )\n values ( %s, %s, %s, %s, %s )\"\"\",\n (new_cart_id, line_item['product_id'], line_item['price'],\n line_item['quantity'], line_item['seq']))\n new_line_item_id = c.lastrowid\n new_build_id = Build.clone(line_item['build_access_id'], new_line_item_id)\n\n c.execute(\"\"\"\n insert into address\n (\n cart_id,\n bill_first_name,\n bill_last_name,\n bill_company_name,\n bill_address1,\n bill_address2,\n bill_city,\n bill_state_id,\n bill_province,\n bill_postal_code,\n bill_country_id,\n bill_phone,\n ship_first_name,\n ship_last_name,\n ship_company_name,\n ship_address1,\n ship_address2,\n ship_city,\n ship_state_id,\n ship_province,\n ship_postal_code,\n ship_country_id,\n ship_phone,\n email\n )\n select %s,\n bill_first_name,\n bill_last_name,\n bill_company_name,\n bill_address1,\n bill_address2,\n bill_city,\n bill_state_id,\n bill_province,\n bill_postal_code,\n bill_country_id,\n bill_phone,\n ship_first_name,\n ship_last_name,\n ship_company_name,\n ship_address1,\n ship_address2,\n ship_city,\n ship_state_id,\n ship_province,\n ship_postal_code,\n ship_country_id,\n ship_phone,\n email\n from address\n where address.cart_id = %s\"\"\",\n (new_cart_id, self.cart['cart_id']))\n\n import db.Cart as Cart\n new_cart = Cart.ShoppingCart(cart_id=new_cart_id)\n new_cart.recompute()\n new_cart.log(\"Cloned from cart {}.\".format(self.cart['cart_id']), actor)\n self.log(\"Cloned into cart {}.\".format(new_cart_id), actor)\n return new_cart_id\n\n except Exception as e:\n print \"Internal error: \" + e.args[0]\n import traceback\n traceback.print_exc()\n raise DbError(\"Internal error: \" + e.args[0])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finalize purchase transaction and create receipt.
def _finalize_traveller_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \ -> None: if transaction.payment_method_transaction_data: payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id) psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id)) payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data) try: psd.reserve_payment(payment_transaction) except PaymentServiceException as pse: logger.error("Failed to reserve payment: %s", pse) transaction.state = TransactionState.CANCELLED if isinstance(pse, UserInteractionCanceledException) \ else TransactionState.DENIED _release_purse_reservation(transaction) transaction.save() raise pse else: psd = None payment_transaction = None try: _finalize_create_mtb_products(transaction) except Exception as exc: logger.error("Failed to create product: %s", exc) transaction.state = TransactionState.ISSUE_ERROR _release_purse_reservation(transaction) _release_payment_method_reservation(transaction, psd, payment_transaction) _revert_mtb_products(auth_ctx, transaction) if payment_transaction: transaction.payment_method_transaction_data = payment_transaction.save_dict() transaction.save() raise exc if transaction.purse_reservation_id is not None: record = NewPurseRecord(transaction_id=str(transaction.id), reservation_id=transaction.purse_reservation_id, amount=-float(transaction.purse_amount), refundable=False) try: res = purse_create_record(transaction.wallet.purse_id, record) except ApiException as ae: logger.error("Failed to record purse transaction: %s", ae) transaction.state = TransactionState.DENIED _revert_mtb_products(auth_ctx, transaction) _release_payment_method_reservation(transaction, psd, payment_transaction) if payment_transaction: transaction.payment_method_transaction_data = payment_transaction.save_dict() transaction.save() raise ae transaction.purse_record_ids.append(res.id) transaction.purse_reservation_id = None if transaction.payment_method_transaction_data: try: transaction.payment_reference = psd.finalize_payment(payment_transaction, finalization_data) except PaymentServiceException as pse: logger.error("Failed to reserve payment: %s", pse) transaction.state = TransactionState.DENIED _revert_mtb_products(auth_ctx, transaction) _revert_purse_record(transaction) if payment_transaction: transaction.payment_method_transaction_data = payment_transaction.save_dict() transaction.save() raise pse _mark_purchased(transaction) # TODO purge some, transaction.payment_method_transaction_data try: if payment_transaction: transaction.payment_method_transaction_data = payment_transaction.save_dict() transaction.save() except Exception as exc: # TODO finer error handling transaction.state = TransactionState.ERROR _revert_mtb_products(auth_ctx, transaction) if transaction.purse_record_ids: # TODO revert record? pass if transaction.payment_method_transaction_data is not None: # TODO release payment reservation/payment? pass transaction.save() raise exc
[ "def finalize_transaction_receipt(auth_ctx: AuthorizationContext, transaction_id: str,\n receipt_req: Dict[str, Any]) -> Receipt:\n req = ReceiptRequest.from_dict(receipt_req)\n transaction = finalize_transaction(auth_ctx, transaction_id, req.payment_reference)\n return transaction.to_models(Receipt, transaction_id=str(transaction.id))", "def _finalize_vendor_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \\\n -> None:\n if not finalization_data:\n abort(400, \"Vendor finalization requires payment reference\")\n transaction.payment_reference = finalization_data\n try:\n _finalize_create_mtb_products(transaction)\n except Exception as exc:\n transaction.state = TransactionState.ISSUE_ERROR\n _revert_mtb_products(auth_ctx, transaction)\n transaction.save()\n raise exc\n _mark_purchased(transaction)\n transaction.save()", "def make_purchase(self):\n sale_type = self.get_sale_type()\n if len(self.rhslist) != 2:\n raise self.BrokerError(\"You must ask for both an amount and a price.\")\n amount = self.get_amount(self.rhslist[0])\n price = self.get_amount(self.rhslist[1], \"price\")\n character = self.caller.player.char_ob\n cost = price * amount\n if cost > character.currency:\n raise PayError(\n \"You cannot afford to pay %s when you only have %s silver.\"\n % (cost, character.currency)\n )\n material_type = None\n if sale_type == BrokeredSale.ACTION_POINTS:\n from evennia.server.models import ServerConfig\n\n disabled = ServerConfig.objects.conf(key=\"DISABLE_AP_TRANSFER\")\n if disabled:\n raise self.BrokerError(\"Action Point sales are temporarily disabled.\")\n elif sale_type == BrokeredSale.CRAFTING_MATERIALS:\n try:\n material_type = CraftingMaterialType.objects.get(name__iexact=self.lhs)\n except CraftingMaterialType.DoesNotExist:\n raise self.BrokerError(\n \"Could not find a material by the name '%s'.\" % self.lhs\n )\n if material_type.contraband:\n raise self.BrokerError(\n \"You can't put contraband on the broker! 
Seriously, how are you still alive?\"\n )\n character.pay_money(cost)\n dompc = self.caller.player_ob.Dominion\n sell_orders = BrokeredSale.objects.filter(\n broker_type=BrokeredSale.SALE,\n price__lte=price,\n sale_type=sale_type,\n amount__gt=0,\n crafting_material_type=material_type,\n ).order_by(\"price\")\n purchase, created = dompc.brokered_sales.get_or_create(\n price=price,\n sale_type=sale_type,\n crafting_material_type=material_type,\n broker_type=BrokeredSale.PURCHASE,\n )\n if not created:\n original = amount\n amount += purchase.amount\n else:\n original = 0\n for order in sell_orders:\n if amount > 0:\n seller = order.owner\n if (\n seller != dompc\n and order.owner.player.roster.current_account\n != self.caller.roster.current_account\n ):\n if amount > order.amount:\n buyamount = order.amount\n else:\n buyamount = amount\n order.make_purchase(dompc, buyamount)\n self.msg(\n \"You have bought %s %s from %s for %s silver.\"\n % (\n buyamount,\n order.material_name,\n seller,\n order.price * buyamount,\n )\n )\n amount -= buyamount\n if order.price < price:\n character.pay_money(-(price - order.price) * buyamount)\n\n purchase.amount = amount\n purchase.save()\n if amount == 0:\n purchase.delete()\n created = None\n if created:\n self.msg(\n \"You have placed an order for %s %s for %s silver each and %s total.\"\n % (amount, purchase.material_name, price, purchase.amount * price)\n )\n else:\n if amount > 0:\n self.msg(\n \"Added %s to the existing order of %s for %s silver each and %s total.\"\n % (original, purchase.material_name, price, purchase.amount * price)\n )", "def notify_purchased(self):\n notify(CheckoutComplete(self.old_cart))", "def renew_subscription(self, past_receipt, payment_info):\n self.payment = Payment(profile=self.invoice.profile,\n amount=self.invoice.total,\n invoice=self.invoice,\n created=timezone.now())\n self.payment.result = payment_info\n\n self.transaction_submitted = True\n\n self.payment.success = True\n self.payment.transaction = past_receipt.transaction\n self.payment.payee_full_name = \" \".join([self.invoice.profile.user.first_name, self.invoice.profile.user.last_name])\n \n self.payment.save()\n \n self.update_invoice_status(Invoice.InvoiceStatus.COMPLETE)\n\n self.create_receipts(self.invoice.order_items.all())", "def _create_from_receipt(self, debit_note, receipt, account, bucket):\n from Acquire.Accounting import DebitNote as _DebitNote\n from Acquire.Accounting import Refund as _Refund\n from Acquire.Accounting import TransactionRecord as _TransactionRecord\n from Acquire.Accounting import TransactionState as _TransactionState\n from Acquire.Accounting import Account as _Account\n from Acquire.Accounting import Receipt as _Receipt\n\n if not isinstance(debit_note, _DebitNote):\n raise TypeError(\"You can only create a CreditNote \"\n \"with a DebitNote\")\n\n if not isinstance(receipt, _Receipt):\n raise TypeError(\"You can only receipt a Receipt object: %s\"\n % str(receipt.__class__))\n\n # get the transaction behind this receipt and ensure it is in the\n # receipting state...\n transaction = _TransactionRecord.load_test_and_set(\n receipt.transaction_uid(),\n _TransactionState.RECEIPTING,\n _TransactionState.RECEIPTING, bucket=bucket)\n\n # ensure that the receipt matches the transaction...\n transaction.assert_matching_receipt(receipt)\n\n if account is None:\n account = _Account(transaction.credit_account_uid(), bucket)\n elif account.uid() != receipt.credit_account_uid():\n raise ValueError(\"The accounts do not match when 
crediting \"\n \"the receipt: %s versus %s\" %\n (account.uid(), receipt.credit_account_uid()))\n\n (uid, datetime) = account._credit_receipt(debit_note, receipt, bucket)\n\n self._account_uid = account.uid()\n self._debit_account_uid = debit_note.account_uid()\n self._datetime = datetime\n self._uid = uid\n self._debit_note_uid = debit_note.uid()\n self._value = debit_note.value()\n self._is_provisional = debit_note.is_provisional()\n\n if debit_note.is_provisional():\n self._receipt_by = debit_note.receipt_by()\n\n # finally(!) move the transaction into the receipted state\n _TransactionRecord.load_test_and_set(\n receipt.transaction_uid(),\n _TransactionState.RECEIPTING,\n _TransactionState.RECEIPTED, bucket=bucket)", "def free_payment(self):\n self.payment = Payment(profile=self.invoice.profile,\n amount=self.invoice.total,\n provider=self.provider,\n invoice=self.invoice,\n created=timezone.now()\n )\n self.payment.save()\n self.transaction_submitted = True\n\n self.payment.success = True\n self.payment.transaction = f\"{self.payment.uuid}-free\"\n self.payment.payee_full_name = \" \".join([self.invoice.profile.user.first_name, self.invoice.profile.user.last_name])\n self.payment.save()\n \n self.update_invoice_status(Invoice.InvoiceStatus.COMPLETE)\n\n self.create_receipts(self.invoice.order_items.all())", "def finalize(request_id, success):\n log.info('Provisioning finished.')\n\n if request_id:\n notify_end(request_id, success)\n else:\n log.info('There is not Orchestrate request_id. Skipping notification.')\n\n if success:\n cleanup()", "def create_receipts(self, order_items):\n for order_item in order_items.all():\n self.create_order_item_receipt(order_item)", "def _finalize_create_mtb_products(transaction):\n transaction.cancellable = True\n transaction.cancellable_expire = expire(shared.config.parameters.purchase.cancel_ttl_max)\n try:\n for item in transaction.items:\n item.mtb_product_ids = []\n for mp in create_mtb_products(item, transaction):\n item.mtb_product_ids.append(mp.id)\n if not mp.cancellable:\n transaction.cancellable = False\n transaction.cancellable_expire = None\n elif transaction.cancellable_expire and mp.cancellable_expire \\\n and transaction.cancellable_expire > mp.cancellable_expire:\n transaction.cancellable_expire = mp.cancellable_expire\n except Exception as exc:\n # TODO remove mtb_products\n transaction.cancellable_expire = None\n transaction.cancellable = False\n raise exc", "def void(self, actor):\n\n\tfrom authorize import AuthorizeClient, CreditCard, Address, exceptions\n\timport db.Db as Db\n try:\n try:\n cart = self.cart\n from authorize import AuthorizeClient, CreditCard, Address, exceptions\n client = AuthorizeClient(Db.auth_id, Db.auth_key, debug=False)\n transaction = client.transaction(cart['transaction_id'])\n transaction.void()\n self.log(\"Transaction {} voided.\".format(self.cart['transaction_id']), actor)\n except Exception as e:\n self.log(\"Could not void transaction {}: {}\".format(self.cart['transaction_id'], e.args[0]), actor)\n\n\t c = get_cursor()\n c.execute(\"\"\"\n update cart\n set transaction_id = null\n where cart_id = %s\"\"\",\n (self.cart['cart_id']))\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def construct_purchase(self, lines, line_number):\n # Details starts after reference number, post date and transaction date\n iterator = line_number + 3\n details = \"\"\n\n while not self.is_money(lines[iterator]):\n 
details = details + \" \" + lines[iterator].replace('\\n', '')\n iterator = iterator + 1\n\n amount = lines[iterator]\n\n purchase = Purchase(ref_num=lines[line_number],\n transaction_date=lines[line_number + 1],\n post_date=lines[line_number + 2],\n details=details,\n amount=amount.replace(\",\", \"\"))\n\n return (purchase, iterator)", "def _release_purse_reservation(transaction: DbTransaction) -> None:\n if transaction.purse_reservation_id is not None:\n try:\n delete_reservation(transaction.wallet.purse_id, transaction.purse_reservation_id)\n transaction.purse_reservation_id = None\n transaction.save()\n except ApiException as ae:\n logger.error(\"Failed to delete purse reservation, purse=%s, reservation=%s\",\n transaction.wallet.purse_id, transaction.purse_reservation_id, exc_info=ae)", "def test_create_receipt(self):\n f = Faker('fi_FI')\n subject = \"Test email #{}\".format(randint(10000, 99999))\n email_from = 'Instanssi.org <{}>'.format(f.email())\n email_to = f.email()\n p = ReceiptParams()\n p.order_number(randint(10000, 99999))\n p.receipt_date(timezone.now())\n p.order_date(timezone.now())\n p.first_name(f.first_name())\n p.last_name(f.last_name())\n p.email(email_to)\n p.mobile(f.phone_number())\n p.telephone(f.phone_number())\n p.company(f.company())\n p.street(f.street_address())\n p.city(f.city())\n p.postal_code(f.postcode())\n p.country(f.country())\n p.transaction_url(get_url(reverse('store:ta_view', args=(\"1234abcd\",))))\n for k in range(3):\n p.add_item(\n item_id=randint(0, 999999),\n price=Decimal(randint(0, 100)),\n name=\"Test product name goes here {}\".format(k),\n amount=randint(1, 5),\n tax='0%'\n )\n\n # Just make sure everything looks like it should in the database object\n r = Receipt.create(\n mail_to=email_to,\n mail_from=email_from,\n subject=subject,\n params=p)\n self.assertEqual(r.subject, subject)\n self.assertEqual(r.mail_from, email_from)\n self.assertEqual(r.mail_to, email_to)\n self.assertIsNotNone(r.content)\n self.assertIsNotNone(r.params)\n self.assertIsNone(r.sent)\n\n # Try to load from database, make sure everything matches\n n = ReceiptParams(r.params)\n self.assertDictEqual(p.params, n.params)\n\n # Send and make sure date is set\n r.send()\n self.assertIsNotNone(r.sent)", "def conduct_transaction(self,trans,o):\n pass", "def build_receipt(quantities, inventory):\n return tuple(\n ReceiptItem(v, inventory[k].label, inventory[k].unitprice * v)\n for k, v in quantities.items()\n )", "def on_transaction_finish(self):\n print(\"Transaction successful\")", "def create_purchase(request, book, quantity, is_gift, is_priest):\n p = Purchase()\n p.status = \"pending\"\n p.book = book\n p.quantity = quantity\n\n if is_priest:\n p.price = book.priest_price\n else:\n p.price = book.get_price_for_quantity(quantity)\n\n p.total_charge = p.price * p.quantity\n\n if is_gift:\n p.generate_gift_code()\n\n if request.user.is_authenticated():\n p.buyer_user = request.user\n p.buyer_email = request.user.email\n\n p.save()\n\n return p", "def process_order(request, user, transaction=0):\n try:\n delivery_pk = request.POST.get(\"deliverySelection\")\n delivery_object = Delivery.objects.get(pk=delivery_pk)\n order = Order(user=user, delivery_address=delivery_object)\n except:\n order = Order(user=user)\n order.save()\n\n cart = Cart(request)\n for item in cart:\n if item[\"item\"].is_coins:\n add_coins(user, (item[\"item\"].coins_amount * item[\"quantity\"]), transaction)\n order_item = OrderItem(order=order, item=item[\"item\"], quantity=item[\"quantity\"],\n 
total_purchase_price=item[\"total_price\"])\n order_item.save()\n\n cart.clear()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create MTB products and set cancellable state
def _finalize_create_mtb_products(transaction):
    transaction.cancellable = True
    transaction.cancellable_expire = expire(shared.config.parameters.purchase.cancel_ttl_max)
    try:
        for item in transaction.items:
            item.mtb_product_ids = []
            for mp in create_mtb_products(item, transaction):
                item.mtb_product_ids.append(mp.id)
                if not mp.cancellable:
                    transaction.cancellable = False
                    transaction.cancellable_expire = None
                elif transaction.cancellable_expire and mp.cancellable_expire \
                        and transaction.cancellable_expire > mp.cancellable_expire:
                    transaction.cancellable_expire = mp.cancellable_expire
    except Exception as exc:
        # TODO remove mtb_products
        transaction.cancellable_expire = None
        transaction.cancellable = False
        raise exc
[ "def create_products():", "def make(self, product):\n resp = None\n product_id = product[0]\n product_name = product[1]\n logging.info('Coffee machine status checking.')\n status_table = self.statusTable\n\n logging.info('Make product: %s.', product_name)\n if not status_table['POWER']:\n resp = (CoffeeErr.WAS_OFF,\n 'Machine was turned OFF, turn it ON before making product')\n\n logging.warning('Coffee machine was off when try making product: '\n '%s.', product_name)\n else:\n # Before operation, check machine status\n if not status_table['WATER']:\n logging.error('Cannot make product %s, because no water.', product_name)\n return (CoffeeErr.NO_WATER,\n 'Cannot make product %s, because no water'\n % product_name)\n if not status_table['BEANS']:\n logging.error('Cannot make product %s, because no beans.', product_name)\n return (CoffeeErr.NO_BEANS,\n 'Cannot make product %s, because no beans'\n % product_name)\n if not status_table['TRAY']:\n logging.error('Cannot make product %s, because no tray.', product_name)\n return (CoffeeErr.NO_TRAY,\n 'Cannot make product %s, because no tray'\n % product_name)\n\n # Update timestamp\n tdelta = datetime.datetime.now() - self.coffee_timestamp\n if tdelta < datetime.timedelta(seconds=self.brew_timeout):\n logging.error('Cannot make product %s, because another coffee is brewing.', product_name)\n return (CoffeeErr.TIMEOUT, 'Cannot make product {}, because another coffee is brewing. Try again in {} seconds.'.format(\n product_name, int(self.brew_timeout - tdelta.total_seconds())))\n\n self.coffee_timestamp = datetime.datetime.now()\n\n # Send command to machine and wait for response\n raw_resp = self._control(CP.CoffeeCommand.OPERATION,\n CL.PRODUCTS[product_id])\n\n # Parse parsed response from self._control\n if raw_resp[0] == CoffeeErr.OK:\n # Parsing CORRECT. Parse Protobuf message instance.\n if raw_resp[1].type == CP.Response.OK:\n resp = (CoffeeErr.OK, '%s is prepared' % product_name)\n\n logging.info('%s is prepared.', product_name)\n elif raw_resp[1].type == CP.Response.OPERATION_ERR:\n resp = (CoffeeErr.DEFAULT, 'Cannot make %s' % product_name)\n logging.error('Cannot make %s', product_name)\n else:\n resp = self._err_format()\n else:\n # Parsing INCORRECT. 
Response error message from self._control\n resp = raw_resp\n\n return resp", "def create_product(self):\n if self.cursor:\n self.cursor.execute(\"INSERT INTO products(prod_name, \"\n \"prod_category, prod_price, prod_quantity,\"\n \"minimum_allowed,prod_description) \"\n \"VALUES(%s,%s,%s,%s,%s,%s)\",\n (self.data[\"prod_name\"],\n self.data[\"prod_category\"],\n self.data[\"prod_price\"],\n self.data[\"prod_quantity\"],\n self.data[\"minimum_allowed\"],\n self.data[\"prod_description\"],\n )\n )", "def _submit_cb(self):\n data = {}\n data['name'] = str(self._name.get())\n data['price'] = int(self._price.get())\n data['cost'] = int(self._cost.get())\n data['date_stocked'] = str(self._date_stocked.get())\n data['date_sold'] = str(self._date_sold.get())\n data['is_sold'] = int(self._is_sold.get())\n data['graphics_card'] = str(self._graphics_card.get())\n data['case'] = str(self._case.get())\n data['memory_type'] = str(self._memory_type.get())\n data['type'] = AbstractProduct.COMPUTER_TYPE\n\n url = 'http://localhost:5000/product_manager/products'\n response = requests.post(url, json=data)\n if response.status_code == 200:\n self._close_cb()\n else:\n messagebox.showwarning(\"Error\", \"Add Product Request Failed\")", "def create_spare_purchase_order(self,cr, uid, ids, context=None):\n print\"================================================\"\n picking_obj = self.pool.get('stock.picking')\n stock_move = self.pool.get('stock.move')\n purchase_obj = self.pool.get('purchase.order')\n rec=self.browse(cr, uid, ids)[0]\n qoute_ids = [qoute.id for qoute in rec.q_ids if qoute.state == 'done']\n if not rec.hq:\n if[ir for ir in self.browse(cr, uid, ids) if purchase_obj.search(cr, uid, [('ir_id','=',ir.id)])]:\n raise osv.except_osv(_('Purchase Order(s) Exsits !'), _('The Purchase Order(s) from this purchase requesition was alreadry created..\\n Please .. 
Check Purchase Orders List ..'))\n else:\n purchase_id = self.pool.get('pur.quote').make_purchase_order(cr, uid, qoute_ids)\n print\">>>>>>>>>>>>>>>>>>>>>>>>purchase_id\",purchase_id\n purchase_obj.write(cr, uid, purchase_id, {'location_id':rec.location_id.id}, context=context)\n self.write(cr, uid, ids, {'state':'wait_purchase','purchase_id':purchase_id[0]}, context=context) \n else:\n quote=self.pool.get('pur.quote').browse(cr, uid, qoute_ids)[0]\n pick_id = picking_obj.create(cr, uid , {\n 'type': 'in',\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),\n 'origin': rec.name,\n 'date': rec.ir_date,\n 'executing_agency': rec.executing_agency,\n 'partner_id': quote.supplier_id.id,\n 'state': 'draft',\n 'department_id':rec.department_id.id,\n 'move_lines' : [],\n 'maintenance':True,\n })\n print\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>pick_id\",pick_id\n for pro in quote.pq_pro_ids:\n move_id = stock_move.create(cr, uid, {\n 'name':pro.name,\n 'picking_id': pick_id,\n 'product_id': pro.product_id.id,\n 'product_qty': pro.product_qty,\n 'product_uos_qty': pro.product_id.uom_id.id,\n 'product_uos': pro.product_id.uom_id.id,\n 'product_uom': pro.product_id.uom_id.id,\n 'location_id': quote.supplier_id.property_stock_supplier.id,\n 'location_dest_id': rec.location_id.id,\n 'price_unit': pro.price_unit,\n 'state': 'draft',\n 'type':'in', \n }) \n self.write(cr, uid, ids, {'picking_id':pick_id}, context=context)\n self.write(cr, uid, ids, {'state':'purchase_officer'}, context=context)\n print\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>move_id\",move_id\n return True", "def _merge(self, cr, uid, ids, context):\n\n if len(ids) <= 1:\n return False\n\n main = self.browse(cr, uid, ids[0], context)\n if main.state != 'confirmed':\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" is not in \"Waiting Goods\" state.') % main.name)\n\n # Create new production, but ensure product_lines is kept empty.\n new_production_id = self.copy(cr, uid, ids[0], {\n 'product_lines': [],\n 'move_prod_id': False,\n }, context=context)\n new_production = self.browse(cr, uid, new_production_id, context)\n new_move_lines = {}\n new_move_created_ids = {}\n\n # Consider fields that are NOT required.\n new_bom_id = new_production.bom_id and new_production.bom_id.id or False\n new_routing_id = new_production.routing_id and new_production.routing_id.id or False\n new_product_uos = new_production.product_uos and new_production.product_uos.id or False\n\n product_qty = 0\n product_uos_qty = 0\n picking_ids = []\n temp_origin = []\n for production in self.browse(cr, uid, ids, context):\n if production.state != 'confirmed':\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" is not in \"Waiting Goods\" state.') % production.name)\n # Check required fields are equal\n if production.product_id != new_production.product_id:\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" product is different from the one in the first selected order.') % production.name)\n if production.product_uom != new_production.product_uom:\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" UOM is different from the one in the first selected order.') % production.name)\n\n # Check not required fields are equal\n bom_id = production.bom_id and production.bom_id.id or False\n if bom_id != new_bom_id:\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" BOM is different from the one in the first selected order.') % production.name)\n\n routing_id = production.routing_id and 
production.routing_id.id or False\n if routing_id != new_routing_id:\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" routing is different from the one in the first selected order.%s - %s') % (production.name, production.routing_id, new_production.routing_id) )\n\n product_uos = production.product_uos and production.product_uos.id or False\n if product_uos != new_product_uos:\n raise orm.except_orm(_('Error !'), _('Production order \"%s\" UOS is different from the one in the first selected order.') % production.name)\n\n product_qty += production.product_qty\n product_uos_qty += production.product_uos_qty\n\n picking_ids.append( production.picking_id.id )\n temp_origin.append(production.origin)\n\n self.write(cr, uid, [new_production_id], {\n 'product_qty': product_qty,\n 'product_uos_qty': product_uos_qty,\n 'origin': \": \".join(temp_origin),\n }, context )\n\n # As workflow calls may commit to db we do it at the very end of the process\n # so we minimize the probabilities of problems.\n\n self.action_compute(cr, uid, [new_production_id])\n workflow = netsvc.LocalService(\"workflow\")\n workflow.trg_validate(uid, 'mrp.production', new_production_id, 'button_confirm', cr)\n\n self.write(cr, uid, ids, {\n 'merged_into_id': new_production_id,\n }, context)\n\n # Cancel 'old' production: We must cancel pickings before cancelling production orders\n for id in picking_ids:\n workflow.trg_validate(uid, 'stock.picking', id, 'button_cancel', cr)\n for id in ids:\n workflow.trg_validate(uid, 'mrp.production', id, 'button_cancel', cr)\n\n return new_production_id", "def _mark_purchased(transaction: DbTransaction) -> None:\n transaction.state = TransactionState.PURCHASED\n for item in transaction.items:\n if item.mtb_product_ids:\n for mp_id in item.mtb_product_ids:\n try:\n mtb_prod = get_db_mtb_product(None, mp_id, all=True, refresh=False)\n mtb_prod.purchased = True\n mtb_prod.save()\n except Exception as exc:\n logger.error(\"Failed to mark mtb_product {mp_id} as purchase\", exc_info=exc)", "def _create_product_backlog(self):\n def _create_story(props):\n \"\"\"Creates a ticket of type story and returns it\"\"\"\n return self.teh.create_ticket(Type.USER_STORY, props=props)\n \n r1 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '3000'})\n self.assert_true(r1.link_to(_create_story({Key.STORY_PRIORITY: 'Linear'})))\n self.assert_true(r1.link_to(_create_story({Key.STORY_PRIORITY: 'Exciter'})))\n self.assert_true(r1.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n r2 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '1200'})\n self.assert_true(r2.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n self.assert_true(r2.link_to(_create_story({Key.STORY_PRIORITY: 'Exciter'})))\n r3 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '2000'})\n self.assert_true(r3.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n r4 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '800'})\n self.assert_true(r4.link_to(_create_story({Key.STORY_PRIORITY: 'Linear'})))\n r5 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '3000'})\n self.assert_true(r5.link_to(_create_story({Key.STORY_PRIORITY: 'Exciter'})))\n self.assert_true(r5.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n product_backlog = self.bmm.get(name=\"Product Backlog\")\n self.assert_equals(len(product_backlog), 14)\n return product_backlog", "def test_create_product(self):\n access_token = 
self.user_token_get()\n response = self.client().post('/api/v1/products', data=self.add_product,\n content_type='application/json',\n headers=dict(Authorization=\"Bearer \" + access_token),\n )\n self.assertEqual(response.status_code, 201)", "def test_Computer_getSoftwareReleaseList_SetupResource_CancelledState(self):\n sequence_list = SequenceList()\n sequence_string = self.prepare_software_release_purchase_packing_list + '\\\n LoginDefaultUser \\\n CancelPurchasePackingList \\\n Tic \\\n Logout \\\n SlapLoginCurrentComputer \\\n CheckEmptyComputerGetSoftwareReleaseListCall \\\n SlapLogout \\\n LoginERP5TypeTestCase \\\n CheckSiteConsistency \\\n Logout \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)", "def cancel_payment_transaction(auth_ctx: AuthorizationContext, transaction: DbTransaction) -> None:\n for item in transaction.items:\n if item.mtb_product_ids:\n for mp_id in item.mtb_product_ids:\n try:\n prod = get_db_mtb_product(auth_ctx, mp_id)\n check_mtb_product_useable(auth_ctx, prod)\n except AbortException:\n abort(409, \"Transaction contains lent products\")\n state = transaction.state\n if state == TransactionState.PURCHASED or state == TransactionState.FINALIZE_PENDING \\\n or state == TransactionState.USER_INTERACTION_PENDING:\n _refund_payment(transaction, \"Cancel requested\")\n _refund_purse(transaction)\n else:\n abort(501, f\"Transaction in state {state} isn't cancellable\")\n\n for item in transaction.items:\n if item.mtb_product_ids:\n for mp_id in item.mtb_product_ids:\n try:\n cancel_mtb_product(auth_ctx, mp_id, transaction)\n except Exception as exc:\n logger.error(\"Failed to cancel mtb_product {mp_id}\", exc_info=exc)\n # TODO remove product from traveller\n transaction.cancellable = False\n transaction.cancellable_expire = None\n transaction.state = TransactionState.CANCELLED\n transaction.save()\n # TODO error handling", "def do_create_batch(self, item, transfer, lot):\n date_done = transfer.picking_id.date_done\n partner = transfer.picking_id.partner_id\n product = item.product_id\n packaging = product.packaging_ids[0]\n\n serial = self.env['estate.nursery.batch'].search_count([]) + 1\n\n batch_data = {\n 'name': \"Batch %d\" % serial,\n 'lot_id': lot.id,\n 'variety_id': item.variety_id.id,\n 'progeny_id': item.progeny_id.id,\n 'date_received': date_done,\n 'age_seed': transfer.age_seed,\n 'qty_received': item.quantity,\n 'picking_id': transfer.picking_id.id,\n 'state': 'draft'\n }\n\n # print \"Create Seed Batch. 
%s (v: %s, p: %s) is received at %s from %s\" % (item.product_id.name,\n # item.variety_id.name,\n # item.progeny_id.name,\n # date_done,\n # partner.name)\n\n # Check and create batch (box) and batchline (bag) for seed product.\n # if product has no package\n # create one box and one bag\n # else\n # create batch and its batchline as product package.\n # Check and create lot for current good receipt\n # print \"Create Box and Bag Packaging is %s (box: %s, bag: %s @ %s)\" % (product.name,\n # packaging.ul_container.name,\n # packaging.ul.name,\n # packaging.qty * packaging.ul_qty)\n\n return self.env['estate.nursery.batch'].create(batch_data)", "def create(self, context=None):\n values = self.obj_get_changes()\n db_bay = self.dbapi.create_bay(values)\n self._from_db_object(self, db_bay)", "def setMultipleCreateEnabled(self, state):\n self._multipleCreateEnabled = state", "def test_i_decide_not_to_buy_the_product():", "def test_product_creation(self):\n response = self.client.post(\n '/v2/products',\n data=json.dumps({\n 'name': \"Laptop\",\n 'stock': \"available\",\n 'price': 50000\n }),\n content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 201)", "def __init__(self):\n self.product = self._factory_method()", "def _new_batch_item():\n item = Batch_Item()\n item.batch_start_time = log_time\n item.batch_status = BATCH_STATUS[1] #'Running'\n item.vessel_no = self.vessel_no\n self.batch_items.append(item)", "def buy_product(self, product_id, label_summary, label_inserted_money):\n if not product_id:\n label_summary['text'] = \"Wprowadź numer produktu\"\n else:\n result = self.vending_machine.buy_product(product_id)\n displayed_text = self.get_displayed_text(result, product_id)\n if result in (vm.BAD_PRODUCT_ID, vm.NOT_ENOUGH_MONEY, vm.STOCK_SHORTAGE):\n self.show_retry_dialog_window(displayed_text, label_summary, label_inserted_money)\n else:\n self.show_transaction_summary(displayed_text, label_summary, label_inserted_money)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark products and transaction as purchased
def _mark_purchased(transaction: DbTransaction) -> None:
    transaction.state = TransactionState.PURCHASED
    for item in transaction.items:
        if item.mtb_product_ids:
            for mp_id in item.mtb_product_ids:
                try:
                    mtb_prod = get_db_mtb_product(None, mp_id, all=True, refresh=False)
                    mtb_prod.purchased = True
                    mtb_prod.save()
                except Exception as exc:
                    logger.error("Failed to mark mtb_product {mp_id} as purchase", exc_info=exc)
[ "def notify_purchased(self):\n notify(CheckoutComplete(self.old_cart))", "def make_purchase(self):\n sale_type = self.get_sale_type()\n if len(self.rhslist) != 2:\n raise self.BrokerError(\"You must ask for both an amount and a price.\")\n amount = self.get_amount(self.rhslist[0])\n price = self.get_amount(self.rhslist[1], \"price\")\n character = self.caller.player.char_ob\n cost = price * amount\n if cost > character.currency:\n raise PayError(\n \"You cannot afford to pay %s when you only have %s silver.\"\n % (cost, character.currency)\n )\n material_type = None\n if sale_type == BrokeredSale.ACTION_POINTS:\n from evennia.server.models import ServerConfig\n\n disabled = ServerConfig.objects.conf(key=\"DISABLE_AP_TRANSFER\")\n if disabled:\n raise self.BrokerError(\"Action Point sales are temporarily disabled.\")\n elif sale_type == BrokeredSale.CRAFTING_MATERIALS:\n try:\n material_type = CraftingMaterialType.objects.get(name__iexact=self.lhs)\n except CraftingMaterialType.DoesNotExist:\n raise self.BrokerError(\n \"Could not find a material by the name '%s'.\" % self.lhs\n )\n if material_type.contraband:\n raise self.BrokerError(\n \"You can't put contraband on the broker! Seriously, how are you still alive?\"\n )\n character.pay_money(cost)\n dompc = self.caller.player_ob.Dominion\n sell_orders = BrokeredSale.objects.filter(\n broker_type=BrokeredSale.SALE,\n price__lte=price,\n sale_type=sale_type,\n amount__gt=0,\n crafting_material_type=material_type,\n ).order_by(\"price\")\n purchase, created = dompc.brokered_sales.get_or_create(\n price=price,\n sale_type=sale_type,\n crafting_material_type=material_type,\n broker_type=BrokeredSale.PURCHASE,\n )\n if not created:\n original = amount\n amount += purchase.amount\n else:\n original = 0\n for order in sell_orders:\n if amount > 0:\n seller = order.owner\n if (\n seller != dompc\n and order.owner.player.roster.current_account\n != self.caller.roster.current_account\n ):\n if amount > order.amount:\n buyamount = order.amount\n else:\n buyamount = amount\n order.make_purchase(dompc, buyamount)\n self.msg(\n \"You have bought %s %s from %s for %s silver.\"\n % (\n buyamount,\n order.material_name,\n seller,\n order.price * buyamount,\n )\n )\n amount -= buyamount\n if order.price < price:\n character.pay_money(-(price - order.price) * buyamount)\n\n purchase.amount = amount\n purchase.save()\n if amount == 0:\n purchase.delete()\n created = None\n if created:\n self.msg(\n \"You have placed an order for %s %s for %s silver each and %s total.\"\n % (amount, purchase.material_name, price, purchase.amount * price)\n )\n else:\n if amount > 0:\n self.msg(\n \"Added %s to the existing order of %s for %s silver each and %s total.\"\n % (original, purchase.material_name, price, purchase.amount * price)\n )", "def save(self, *args, **kwargs):\n orders = Order.objects.filter(product=self)\n\n # We exclude completed orders\n orders = orders.exclude(state=\"COM\")\n\n for order in orders:\n order.unit_price = self.unit_price\n order.save()\n\n super().save(*args, **kwargs)", "def addPurchase(self):\n oldPurchase=self.purchase\n if self.purchase<len(self.rentPurchase):\n self.purchase+=1\n self.updateRent()\n if oldPurchase<>self.purchase: Logger.info(\"purchase added, new purchases =\" + str(self.purchase))\n return oldPurchase<>self.purchase", "def create_spare_purchase_order(self,cr, uid, ids, context=None):\n print\"================================================\"\n picking_obj = self.pool.get('stock.picking')\n stock_move = 
self.pool.get('stock.move')\n purchase_obj = self.pool.get('purchase.order')\n rec=self.browse(cr, uid, ids)[0]\n qoute_ids = [qoute.id for qoute in rec.q_ids if qoute.state == 'done']\n if not rec.hq:\n if[ir for ir in self.browse(cr, uid, ids) if purchase_obj.search(cr, uid, [('ir_id','=',ir.id)])]:\n raise osv.except_osv(_('Purchase Order(s) Exsits !'), _('The Purchase Order(s) from this purchase requesition was alreadry created..\\n Please .. Check Purchase Orders List ..'))\n else:\n purchase_id = self.pool.get('pur.quote').make_purchase_order(cr, uid, qoute_ids)\n print\">>>>>>>>>>>>>>>>>>>>>>>>purchase_id\",purchase_id\n purchase_obj.write(cr, uid, purchase_id, {'location_id':rec.location_id.id}, context=context)\n self.write(cr, uid, ids, {'state':'wait_purchase','purchase_id':purchase_id[0]}, context=context) \n else:\n quote=self.pool.get('pur.quote').browse(cr, uid, qoute_ids)[0]\n pick_id = picking_obj.create(cr, uid , {\n 'type': 'in',\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),\n 'origin': rec.name,\n 'date': rec.ir_date,\n 'executing_agency': rec.executing_agency,\n 'partner_id': quote.supplier_id.id,\n 'state': 'draft',\n 'department_id':rec.department_id.id,\n 'move_lines' : [],\n 'maintenance':True,\n })\n print\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>pick_id\",pick_id\n for pro in quote.pq_pro_ids:\n move_id = stock_move.create(cr, uid, {\n 'name':pro.name,\n 'picking_id': pick_id,\n 'product_id': pro.product_id.id,\n 'product_qty': pro.product_qty,\n 'product_uos_qty': pro.product_id.uom_id.id,\n 'product_uos': pro.product_id.uom_id.id,\n 'product_uom': pro.product_id.uom_id.id,\n 'location_id': quote.supplier_id.property_stock_supplier.id,\n 'location_dest_id': rec.location_id.id,\n 'price_unit': pro.price_unit,\n 'state': 'draft',\n 'type':'in', \n }) \n self.write(cr, uid, ids, {'picking_id':pick_id}, context=context)\n self.write(cr, uid, ids, {'state':'purchase_officer'}, context=context)\n print\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>move_id\",move_id\n return True", "def credits_purchased(self, credits_purchased):\n\n self._credits_purchased = credits_purchased", "def create_products():", "def trigger_product_changes(self):\n old = self.TD['old'] or {}\n new = self.TD['new'] or {}\n dirty_product_ids = []\n for product_id in [ old.get('id'), new.get('id') ]:\n if not product_id: continue\n dirty_product_ids.append(product_id)\n self.mark_products_dirty(dirty_product_ids)", "def test_multiple_purchases_update_product_price(self):\n\n # Generate timestamps for correct timing of purchases and updates\n t1 = datetime.datetime.now() - datetime.timedelta(seconds=30)\n t2 = datetime.datetime.now() - datetime.timedelta(seconds=25)\n t3 = datetime.datetime.now() - datetime.timedelta(seconds=20)\n t4 = datetime.datetime.now() - datetime.timedelta(seconds=15)\n t5 = datetime.datetime.now() - datetime.timedelta(seconds=10)\n t6 = datetime.datetime.now() - datetime.timedelta(seconds=5)\n # Update product price\n pp = ProductPrice(product_id=1, price=300, admin_id=1, timestamp=t1)\n db.session.add(pp)\n db.session.commit()\n # Get the first product price\n product = Product.query.filter_by(id=1).first()\n pr_1 = copy(product.price)\n # Do first purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t2)\n db.session.add(purchase)\n db.session.commit()\n # Update product price\n pp = ProductPrice(product_id=1, price=100, admin_id=1, timestamp=t3)\n db.session.add(pp)\n db.session.commit()\n # Get the second product price\n 
product = Product.query.filter_by(id=1).first()\n pr_2 = copy(product.price)\n # Do second purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t4)\n db.session.add(purchase)\n # Update product price\n pp = ProductPrice(product_id=1, price=600, admin_id=1, timestamp=t5)\n db.session.add(pp)\n db.session.commit()\n # Get the third product price\n product = Product.query.filter_by(id=1).first()\n pr_3 = copy(product.price)\n # Do third purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t6)\n db.session.add(purchase)\n db.session.commit()\n\n # Check the product prices\n self.assertEqual(pr_1, 300)\n self.assertEqual(pr_2, 100)\n self.assertEqual(pr_3, 600)\n\n # Check user credit\n user = User.query.filter_by(id=1).first()\n self.assertEqual(len(user.purchases.all()), 3)\n self.assertEqual(user.credit, -(pr_1 + pr_2 + pr_3))\n\n # Check purchase prices\n purchases = Purchase.query.all()\n self.assertEqual(purchases[0].price, 300)\n self.assertEqual(purchases[1].price, 100)\n self.assertEqual(purchases[2].price, 600)", "def _buy(self):\r\n self._handleLogs(self.game.buy())\r\n self.redraw()", "def change_product_qty(self):\n Inventory = self.env['stock.inventory']\n consumption_obj = self.env['consumption.record']\n for wizard in self:\n product = wizard.product_id.with_context(location=wizard.location_id.id, lot_id=wizard.lot_id.id)\n line_data = wizard._prepare_inventory_line()\n\n if wizard.product_id.id and wizard.lot_id.id:\n inventory_filter = 'none'\n elif wizard.product_id.id:\n inventory_filter = 'product'\n else:\n inventory_filter = 'none'\n\n date_obj = datetime.strptime(wizard.date, DATE_FORMAT)\n date = date_obj.strftime(\"%Y-%m-%d 00:00:00\")\n staff_ids = []\n for line in wizard.staff_ids:\n staff_ids.append(line.id)\n inventory = Inventory.create({\n 'name': _('INV: %s') % tools.ustr(wizard.product_id.name),\n 'filter': inventory_filter,\n 'product_id': wizard.product_id.id,\n 'location_id': wizard.location_id.id,\n 'lot_id': wizard.lot_id.id,\n 'date': date,\n 'line_ids': [(0, 0, line_data)],\n })\n inventory.action_done()\n for i in inventory.move_ids:\n i.consumed = True\n consumption_obj.create({\n 'name': self.env['ir.sequence'].next_by_code('consumption.record'),\n 'product_id': wizard.product_id.id,\n 'product_tmpl_id': wizard.product_tmpl_id.id,\n 'product_variant_count': wizard.product_variant_count,\n 'new_quantity': wizard.new_quantity,\n 'lot_id': wizard.lot_id.id,\n 'location_id': wizard.location_id.id,\n 'barcode': wizard.barcode,\n 'staff_ids': [(6, 0, staff_ids)],\n 'user_id': self.env.uid,\n 'note': wizard.note,\n 'date': wizard.date,\n 'inventory_id': inventory.id\n })\n return {'type': 'ir.actions.act_window_close'}", "async def purchase(self, ctx, *, factory_name):\n author = ctx.author\n for item in self._factories[\"factory\"]:\n if item[\"name\"].lower() == factory_name.lower():\n for item2 in list(set(self.settings[\"user\"][str(author.id)][\"items\"])): \n itemamount = self.settings[\"user\"][str(author.id)][\"items\"].count(item2) \n if item[\"item\"] == item2:\n if item[\"price\"] <= itemamount:\n await ctx.send(\"You just bought a `{}`\".format(item[\"name\"]))\n for x in range(item[\"price\"]):\n self.settings[\"user\"][str(author.id)][\"items\"].remove(item2)\n self.settings[\"user\"][str(author.id)][\"items\"].append(item[\"name\"])\n dataIO.save_json(self._factories_file, self._factories)\n dataIO.save_json(self.location, self.settings)\n else:\n await ctx.send(\"You don't have enough `{}` to buy 
this :no_entry:\".format(item2))", "def test_insert_simple_purchase(self):\n user = User.query.filter_by(id=1).first()\n self.assertEqual(len(user.purchases.all()), 0)\n self.assertEqual(user.credit, 0)\n product = Product.query.filter_by(id=1).first()\n purchase = Purchase(user_id=user.id, product_id=product.id, amount=1)\n db.session.add(purchase)\n db.session.commit()\n user = User.query.first()\n self.assertEqual(len(user.purchases.all()), 1)\n self.assertEqual(user.credit, -product.price)", "def _finalize_create_mtb_products(transaction):\n transaction.cancellable = True\n transaction.cancellable_expire = expire(shared.config.parameters.purchase.cancel_ttl_max)\n try:\n for item in transaction.items:\n item.mtb_product_ids = []\n for mp in create_mtb_products(item, transaction):\n item.mtb_product_ids.append(mp.id)\n if not mp.cancellable:\n transaction.cancellable = False\n transaction.cancellable_expire = None\n elif transaction.cancellable_expire and mp.cancellable_expire \\\n and transaction.cancellable_expire > mp.cancellable_expire:\n transaction.cancellable_expire = mp.cancellable_expire\n except Exception as exc:\n # TODO remove mtb_products\n transaction.cancellable_expire = None\n transaction.cancellable = False\n raise exc", "def buy_item(self, item):\n try:\n self.lock(item)\n num_left = self.validate_purchase(item)\n except InvalidItemType:\n print(\"Sorry, we don't sell {}\".format(str(item)))\n except OutOfStock:\n print(\"Sorry, the item is out of stock.\")\n else:\n print(\"Purchase complete. There are {} {}s left\".format(num_left, item.get_name()))\n finally:\n self.unlock(item)", "def buy_product(self, product_id, label_summary, label_inserted_money):\n if not product_id:\n label_summary['text'] = \"Wprowadź numer produktu\"\n else:\n result = self.vending_machine.buy_product(product_id)\n displayed_text = self.get_displayed_text(result, product_id)\n if result in (vm.BAD_PRODUCT_ID, vm.NOT_ENOUGH_MONEY, vm.STOCK_SHORTAGE):\n self.show_retry_dialog_window(displayed_text, label_summary, label_inserted_money)\n else:\n self.show_transaction_summary(displayed_text, label_summary, label_inserted_money)", "def set_quantity(self, product, quantity):\r\n quantity = int(quantity)\r\n if quantity < 0:\r\n raise ValueError('Quantity must be positive when updating cart')\r\n if product in self.products:\r\n self._items_dict[product.pk].quantity = quantity\r\n if self._items_dict[product.pk].quantity < 1:\r\n del self._items_dict[product.pk]\r\n self.update_session()", "def test_purchase_revokes(self):\n # Insert some purchases\n for _ in range(1, 11):\n purchase = Purchase(user_id=1, product_id=1, amount=1)\n db.session.add(purchase)\n db.session.commit()\n user = User.query.filter(User.id == 1).first()\n self.assertEqual(user.credit, -3000)\n # Revoke some purchases\n purchases = Purchase.query.all()\n purchases[0].set_revoked(revoked=True)\n purchases[4].set_revoked(revoked=True)\n purchases[6].set_revoked(revoked=True)\n db.session.commit()\n # Check user credit\n user = User.query.filter(User.id == 1).first()\n self.assertEqual(user.credit, -2100)\n # Un-Revoke one purchase\n purchases = Purchase.query.all()\n purchases[4].set_revoked(revoked=False)\n db.session.commit()\n # Check user credit\n user = User.query.filter(User.id == 1).first()\n self.assertEqual(user.credit, -2400)", "def deliver(self, product_id, delivered_amount, time_step):\n\n undo = False\n if (time_step, product_id, -delivered_amount) in self._delivered:\n self._delivered.remove((time_step, product_id, 
-delivered_amount))\n undo = True\n else:\n self._delivered.append((time_step, product_id, delivered_amount))\n for request in self._requests:\n if str(request.product_id) == str(product_id):\n request.delivered += delivered_amount\n request.changed = True\n if delivered_amount == 0 and not undo:\n request.delivered = request.requested\n elif delivered_amount == 0 and undo:\n request.delivered = 0\n if self._is_fulfilled_at is None and self.is_fulfilled(time_step):\n self._is_fulfilled_at = time_step" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finalize vendor transaction and create receipt
def _finalize_vendor_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \
        -> None:
    if not finalization_data:
        abort(400, "Vendor finalization requires payment reference")
    transaction.payment_reference = finalization_data
    try:
        _finalize_create_mtb_products(transaction)
    except Exception as exc:
        transaction.state = TransactionState.ISSUE_ERROR
        _revert_mtb_products(auth_ctx, transaction)
        transaction.save()
        raise exc
    _mark_purchased(transaction)
    transaction.save()
[ "def finalize_transaction_receipt(auth_ctx: AuthorizationContext, transaction_id: str,\n receipt_req: Dict[str, Any]) -> Receipt:\n req = ReceiptRequest.from_dict(receipt_req)\n transaction = finalize_transaction(auth_ctx, transaction_id, req.payment_reference)\n return transaction.to_models(Receipt, transaction_id=str(transaction.id))", "def _finalize_traveller_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \\\n -> None:\n if transaction.payment_method_transaction_data:\n payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id)\n psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id))\n payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data)\n try:\n psd.reserve_payment(payment_transaction)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.CANCELLED if isinstance(pse, UserInteractionCanceledException) \\\n else TransactionState.DENIED\n _release_purse_reservation(transaction)\n transaction.save()\n raise pse\n else:\n psd = None\n payment_transaction = None\n try:\n _finalize_create_mtb_products(transaction)\n except Exception as exc:\n logger.error(\"Failed to create product: %s\", exc)\n transaction.state = TransactionState.ISSUE_ERROR\n _release_purse_reservation(transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n _revert_mtb_products(auth_ctx, transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise exc\n if transaction.purse_reservation_id is not None:\n record = NewPurseRecord(transaction_id=str(transaction.id),\n reservation_id=transaction.purse_reservation_id,\n amount=-float(transaction.purse_amount),\n refundable=False)\n try:\n res = purse_create_record(transaction.wallet.purse_id, record)\n except ApiException as ae:\n logger.error(\"Failed to record purse transaction: %s\", ae)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise ae\n transaction.purse_record_ids.append(res.id)\n transaction.purse_reservation_id = None\n if transaction.payment_method_transaction_data:\n try:\n transaction.payment_reference = psd.finalize_payment(payment_transaction, finalization_data)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _revert_purse_record(transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise pse\n _mark_purchased(transaction)\n # TODO purge some, transaction.payment_method_transaction_data\n try:\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n except Exception as exc:\n # TODO finer error handling\n transaction.state = TransactionState.ERROR\n _revert_mtb_products(auth_ctx, transaction)\n if transaction.purse_record_ids:\n # TODO revert record?\n pass\n if transaction.payment_method_transaction_data is not None:\n # TODO release payment 
reservation/payment?\n pass\n transaction.save()\n raise exc", "def void(self, actor):\n\n\tfrom authorize import AuthorizeClient, CreditCard, Address, exceptions\n\timport db.Db as Db\n try:\n try:\n cart = self.cart\n from authorize import AuthorizeClient, CreditCard, Address, exceptions\n client = AuthorizeClient(Db.auth_id, Db.auth_key, debug=False)\n transaction = client.transaction(cart['transaction_id'])\n transaction.void()\n self.log(\"Transaction {} voided.\".format(self.cart['transaction_id']), actor)\n except Exception as e:\n self.log(\"Could not void transaction {}: {}\".format(self.cart['transaction_id'], e.args[0]), actor)\n\n\t c = get_cursor()\n c.execute(\"\"\"\n update cart\n set transaction_id = null\n where cart_id = %s\"\"\",\n (self.cart['cart_id']))\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def finalize(request_id, success):\n log.info('Provisioning finished.')\n\n if request_id:\n notify_end(request_id, success)\n else:\n log.info('There is not Orchestrate request_id. Skipping notification.')\n\n if success:\n cleanup()", "def make_purchase(self):\n sale_type = self.get_sale_type()\n if len(self.rhslist) != 2:\n raise self.BrokerError(\"You must ask for both an amount and a price.\")\n amount = self.get_amount(self.rhslist[0])\n price = self.get_amount(self.rhslist[1], \"price\")\n character = self.caller.player.char_ob\n cost = price * amount\n if cost > character.currency:\n raise PayError(\n \"You cannot afford to pay %s when you only have %s silver.\"\n % (cost, character.currency)\n )\n material_type = None\n if sale_type == BrokeredSale.ACTION_POINTS:\n from evennia.server.models import ServerConfig\n\n disabled = ServerConfig.objects.conf(key=\"DISABLE_AP_TRANSFER\")\n if disabled:\n raise self.BrokerError(\"Action Point sales are temporarily disabled.\")\n elif sale_type == BrokeredSale.CRAFTING_MATERIALS:\n try:\n material_type = CraftingMaterialType.objects.get(name__iexact=self.lhs)\n except CraftingMaterialType.DoesNotExist:\n raise self.BrokerError(\n \"Could not find a material by the name '%s'.\" % self.lhs\n )\n if material_type.contraband:\n raise self.BrokerError(\n \"You can't put contraband on the broker! 
Seriously, how are you still alive?\"\n )\n character.pay_money(cost)\n dompc = self.caller.player_ob.Dominion\n sell_orders = BrokeredSale.objects.filter(\n broker_type=BrokeredSale.SALE,\n price__lte=price,\n sale_type=sale_type,\n amount__gt=0,\n crafting_material_type=material_type,\n ).order_by(\"price\")\n purchase, created = dompc.brokered_sales.get_or_create(\n price=price,\n sale_type=sale_type,\n crafting_material_type=material_type,\n broker_type=BrokeredSale.PURCHASE,\n )\n if not created:\n original = amount\n amount += purchase.amount\n else:\n original = 0\n for order in sell_orders:\n if amount > 0:\n seller = order.owner\n if (\n seller != dompc\n and order.owner.player.roster.current_account\n != self.caller.roster.current_account\n ):\n if amount > order.amount:\n buyamount = order.amount\n else:\n buyamount = amount\n order.make_purchase(dompc, buyamount)\n self.msg(\n \"You have bought %s %s from %s for %s silver.\"\n % (\n buyamount,\n order.material_name,\n seller,\n order.price * buyamount,\n )\n )\n amount -= buyamount\n if order.price < price:\n character.pay_money(-(price - order.price) * buyamount)\n\n purchase.amount = amount\n purchase.save()\n if amount == 0:\n purchase.delete()\n created = None\n if created:\n self.msg(\n \"You have placed an order for %s %s for %s silver each and %s total.\"\n % (amount, purchase.material_name, price, purchase.amount * price)\n )\n else:\n if amount > 0:\n self.msg(\n \"Added %s to the existing order of %s for %s silver each and %s total.\"\n % (original, purchase.material_name, price, purchase.amount * price)\n )", "def send_receipt(to, cleaner_name, receipt_id):\n\tmessage = (\"{0} has finished cleaning your place! {1}/receipt/{2}\".format(cleaner_name, DOMAIN_NAME, receipt_id))\n\tsend_SMS(to, message)", "def free_payment(self):\n self.payment = Payment(profile=self.invoice.profile,\n amount=self.invoice.total,\n provider=self.provider,\n invoice=self.invoice,\n created=timezone.now()\n )\n self.payment.save()\n self.transaction_submitted = True\n\n self.payment.success = True\n self.payment.transaction = f\"{self.payment.uuid}-free\"\n self.payment.payee_full_name = \" \".join([self.invoice.profile.user.first_name, self.invoice.profile.user.last_name])\n self.payment.save()\n \n self.update_invoice_status(Invoice.InvoiceStatus.COMPLETE)\n\n self.create_receipts(self.invoice.order_items.all())", "def run_transaction():\n if not request.json:\n abort(400)\n args = request.json\n query = {'_id': ObjectId(args['order_id'])}\n order = mongo_cli.db['orders'].find_one(query)\n if order is None:\n abort(404)\n client = mongo_cli.db['clients'].find_one({'_id': order['client_id']})\n\n credit_card_details = factory.CreditCardData(\n CardNumber=args['credit_card_number'],\n CardExpiration=args['credit_card_expiry_month'] + args['credit_card_expiry_year'],\n CardCode=args['credit_card_cvv'],\n AvsZip=order['shipping_address']['zip'],\n AvsStreet=order['shipping_address']['street_1'] + ' ' + order['shipping_address']['street_2']\n )\n transaction_details = factory.TransactionDetail(\n Description=order['type'],\n Amount=order['total_price'],\n Invoice=order['order_number']\n )\n req_data = factory.TransactionRequestObject(\n AccountHolder=f\"{client['first_name']} {client['last_name']}\",\n Details=transaction_details,\n CreditCardData=credit_card_details\n )\n token = build_token()\n response = usaepay_client.service.runTransaction(token, req_data)\n results = {}\n if response is None:\n results['message'] = 'Error, No Response was 
received'\n return jsonify(results), 204\n if response.ResultCode == \"A\":\n results['message'] = 'Transaction Approved'\n results['reference_number'] = str(response.RefNum)\n results['result_code'] = str(response.ResultCode)\n return jsonify(results), 200\n if response.ResultCode == \"D\":\n results['message'] = ('Transaction Declined, Reason: {}'\n .format(response.Error))\n results['result_code'] = str(response.ResultCode)\n return jsonify(results), 400\n else:\n results['message'] = ('Transaction Error, Reason: {}'\n .format(response.Error))\n results['result_code'] = str(response.ResultCode)\n return jsonify(results), 400", "def on_transaction_finish(self):\n print(\"Transaction successful\")", "def complete_order(self):\n\t\tprint()\n\t\tprint('Complete Order')\n\n\t\t# Init Electronic\n\t\tself.order.pl_init(self.serial_number, self.path, self.file_name)", "def create_budget_confirmation_invoice(self):\n confirmation_pool = self.env['account.budget.confirmation']\n currency_pool = self.env['res.currency']\n new_confirm_id = False\n flag = False\n for invoice in self:\n # v9: if invoice.invoice_type in ('purchase','sale'): super(account_invoice,self).compute_tax(cr, uid, [invoice.id], context=context)\n if invoice.journal_id.type == 'purchase':\n for invoice_line in invoice.invoice_line_ids:\n if invoice_line.account_budget_required == True:\n # v9: TEST ME if invoice_line.account_id and invoice_line.account_id.user_type_id.analytic_wk:\n if invoice_line.account_id:\n total_amount = invoice.company_id.currency_id.with_context(date=invoice.date).compute(\n invoice_line.price_subtotal, invoice.currency_id)\n amount = invoice.company_id.currency_id.with_context(date=invoice.date).compute(\n invoice_line.price_subtotal,invoice.currency_id)\n val = {\n 'reference': invoice.number,\n 'partner_id': invoice.partner_id.id,\n 'account_id': invoice_line.account_id.id,\n 'date': invoice.date_invoice,\n 'analytic_account_id': invoice_line.account_analytic_id and invoice_line.account_analytic_id.id,\n 'amount': total_amount or amount,\n 'residual_amount': total_amount or amount,\n #'type': self._context.get('type', 'other'),\n 'type': 'other',\n 'note': invoice_line.name or '/',\n\n }\n\n if invoice_line.invoice_line_tax_ids:\n val_amount = val.get('amount', 0)\n net_amount = 0\n total = 0\n tax_amount = 0\n tax_info = invoice_line.invoice_line_tax_ids.compute_all(invoice_line.price_unit, invoice.currency_id,\n invoice_line.quantity, invoice_line.product_id,\n invoice.partner_id)\n total += tax_info.get('total_included', 0.0)\n tax_amount += sum([t.get('amount', 0.0) for t in tax_info.get('taxes', False)])\n net_amount = tax_amount + val_amount\n val.update({'amount': net_amount or amount, })\n new_confirm_id = False\n\n if invoice_line.budget_confirm_id:\n flag = True\n # confirmation_pool.write([invoice_line.budget_confirm_id.id], val)\n # new_confirm_id = invoice_line.budget_confirm_id.id\n elif not invoice_line.budget_confirm_id:\n flag = True\n confirm = confirmation_pool.create(val)\n new_confirm_id = int(confirm)\n invoice_line.write({'budget_confirm_id': new_confirm_id})\n # v11 condition is worng ???\n # if new_confirm_id and not invoice.company_id.auto_budget:#v9: test me\n if new_confirm_id and invoice.company_id.auto_budget:\n confirmation_pool.browse(new_confirm_id).action_cancel_draft()\n confirmation_pool.browse(new_confirm_id).budget_complete()\n confirmation_pool.browse(new_confirm_id).check_budget_invoice()\n\n return flag", "def _finalize_create_mtb_products(transaction):\n 
transaction.cancellable = True\n transaction.cancellable_expire = expire(shared.config.parameters.purchase.cancel_ttl_max)\n try:\n for item in transaction.items:\n item.mtb_product_ids = []\n for mp in create_mtb_products(item, transaction):\n item.mtb_product_ids.append(mp.id)\n if not mp.cancellable:\n transaction.cancellable = False\n transaction.cancellable_expire = None\n elif transaction.cancellable_expire and mp.cancellable_expire \\\n and transaction.cancellable_expire > mp.cancellable_expire:\n transaction.cancellable_expire = mp.cancellable_expire\n except Exception as exc:\n # TODO remove mtb_products\n transaction.cancellable_expire = None\n transaction.cancellable = False\n raise exc", "def conduct_transaction(self,trans,o):\n pass", "def test_handle_transaction_receipt_ii(self):\n # setup\n ledger_api_dialogue = cast(\n LedgerApiDialogue,\n self.prepare_skill_dialogue(\n dialogues=self.ledger_api_dialogues,\n messages=self.list_of_ledger_api_messages[:5],\n counterparty=LEDGER_API_ADDRESS,\n ),\n )\n fipa_dialogue = cast(\n FipaDialogue,\n self.prepare_skill_dialogue(\n dialogues=self.fipa_dialogues,\n messages=self.list_of_fipa_messages[:4],\n is_agent_to_agent_messages=True,\n ),\n )\n ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue\n\n fipa_dialogue._incoming_messages = []\n\n fipa_dialogue.terms = self.terms\n incoming_message = cast(\n LedgerApiMessage,\n self.build_incoming_message_for_skill_dialogue(\n dialogue=ledger_api_dialogue,\n performative=LedgerApiMessage.Performative.TRANSACTION_RECEIPT,\n transaction_receipt=self.transaction_receipt,\n ),\n )\n\n # operation\n with patch.object(\n self.ledger_api_handler.context.behaviours.transaction, \"finish_processing\"\n ):\n with patch.object(LedgerApis, \"is_transaction_settled\", return_value=True):\n with patch.object(self.logger, \"log\"):\n with pytest.raises(\n ValueError, match=\"Could not retrieve last fipa message\"\n ):\n self.ledger_api_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(0)", "def completetx(self, tx):\n\n for txin in tx.inputs():\n # find matching inputs\n if txin['address'] != self.address:\n continue\n sig = txin['signatures'][0]\n if not sig:\n continue\n sig = bytes.fromhex(sig)\n\n if txin['scriptSig'] == self.dummy_scriptsig_redeem:\n script = [\n len(sig), sig,\n len(self.redeemscript), self.redeemscript,\n ]\n else:\n # already completed..?\n continue\n txin['scriptSig'] = joinbytes(script).hex()\n # need to update the raw, otherwise weird stuff happens.\n tx.raw = tx.serialize()", "def done(self, cr, uid, ids, context={}):\n '''payment_enrich_lines_obj = self.pool.get('payment.enrich.lines')\n for fuel_plan in self.browse(cr, uid, ids,context):\n if not fuel_plan.quantity_ids:\n raise osv.except_osv(_('ValidateError'), _('In Order To Complete Fuel Plan Order You need To Enter Fuel Quantities!'))\n if fuel_plan.payment_method == 'enrich':\n details = 'Fixed Fuel Plan No:'+fuel_plan.name\n payment_enrich_lines_obj.create(cr, uid, {\n 'enrich_id':fuel_plan.enrich_id.id,\n 'cost': fuel_plan.cost,\n 'date':time.strftime('%Y-%m-%d'),\n 'state':'draft',\n 'name':details,\n 'department_id':fuel_plan.department_id.id,\n 'model_id':'fuel.plan',\n }, context=context)\n copy_attachments(self,cr,uid,[fuel_plan.id],'fuel.plan',fuel_plan.enrich_id.id,'payment.enrich', context)\n elif fuel_plan.payment_method == 'voucher': \n self.create_voucher(cr,uid,ids,context)'''\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def 
notify_purchased(self):\n notify(CheckoutComplete(self.old_cart))", "def done(self,cr,uid,ids,context={}):\n for fees in self.browse(cr, uid, ids, context=context):\n contract = fees.contract_id\n voucher_id = super(contract_co_operative_fees, self).create_invoice(cr, uid, ids, context)\n fees.write({'state':'done'})\n \"\"\"user_obj = self.pool.get('res.users')\n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n\t\n for fees in self.browse(cr, uid, ids, context=context):\n\t \n contract = fees.contract_id\n \n voucher_id = voucher_obj.create(cr, uid, {\n 'contract_id': fees.contract_id.id,\n 'amount': fees.fees_amount,\n 'type': 'purchase',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': contract.partner_id.id , \n #'journal_id': 67,\n 'reference': contract.name+\"/\"+ fees.name,\n 'state': 'draft',\n # 'name':'Project fees:'+fees.name +'project :'+contract.department_id.name,\n # 'currency_id':contract.currency_id.id,\n })\n voucher_obj.write(cr,uid,[voucher_id],{'amount': fees.fees_amount}, context=context)\n \n \n vocher_line_id = voucher_line_obj.create(cr, uid, {\n 'amount': fees.fees_amount,\n 'voucher_id': voucher_id,\n 'type': 'dr',\n 'account_id': contract.contract_account.id,\n 'name': fees.name,\n })\n contract.write({'voucher_ids': [(4, voucher_id)]}, context=context)\n fees.write({'state':'done'})\n\t print \"voucher id:\",voucher_id\n\t print \"amount:\",fees.fees_amount\n\n \n Workflow function to change the state to confirm.\n \n @return: True\n \"\"\"\n currency_obj = self.pool.get('res.currency')\n new_amount = 0.0\n for fees in self.browse(cr, uid, ids):\n \n contract_currency = contract.currency_id.id\n euro_id = currency_obj.search(cr, uid, [('name','=','EUR')],limit=1)\n curren = currency_obj.browse(cr, uid, euro_id)\n new_amount = currency_obj.compute(cr, uid, contract_currency, curren[0].id, fees.fees_amount, fees.fees_date) \n all_amount = contract.fees_total_amount + fees.fees_amount\n if all_amount > contract.contract_amount :\n raise osv.except_osv(_('Amount exceed !'), _('The total fees amount well be more than the contract amount ..'))\n else:\n contract.write({'fees_total_amount': all_amount}) \n self.write(cr,uid,ids,{'fees_amount_in_euro':new_amount })\n\n return True", "def create_work_order_receipt(self, wo_params,\n client_private_key):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finalize transaction and create receipt
def finalize_transaction_receipt(auth_ctx: AuthorizationContext, transaction_id: str, receipt_req: Dict[str, Any]) -> Receipt:
    req = ReceiptRequest.from_dict(receipt_req)
    transaction = finalize_transaction(auth_ctx, transaction_id, req.payment_reference)
    return transaction.to_models(Receipt, transaction_id=str(transaction.id))
[ "def _finalize_traveller_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \\\n -> None:\n if transaction.payment_method_transaction_data:\n payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id)\n psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id))\n payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data)\n try:\n psd.reserve_payment(payment_transaction)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.CANCELLED if isinstance(pse, UserInteractionCanceledException) \\\n else TransactionState.DENIED\n _release_purse_reservation(transaction)\n transaction.save()\n raise pse\n else:\n psd = None\n payment_transaction = None\n try:\n _finalize_create_mtb_products(transaction)\n except Exception as exc:\n logger.error(\"Failed to create product: %s\", exc)\n transaction.state = TransactionState.ISSUE_ERROR\n _release_purse_reservation(transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n _revert_mtb_products(auth_ctx, transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise exc\n if transaction.purse_reservation_id is not None:\n record = NewPurseRecord(transaction_id=str(transaction.id),\n reservation_id=transaction.purse_reservation_id,\n amount=-float(transaction.purse_amount),\n refundable=False)\n try:\n res = purse_create_record(transaction.wallet.purse_id, record)\n except ApiException as ae:\n logger.error(\"Failed to record purse transaction: %s\", ae)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise ae\n transaction.purse_record_ids.append(res.id)\n transaction.purse_reservation_id = None\n if transaction.payment_method_transaction_data:\n try:\n transaction.payment_reference = psd.finalize_payment(payment_transaction, finalization_data)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _revert_purse_record(transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise pse\n _mark_purchased(transaction)\n # TODO purge some, transaction.payment_method_transaction_data\n try:\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n except Exception as exc:\n # TODO finer error handling\n transaction.state = TransactionState.ERROR\n _revert_mtb_products(auth_ctx, transaction)\n if transaction.purse_record_ids:\n # TODO revert record?\n pass\n if transaction.payment_method_transaction_data is not None:\n # TODO release payment reservation/payment?\n pass\n transaction.save()\n raise exc", "def commit(self):\n\t\tdel self.transaction_log[:] \n\t\tself.transaction_mode = False", "def commit_transaction(self):\n self.tx.commit()", "def _finalize_vendor_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \\\n -> None:\n if not 
finalization_data:\n abort(400, \"Vendor finalization requires payment reference\")\n transaction.payment_reference = finalization_data\n try:\n _finalize_create_mtb_products(transaction)\n except Exception as exc:\n transaction.state = TransactionState.ISSUE_ERROR\n _revert_mtb_products(auth_ctx, transaction)\n transaction.save()\n raise exc\n _mark_purchased(transaction)\n transaction.save()", "def on_transaction_finish(self):\n print(\"Transaction successful\")", "def tpc_finish(transaction, func = lambda tid: None):", "def create_receipts(self, order_items):\n for order_item in order_items.all():\n self.create_order_item_receipt(order_item)", "def _create_from_receipt(self, debit_note, receipt, account, bucket):\n from Acquire.Accounting import DebitNote as _DebitNote\n from Acquire.Accounting import Refund as _Refund\n from Acquire.Accounting import TransactionRecord as _TransactionRecord\n from Acquire.Accounting import TransactionState as _TransactionState\n from Acquire.Accounting import Account as _Account\n from Acquire.Accounting import Receipt as _Receipt\n\n if not isinstance(debit_note, _DebitNote):\n raise TypeError(\"You can only create a CreditNote \"\n \"with a DebitNote\")\n\n if not isinstance(receipt, _Receipt):\n raise TypeError(\"You can only receipt a Receipt object: %s\"\n % str(receipt.__class__))\n\n # get the transaction behind this receipt and ensure it is in the\n # receipting state...\n transaction = _TransactionRecord.load_test_and_set(\n receipt.transaction_uid(),\n _TransactionState.RECEIPTING,\n _TransactionState.RECEIPTING, bucket=bucket)\n\n # ensure that the receipt matches the transaction...\n transaction.assert_matching_receipt(receipt)\n\n if account is None:\n account = _Account(transaction.credit_account_uid(), bucket)\n elif account.uid() != receipt.credit_account_uid():\n raise ValueError(\"The accounts do not match when crediting \"\n \"the receipt: %s versus %s\" %\n (account.uid(), receipt.credit_account_uid()))\n\n (uid, datetime) = account._credit_receipt(debit_note, receipt, bucket)\n\n self._account_uid = account.uid()\n self._debit_account_uid = debit_note.account_uid()\n self._datetime = datetime\n self._uid = uid\n self._debit_note_uid = debit_note.uid()\n self._value = debit_note.value()\n self._is_provisional = debit_note.is_provisional()\n\n if debit_note.is_provisional():\n self._receipt_by = debit_note.receipt_by()\n\n # finally(!) move the transaction into the receipted state\n _TransactionRecord.load_test_and_set(\n receipt.transaction_uid(),\n _TransactionState.RECEIPTING,\n _TransactionState.RECEIPTED, bucket=bucket)", "def finalize(request_id, success):\n log.info('Provisioning finished.')\n\n if request_id:\n notify_end(request_id, success)\n else:\n log.info('There is not Orchestrate request_id. 
Skipping notification.')\n\n if success:\n cleanup()", "def conduct_transaction(self,trans,o):\n pass", "def test_create_receipt(self):\n f = Faker('fi_FI')\n subject = \"Test email #{}\".format(randint(10000, 99999))\n email_from = 'Instanssi.org <{}>'.format(f.email())\n email_to = f.email()\n p = ReceiptParams()\n p.order_number(randint(10000, 99999))\n p.receipt_date(timezone.now())\n p.order_date(timezone.now())\n p.first_name(f.first_name())\n p.last_name(f.last_name())\n p.email(email_to)\n p.mobile(f.phone_number())\n p.telephone(f.phone_number())\n p.company(f.company())\n p.street(f.street_address())\n p.city(f.city())\n p.postal_code(f.postcode())\n p.country(f.country())\n p.transaction_url(get_url(reverse('store:ta_view', args=(\"1234abcd\",))))\n for k in range(3):\n p.add_item(\n item_id=randint(0, 999999),\n price=Decimal(randint(0, 100)),\n name=\"Test product name goes here {}\".format(k),\n amount=randint(1, 5),\n tax='0%'\n )\n\n # Just make sure everything looks like it should in the database object\n r = Receipt.create(\n mail_to=email_to,\n mail_from=email_from,\n subject=subject,\n params=p)\n self.assertEqual(r.subject, subject)\n self.assertEqual(r.mail_from, email_from)\n self.assertEqual(r.mail_to, email_to)\n self.assertIsNotNone(r.content)\n self.assertIsNotNone(r.params)\n self.assertIsNone(r.sent)\n\n # Try to load from database, make sure everything matches\n n = ReceiptParams(r.params)\n self.assertDictEqual(p.params, n.params)\n\n # Send and make sure date is set\n r.send()\n self.assertIsNotNone(r.sent)", "def void(self, actor):\n\n\tfrom authorize import AuthorizeClient, CreditCard, Address, exceptions\n\timport db.Db as Db\n try:\n try:\n cart = self.cart\n from authorize import AuthorizeClient, CreditCard, Address, exceptions\n client = AuthorizeClient(Db.auth_id, Db.auth_key, debug=False)\n transaction = client.transaction(cart['transaction_id'])\n transaction.void()\n self.log(\"Transaction {} voided.\".format(self.cart['transaction_id']), actor)\n except Exception as e:\n self.log(\"Could not void transaction {}: {}\".format(self.cart['transaction_id'], e.args[0]), actor)\n\n\t c = get_cursor()\n c.execute(\"\"\"\n update cart\n set transaction_id = null\n where cart_id = %s\"\"\",\n (self.cart['cart_id']))\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def commit(self):\n self._check_closed()\n self._trans_id = self.__session.send_commit()", "def create_work_order_receipt(self, wo_params,\n client_private_key):\n pass", "def test_handle_transaction_receipt_ii(self):\n # setup\n ledger_api_dialogue = cast(\n LedgerApiDialogue,\n self.prepare_skill_dialogue(\n dialogues=self.ledger_api_dialogues,\n messages=self.list_of_ledger_api_messages[:5],\n counterparty=LEDGER_API_ADDRESS,\n ),\n )\n fipa_dialogue = cast(\n FipaDialogue,\n self.prepare_skill_dialogue(\n dialogues=self.fipa_dialogues,\n messages=self.list_of_fipa_messages[:4],\n is_agent_to_agent_messages=True,\n ),\n )\n ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue\n\n fipa_dialogue._incoming_messages = []\n\n fipa_dialogue.terms = self.terms\n incoming_message = cast(\n LedgerApiMessage,\n self.build_incoming_message_for_skill_dialogue(\n dialogue=ledger_api_dialogue,\n performative=LedgerApiMessage.Performative.TRANSACTION_RECEIPT,\n transaction_receipt=self.transaction_receipt,\n ),\n )\n\n # operation\n with patch.object(\n 
self.ledger_api_handler.context.behaviours.transaction, \"finish_processing\"\n ):\n with patch.object(LedgerApis, \"is_transaction_settled\", return_value=True):\n with patch.object(self.logger, \"log\"):\n with pytest.raises(\n ValueError, match=\"Could not retrieve last fipa message\"\n ):\n self.ledger_api_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(0)", "def _finalize_create_mtb_products(transaction):\n transaction.cancellable = True\n transaction.cancellable_expire = expire(shared.config.parameters.purchase.cancel_ttl_max)\n try:\n for item in transaction.items:\n item.mtb_product_ids = []\n for mp in create_mtb_products(item, transaction):\n item.mtb_product_ids.append(mp.id)\n if not mp.cancellable:\n transaction.cancellable = False\n transaction.cancellable_expire = None\n elif transaction.cancellable_expire and mp.cancellable_expire \\\n and transaction.cancellable_expire > mp.cancellable_expire:\n transaction.cancellable_expire = mp.cancellable_expire\n except Exception as exc:\n # TODO remove mtb_products\n transaction.cancellable_expire = None\n transaction.cancellable = False\n raise exc", "def send_receipt(to, cleaner_name, receipt_id):\n\tmessage = (\"{0} has finished cleaning your place! {1}/receipt/{2}\".format(cleaner_name, DOMAIN_NAME, receipt_id))\n\tsend_SMS(to, message)", "def create_transaction(info_and_status):\n # get info from the inventory\n list_list = list_inventory()\n for item in list_list:\n if item[0].lower() == info_and_status[0]:\n item_list = item\n # set info for use\n name = info_and_status[0]\n paid = str((int(item_list[3]) * .07) + int(item_list[3]))\n deposit = str((int(item_list[-1]) / 10))\n # get revenue\n with open(\"current_revenue.txt\", \"r\") as file:\n revenue = file.read()\n # convert revenue from string\n if float(revenue).is_integer():\n revenue = int(revenue)\n else:\n revenue = float(revenue)\n\n # create string\n if info_and_status[1] == \"renting\":\n if float(paid).is_integer():\n revenue += int(paid)\n else:\n revenue += float(paid)\n trans_string = \"Rented: \" + name + \"; Paid: \" + paid + \"; Deposit: \" + deposit\n trans_string += \"........Total Revenue: \" + str(revenue)\n\n\n elif info_and_status[1] == \"returning\":\n trans_string = \"Returned: \" + name\n trans_string += \"........Total Revenue: \" + str(revenue)\n\n elif info_and_status[1] == \"replacing\":\n if float(deposit).is_integer():\n revenue += int(float(deposit))\n else:\n revenue += float(deposit)\n trans_string = \"Replaced: \" + name + \"; Paid: \" + deposit\n trans_string += \"........Total Revenue: \" + str(revenue)\n\n else:\n return \"Error!\"\n\n # write revenue to file\n with open(\"current_revenue.txt\", \"w\") as file:\n file.write(str(revenue))\n\n return trans_string", "def CreateReceiptRule(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancel transaction and refund payments and invalidate products
def cancel_payment_transaction(auth_ctx: AuthorizationContext, transaction: DbTransaction) -> None:
    for item in transaction.items:
        if item.mtb_product_ids:
            for mp_id in item.mtb_product_ids:
                try:
                    prod = get_db_mtb_product(auth_ctx, mp_id)
                    check_mtb_product_useable(auth_ctx, prod)
                except AbortException:
                    abort(409, "Transaction contains lent products")
    state = transaction.state
    if state == TransactionState.PURCHASED or state == TransactionState.FINALIZE_PENDING \
            or state == TransactionState.USER_INTERACTION_PENDING:
        _refund_payment(transaction, "Cancel requested")
        _refund_purse(transaction)
    else:
        abort(501, f"Transaction in state {state} isn't cancellable")

    for item in transaction.items:
        if item.mtb_product_ids:
            for mp_id in item.mtb_product_ids:
                try:
                    cancel_mtb_product(auth_ctx, mp_id, transaction)
                except Exception as exc:
                    logger.error("Failed to cancel mtb_product {mp_id}", exc_info=exc)
                    # TODO remove product from traveller
    transaction.cancellable = False
    transaction.cancellable_expire = None
    transaction.state = TransactionState.CANCELLED
    transaction.save()
    # TODO error handling
[ "def spare_cancel(self,cr,uid,ids,context=None):\n\n exchange = self.pool.get('exchange.order')\n wf_service = netsvc.LocalService(\"workflow\")\n for rec in self.browse(cr , uid ,ids):\n exchange_ref = rec.ir_ref\n exchange_id = exchange.search(cr , uid , [('name' , '=' , exchange_ref)])\n for exchange_record in exchange.browse(cr ,uid , exchange_id):\n wf_service.trg_validate(uid, 'exchange.order', exchange_record.id, 'exchange_cancel', cr)\n \n return self.write(cr, uid, ids, {'state':'spare_cancel'}, context=context)", "def action_cancel(self):\n for inv in self:\n if inv.is_deposit and inv.sale_ids.invoiced_rate: # Other invoices exists\n raise except_orm(\n _('Warning!'),\n _(\"\"\"Cancellation of advance invoice is not allowed!\n Please cancel all following invoices first.\"\"\"))\n res = super(account_invoice, self).action_cancel()\n return res", "def cancelPayment(self, **args):\n return self._client().service.cancelPayment(**args)", "def cancel(self):\n self.sa_session.rollback()", "def cancel_payment(self, transaction_id: str) -> bool:\n raise NotImplementedError()", "def void_payment(self):\n pass", "def action_cancel(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n move_obj = self.pool.get('stock.move')\n proc_obj = self.pool.get('procurement.order')\n for production in self.browse(cr, uid, ids, context=context):\n if production.final_lot_id:\n self.pool.get('stock.production.lot').write(\n cr, uid, production.final_lot_id.id, {'active': False},\n context=context)\n if production.move_created_ids:\n move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids])\n moves = move_obj.search(cr, uid, [('move_dest_id', 'in', [x.id for x in production.move_lines])], context=context)\n if moves:\n move_ids = []\n for move in move_obj.browse(cr, uid, moves, context):\n if move.state not in ('cancel', 'done'):\n move_ids.append(move.id)\n move_obj.action_cancel(cr, uid, move_ids, context=context)\n move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines])\n self.write(cr, uid, ids, {'state': 'cancel'})\n # Put related procurements in exception\n proc_obj = self.pool.get(\"procurement.order\")\n procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)],\n context=context)\n if procs:\n proc_obj.write(cr, uid, procs, {'state': 'exception'},\n context=context)\n return True", "def test_billing_recurring_cancel(self):\n pass", "def refund(self, cr, uid, ids, context=None):\n clone_list = []\n line_obj = self.pool.get('pos.order.line')\n \n for order in self.browse(cr, uid, ids, context=context):\n current_session_ids = self.pool.get('pos.session').search(cr, uid, [\n ('state', '!=', 'closed'),\n ('user_id', '=', uid)], context=context)\n if not current_session_ids:\n raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))\n\n clone_id = self.copy(cr, uid, order.id, {\n 'name': order.name + ' REFUND', # not used, name forced by create\n 'session_id': current_session_ids[0],\n 'date_order': time.strftime('%Y-%m-%d %H:%M:%S'),\n }, context=context)\n clone_list.append(clone_id)\n\n for clone in self.browse(cr, uid, clone_list, context=context):\n for order_line in clone.lines:\n line_obj.write(cr, uid, [order_line.id], {\n 'qty': -order_line.qty\n }, context=context)\n\n new_order = ','.join(map(str,clone_list))\n abs = {\n #'domain': \"[('id', 'in', [\"+new_order+\"])]\",\n 'name': _('Return Products'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 
'res_model': 'pos.order',\n 'res_id':clone_list[0],\n 'view_id': False,\n 'context':context,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n }\n return abs", "def action_cancel(self):\n context = self._context or {}\n for inv_brw in self.browse():\n if not inv_brw.wh_muni_id:\n super(AccountInvoice, self).action_cancel()\n else:\n raise exceptions.except_orm(\n _(\"Error!\"),\n _(\"No puede cancelar una factura que no tiene\"\n \"Documento de retención municipal. Primero se debe cancelar la\"\n \"factura el documento de retención municipal y luego puedes\"\n \"cancelar esta factura.\"))\n return True", "def action_cancel_salepoint(self):\n for rec in self:\n # send Email to big manager for cancel process\n user_email_list = []\n user_obj = self.env['res.users']\n from_mail = user_obj.browse(self._uid) and user_obj.login or ''\n big_manager_grp = self.env.ref(\"big_general.group_big_manager\")\n for user in big_manager_grp.users:\n user_email_list.append(user.partner_id.email\n if user.partner_id.email else '')\n email_template = self.env.ref(\n 'big_new_registration.email_surrender_connection_request')\n if email_template and user_email_list:\n user_email = ','.join(user_email_list)\n email_template.sudo().write({\n 'email_from': from_mail,\n 'email_to': user_email\n })\n email_template.send_mail(self.id, force_send=True)\n rec.state = 'cancel_sales_point'\n if rec.new_connection_id.cylinder_qty == 0:\n rec.new_connection_id.state = 'cancel_sales_point'", "def do_cancel(self, args):\n acct = Enter().account_name(1)\n memoid = Enter().memo_id(acct)\n if not db.verify_memoid(acct, memoid):\n return\n if db.cancel(acct, memoid):\n msg.message(\"The exchange has been canceled\")", "def cancel_all_open_order(self):", "async def cancel_order(self, **params):\r\n return await self.client_helper(\"cancel_order\", **params)", "def payment_cancel(request):\n # Check if the user is a player\n if not request.user.has_perm('gamestore.player'):\n return permission_denied(request, PermissionDenied)\n\n # Check if the result message is correct\n result = request.GET.get('result') # success/cancel/error, should be cancel\n if result != 'cancel':\n return bad_request(request, BadRequest)\n\n return render(request, 'post_payment.html', {'state': result})", "def action_cancel(self):\n # TDE DUMB: why is cancel_procuremetn in ctx we do quite nothing ?? 
like not updating the move ??\n if any(move.state == 'done' for move in self):\n raise UserError(_('You cannot cancel a stock move that has been set to \\'Done\\'.'))\n\n procurements = self.env['procurement.order']\n for move in self:\n if move.reserved_quant_ids:\n move.quants_unreserve()\n if self.env.context.get('cancel_procurement'):\n if move.propagate:\n pass\n # procurements.search([('move_dest_id', '=', move.id)]).cancel()\n else:\n if move.move_dest_id:\n if move.propagate and move.move_dest_id.state!='done':\n move.move_dest_id.action_cancel()\n elif move.move_dest_id.state == 'waiting':\n # If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)\n move.move_dest_id.write({'state': 'confirmed'})\n if move.procurement_id:\n procurements |= move.procurement_id\n\n self.write({'state': 'cancel', 'move_dest_id': False})\n if procurements:\n procurements.check()\n return True", "def cancel(self, uid):\n order = self._orders[uid]\n if not order.active:\n return\n if order.is_buy:\n pricelevel = self._bids.pricelevel(order.price)\n pricelevel.remove(order)\n if pricelevel.is_empty():\n self._bids.remove_pricelevel(order.price)\n else:\n pricelevel = self._asks.pricelevel(order.price)\n pricelevel.remove(order)\n if pricelevel.is_empty():\n self._asks.remove_pricelevel(order.price)\n \n if uid < 0:\n self.my_cumvol_sent -= order.leavesqty\n order._cumqty = order.qty - order.leavesqty\n order.leavesqty = 0\n order.active = False", "def _refund_payment(transaction: DbTransaction, reason: str) -> None:\n if transaction.payment_method_amount:\n if transaction.payment_method_transaction_data:\n payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id)\n psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id))\n payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data)\n try:\n psd.cancel_payment(payment_transaction, transaction.payment_reference, reason)\n logger.info(\"Transaction({transaction.id}) payment refunded OK\")\n transaction.state = TransactionState.CANCEL_PAYMENT_REFUNDED\n transaction.save()\n except PaymentServiceException as pse:\n logger.error(\"Failed to cancel payment: %s\", pse)\n transaction.state = TransactionState.CANCEL_FAILED\n transaction.save()\n raise pse\n else:\n abort(500, \"Payment data missing\")", "def test_cancel_checkout(self):\n accept_language = 'es'\n response = self.api.cancel_checkout('ff6918c6-5043-43b9-a7ec-d40d407d62c1', accept_language)\n self.assertIsNotNone(response)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to refund payment, abort if failed.
def _refund_payment(transaction: DbTransaction, reason: str) -> None:
    if transaction.payment_method_amount:
        if transaction.payment_method_transaction_data:
            payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id)
            psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id))
            payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data)
            try:
                psd.cancel_payment(payment_transaction, transaction.payment_reference, reason)
                logger.info("Transaction({transaction.id}) payment refunded OK")
                transaction.state = TransactionState.CANCEL_PAYMENT_REFUNDED
                transaction.save()
            except PaymentServiceException as pse:
                logger.error("Failed to cancel payment: %s", pse)
                transaction.state = TransactionState.CANCEL_FAILED
                transaction.save()
                raise pse
        else:
            abort(500, "Payment data missing")
[ "def refund(self, amount=None):\n gateway = get_gateway(self.gateway_name)\n\n # TODO: can this implementation live in dinero.gateways.AuthorizeNet?\n try:\n return gateway.refund(self, amount or self.price)\n except exceptions.PaymentException:\n if amount is None or amount == self.price:\n return gateway.void(self)\n else:\n raise exceptions.PaymentException(\n \"You cannot refund a transaction that hasn't been settled\"\n \" unless you refund it for the full amount.\"\n )", "def test_payment_backend_base_do_on_refund(self):\n backend = TestBasePaymentBackend()\n order = OrderFactory(state=enums.ORDER_STATE_SUBMITTED)\n billing_address = BillingAddressDictFactory()\n\n # Create payment and register it\n payment = {\n \"id\": \"pay_0\",\n \"amount\": order.total,\n \"billing_address\": billing_address,\n }\n\n backend.call_do_on_payment_success(order, payment)\n payment = Transaction.objects.get(reference=\"pay_0\")\n\n # - Order has been validated\n self.assertEqual(order.state, \"validated\")\n\n # - Refund entirely the order\n backend.call_do_on_refund(\n amount=order.total,\n invoice=payment.invoice,\n refund_reference=\"ref_0\",\n )\n\n # - Credit transaction has been created\n self.assertEqual(\n Transaction.objects.filter(reference=\"ref_0\", total=-order.total).count(),\n 1,\n )\n\n # - Order has been canceled\n order.refresh_from_db()\n self.assertEqual(order.state, \"canceled\")", "def refund(self):\n urn = \"/v1/invoices/{invoice_id}/refund\".format(invoice_id=self.id)\n\n # This below if to avoid a request because the API not allow this operation\n # but all API can to change theirs behaviors so to allow to refund\n # invoices with status difference of \"paid\".\n # The approach without if also to raise exception with error from directly\n # API responses but here the focus is less requests.\n if self.status == \"paid\":\n response = self.__conn.post(urn, [])\n obj = IuguInvoice(**response)\n else:\n raise errors.IuguGeneralException(value=\"Refund operation support only \" \\\n \"invoices with status: paid.\")\n\n return obj", "def refund_payment(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/refunds')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def test_pay_ins_universal_pay_universal_pay_post_refund(self):\n pass", "def refund(transaction_id, amount_or_options=None):\n\n return Configuration.gateway().transaction.refund(transaction_id, amount_or_options)", "def test_beta_referral_customer_refund_by_amount(referral_customer_prod_client):\n with pytest.raises(ApiError) as error:\n referral_customer_prod_client.beta_referral_customer.refund_by_amount(refund_amount=2000)\n\n assert str(error.value) == \"Refund amount is invalid. 
Please use a valid amount or escalate to finance.\"", "def call_do_on_refund(self, amount, invoice, refund_reference):\n self._do_on_refund(amount, invoice, refund_reference)", "def refund(**kwargs):\n kwargs.update({'status_to': 'refund'})\n return status_change(**kwargs)", "def test_sales_creditmemo_management_v1_refund_post(self):\n pass", "def void_payment(self):\n pass", "def cancel_payment(self, transaction_id: str) -> bool:\n raise NotImplementedError()", "def handle_unsolicited_payment(self, message: Message):\n amount_paid_mob = message.payment.amount_mob\n self.logger.warning(\"Could not find drop session for customer; Payment unsolicited!\")\n minimum_fee_mob = self.payments.minimum_fee_mob\n\n if minimum_fee_mob < amount_paid_mob:\n self.messenger.log_and_send_message(\n ChatStrings.UNSOLICITED_PAYMENT\n )\n self.payments.send_reply_payment(amount_paid_mob, False, memo=\"Unsolicited payment refund\")\n else:\n self.messenger.log_and_send_message(\n ChatStrings.UNSOLICITED_NOT_ENOUGH\n )", "def refund(sender, escrow_id):\n #check if sender is correct\n if not CheckWitness(sender):\n Notify(NOT_SENDER)\n return False\n\n # Check if the escrow exists\n escrow = Get(context, escrow_id)\n if not escrow:\n Notify(UNEXISTING_ESCROW)\n return False\n\n escrow = Deserialize(escrow)\n\n #get current block\n c = GetHeight()\n \n # Check if sender is moderator\n if escrow['buyer_addr'] != sender:\n Notify(WRONG_USER)\n return False\n\n #check if time-lock is over\n if escrow['expiry'] >= c:\n Notify(NOT_EXPIRY)\n return False", "def cancel_refund(self, cancel_refund):\n\n self._cancel_refund = cancel_refund", "def test_refund_routing_slips_zero_dollar_error(client, jwt):\n payload = get_routing_slip_request(cheque_receipt_numbers=[('1234567890', PaymentMethod.CHEQUE.value, 0.00)])\n token = jwt.create_jwt(get_claims(roles=[Role.FAS_CREATE.value, Role.FAS_VIEW.value, Role.FAS_REFUND.value]),\n token_header)\n headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}\n\n rv = client.post('/api/v1/fas/routing-slips', data=json.dumps(payload), headers=headers)\n assert rv.status_code == 201\n assert schema_utils.validate(rv.json, 'routing_slip')[0]\n rv = client.get('/api/v1/fas/routing-slips/{}'.format(rv.json.get('number')), headers=headers)\n assert rv.status_code == 200\n assert schema_utils.validate(rv.json, 'routing_slip')[0]\n refund_details = {\n 'mailingAddress': {\n 'city': 'Gatineau',\n 'country': 'CA',\n 'region': 'QC',\n 'postalCode': 'J8L 2K3',\n 'street': 'E-412 Rue Charles',\n 'streetAdditional': ''\n },\n 'name': 'Staff user'\n }\n rs_number = rv.json.get('number')\n rv = client.post('/api/v1/fas/routing-slips/{}/refunds'.format(rs_number),\n data=json.dumps({'status': RoutingSlipStatus.REFUND_REQUESTED.value, 'details': refund_details}),\n headers=headers)\n assert rv.status_code == 400", "def cancel_payment_transaction(auth_ctx: AuthorizationContext, transaction: DbTransaction) -> None:\n for item in transaction.items:\n if item.mtb_product_ids:\n for mp_id in item.mtb_product_ids:\n try:\n prod = get_db_mtb_product(auth_ctx, mp_id)\n check_mtb_product_useable(auth_ctx, prod)\n except AbortException:\n abort(409, \"Transaction contains lent products\")\n state = transaction.state\n if state == TransactionState.PURCHASED or state == TransactionState.FINALIZE_PENDING \\\n or state == TransactionState.USER_INTERACTION_PENDING:\n _refund_payment(transaction, \"Cancel requested\")\n _refund_purse(transaction)\n else:\n abort(501, f\"Transaction in state 
{state} isn't cancellable\")\n\n for item in transaction.items:\n if item.mtb_product_ids:\n for mp_id in item.mtb_product_ids:\n try:\n cancel_mtb_product(auth_ctx, mp_id, transaction)\n except Exception as exc:\n logger.error(\"Failed to cancel mtb_product {mp_id}\", exc_info=exc)\n # TODO remove product from traveller\n transaction.cancellable = False\n transaction.cancellable_expire = None\n transaction.state = TransactionState.CANCELLED\n transaction.save()\n # TODO error handling", "def invoice_payment_failed(self):\n self._update(\"subscription_status\", \"invoice_payment_failed\")\n self._update(\"is_paying\", False)\n send_email(self, EmailTemplateNames.PAYMENT_PROBLEM,\n render_params={\n \"subscription_status\": \"invoice_payment_failed\",\n \"payment_console\": f\"https://{current_config.DOMAIN}{url_for('payments_blueprint.index')}\"\n })", "def call_do_on_payment_failure(self, order):\n self._do_on_payment_failure(order)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Release possible purse reservation
def _release_purse_reservation(transaction: DbTransaction) -> None:
    if transaction.purse_reservation_id is not None:
        try:
            delete_reservation(transaction.wallet.purse_id, transaction.purse_reservation_id)
            transaction.purse_reservation_id = None
            transaction.save()
        except ApiException as ae:
            logger.error("Failed to delete purse reservation, purse=%s, reservation=%s",
                         transaction.wallet.purse_id, transaction.purse_reservation_id, exc_info=ae)
[ "def release(self):\n self.free = True\n self.guest = None\n self.occupy_time = None", "def release(self):\n\n self.transaction(self.holdingshares, ['Cover', 'Sell'][self.action])\n self.holding = 0\n print \" --- %s: released %s shares at gain of %s ---\" % (self.ticker, self.shares, self.gains)", "def remove_reserve(self):\r\n self._reserves -= 1", "def release(self, ):\n\n pass\n\n '''\n // ParcelRelease\n // Release a parcel to public\n // viewer -> sim\n // reliable\n {\n \tParcelRelease Low 212 NotTrusted Unencoded\n \t{\n \t\tAgentData\t\tSingle\n \t\t{\tAgentID\t\t\tLLUUID\t}\n \t\t{\tSessionID\t\tLLUUID\t}\n \t}\n \t{\n \t\tData\t\t\tSingle\n \t\t{\tLocalID\t\t\tS32\t\t}\t// parcel ID\n \t}\n }\n '''", "def release(self, hold):\n assert hold.total_index >= 0, 'double release'\n hold.total_index = -1", "def release(self):\n #print \"RELEASING LOCK\"\n self.locked = False\n if self.timer:\n self.timer.cancel()", "def _release_payment_method_reservation(transaction: DbTransaction, psd: PaymentServiceDriver,\n payment_transaction: PaymentServiceTransaction) -> None:\n if transaction.payment_method_amount:\n if transaction.payment_method_transaction_data:\n try:\n psd.cancel_payment(payment_transaction, transaction.payment_reference,\n f\"Release payment because of {transaction.state}\")\n logger.info(\"Transaction({transaction.id}) payment released OK\")\n except PaymentServiceException as pse:\n logger.error(\"Failed to release payment: %s\", pse)\n else:\n logger.error(\"Payment data missing, failed to release payment\")", "def release(self):\n if self.value is not None:\n self.value += 1\n if self.value > self.maximum_value:\n raise ValueError(\"Too many releases\")", "def release(self, next_individual_index, next_node, current_time):\n next_individual = self.individuals.pop(next_individual_index)\n next_individual.queue_size_at_departure = len(self.individuals)\n next_individual.exit_date = current_time\n if self.c < 'Inf':\n self.detatch_server(next_individual.server, next_individual)\n self.write_individual_record(next_individual)\n self.change_state_release(next_individual)\n self.release_blocked_individual(current_time)\n self.begin_service_if_possible_release(current_time)\n next_node.accept(next_individual, current_time)", "def _finalize_traveller_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \\\n -> None:\n if transaction.payment_method_transaction_data:\n payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id)\n psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id))\n payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data)\n try:\n psd.reserve_payment(payment_transaction)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.CANCELLED if isinstance(pse, UserInteractionCanceledException) \\\n else TransactionState.DENIED\n _release_purse_reservation(transaction)\n transaction.save()\n raise pse\n else:\n psd = None\n payment_transaction = None\n try:\n _finalize_create_mtb_products(transaction)\n except Exception as exc:\n logger.error(\"Failed to create product: %s\", exc)\n transaction.state = TransactionState.ISSUE_ERROR\n _release_purse_reservation(transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n _revert_mtb_products(auth_ctx, transaction)\n if payment_transaction:\n 
transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise exc\n if transaction.purse_reservation_id is not None:\n record = NewPurseRecord(transaction_id=str(transaction.id),\n reservation_id=transaction.purse_reservation_id,\n amount=-float(transaction.purse_amount),\n refundable=False)\n try:\n res = purse_create_record(transaction.wallet.purse_id, record)\n except ApiException as ae:\n logger.error(\"Failed to record purse transaction: %s\", ae)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise ae\n transaction.purse_record_ids.append(res.id)\n transaction.purse_reservation_id = None\n if transaction.payment_method_transaction_data:\n try:\n transaction.payment_reference = psd.finalize_payment(payment_transaction, finalization_data)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _revert_purse_record(transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise pse\n _mark_purchased(transaction)\n # TODO purge some, transaction.payment_method_transaction_data\n try:\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n except Exception as exc:\n # TODO finer error handling\n transaction.state = TransactionState.ERROR\n _revert_mtb_products(auth_ctx, transaction)\n if transaction.purse_record_ids:\n # TODO revert record?\n pass\n if transaction.payment_method_transaction_data is not None:\n # TODO release payment reservation/payment?\n pass\n transaction.save()\n raise exc", "def reassign(self):\n\t\tdelete = []\t\t\t# Empty the list of reservations to delete\n\t\trandom.shuffle(self.unassigned)\t# Shuffle the unassigned reservations to make sure we're not always starting with the same one\n\t\tfor idx,r in enumerate(self.unassigned):\t# For all unassigned reservations ...\n\t\t\tmatch = 0\t# Found a matching car?\n\t\t\ttimeslot = 0\t# Is there a possible timeslot? If so, where is it?\n\t\t\tfor c in r.getCarsObj():\t# Check all possible cars for this reservation\n\t\t\t\ttimeslot = self.checkTime(c, r.getStart(), r.getDuration())\t# Is there a usable timeslot in this cars schedule?\n\t\t\t\tif timeslot > 0:\n\t\t\t\t\tif self.checkZone(r.getZoneObj(), c): \t# Is the assigned zone okay for the reservation\n\t\t\t\t\t\tself.resCars[r] = c \t\t\t\t# If yes, match found. 
This car will be assigned to the reservation\n\t\t\t\t\t\tmatch = 1\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\tif match:\t\t\t\t\t# If there was a car found ...\n\t\t\t\tdelete.append(idx)\t\t# Delete the reservation from the unassigned\n\t\t\t\ti = self.resCars[r]\n\t\t\t\tself.addTimeslot(timeslot, r, i)\t# Add the timeslot to the schedule of the car\n\n\t\tfor c,i in enumerate(delete):\t# Remove all matched reservations\n\t\t\tself.unassigned.pop(i-c)", "def release(self):\n with self._cache.transact(retry=True):\n value = self._cache.get(self._key, default=self._value)\n assert self._value > value, 'cannot release un-acquired semaphore'\n value += 1\n self._cache.set(\n self._key,\n value,\n expire=self._expire,\n tag=self._tag,\n )", "def expire(self, context):\n\n managers.quotas.reservation_expire(context)", "def add_reserve(self):\r\n self._reserves += 1", "def release(self, actor):\n\n try:\n cart_status_id = self.cart['cart_status']['cart_status_id']\n # Currently we're moving all carts into STATUS_REVIEW upon submit,\n # but we may allow carts to transition from new to ready eventually.\n if (cart_status_id != STATUS_REVIEW and cart_status_id != STATUS_NEW):\n raise CartInvalid(\"Cart must be in new or review to submit.\")\n if self.cart['manual_hold']:\n raise CartInvalid(\"Cannot release a held cart.\")\n\n self.jobs_del()\n self.jobs_add()\n\n self.set_status_id(STATUS_LAB_READY)\n self.log(\"Cart released for lab: \" + actor)\n return { 'cart_status_id': STATUS_LAB_READY }\n except CartInvalid as e:\n raise CartInvalid(e)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def release(self, idx):\n # Set deadline event.\n self.eventList.append(self.eventClass(\n 1, self.tasks[idx].deadline, idx))\n\n # Set next release event.\n self.eventList.append(self.eventClass(\n 0, self.tasks[idx].period, idx))\n\n # Sort the eventList.\n self.eventList = sorted(self.eventList,\n key=operator.attrgetter('delta'))\n\n # Add the workload to corresponding entry in statusTable.\n self.statusTable[idx][0] += float(self.tasks[idx].wcet)\n\n # Initialiue the flag to indicate the first execution.\n self.statusTable[idx][4] = 1\n\n # Decide the highest priority task in the system.\n self.h = self.findTheHighestWithWorkload()\n if self.h == -1:\n print(\"BUG: after release, there must be at least one task with\"\n \" workload.\")\n\n # Record the job release in the statusTable.\n self.statusTable[idx][1] += 1", "def release(self, number):\n self.q.add(number)", "def release(self, hold):\n assert hold.total_index >= 0, 'BUG: double reset/release'\n assert self._holds[-1] == hold.total_index, 'BUG: releasing bad ordered hold'\n self._holds.pop()\n self._maybe_collect()\n hold.total_index = -1", "def changeReservation(self):\n\t\tres,car = None,None \t# Save the reservation we're changing with it's previous car\n\t\tchosenCar = None \t\t# Save the new car we are going to assign to the reservation\n\t\tdelete = []\t\t\t# Keep a list of reservations which need to be deleted\n\t\twhile True:\t\t\t\t# Search for a reservation with more than 1 possible car\n\t\t\tres,car = random.choice(list(self.resCars.items()))\n\t\t\tcarCount = res.getCarsObj()\n\t\t\tif len(carCount) > 1:\n\t\t\t\twhile True:\t\t# Search for another car for this reservation which is not the previous car\n\t\t\t\t\tchosenCar = random.choice(carCount)\n\t\t\t\t\tif chosenCar is 
not car:\n\t\t\t\t\t\tself.resCars[res] = chosenCar\n\t\t\t\t\t\tbreak\n\t\t\t\tbreak\n\t\tfor i in range(len(self.usedCars[car])):\t\t\t# Find the used timeslot of the reservation for the old car and remove it\n\t\t\tif int(self.usedCars[car][i][0]) == int(res.getStart()):\n\t\t\t\tself.usedCars[car].pop(i)\n\t\t\t\tbreak\n\t\tif not self.checkZone(res.getZoneObj(),chosenCar):\t# If the car is in the wrong zone, ...\n\t\t\tmatchingZone = 0\n\t\t\tfor z in res.getZoneObj().getZonesObj():\t\t# Search for neighbouring zones which will work with the reservation\n\t\t\t\tif (self.checkZone(z,chosenCar)):\n\t\t\t\t\tself.carZones[chosenCar] = z \t\t\t# Change the zone of the car\n\t\t\t\t\tmatchingZone = 1\n\t\t\t\t\tbreak\n\t\t\tif not matchingZone:\t\t\t\t\t\t\t# If their are no neighbouring matching zones, ...\n\t\t\t\tself.carZones[chosenCar] = res.getZoneObj() # change the car to the zone of the reservation\n\n\t\tfor r,c in self.resCars.items():\t# Check for all reservations who use this car if they are still feasible\n\t\t\tif c is chosenCar:\n\t\t\t\tif not self.checkZone(r.getZoneObj(),c):\n\t\t\t\t\tdelete.append(r)\t\t# Otherwise, add them to the delete list\n\t\t\t\t\tfor i in range(len(self.usedCars[c])):\t# And remove their timeslot from the car\n\t\t\t\t\t\tif int(self.usedCars[c][i][0]) == int(r.getStart()):\n\t\t\t\t\t\t\tself.usedCars[c].pop(i)\n\t\t\t\t\t\t\tbreak\n\t\tfor resDel in delete:\t\t\t\t# Remove the reservations from the list\n\t\t\tdel self.resCars[resDel]\n\t\t\tself.unassigned.append(resDel)\t# Add the removed reservations to the unassigned list\n\n\t\tdelete = []\t# Empty the list of cars to delete\n\t\tdeletepop = []\t# Create a new list of timeslot to delete from the car\n\t\ttimeslot = self.checkTime(chosenCar,res.getStart(),res.getDuration())\t# Check if the reservation fits in the schedule of the car\n\t\tif timeslot > 0:\t\t# If so, add it to the schedule\n\t\t\tself.addTimeslot(timeslot, res, chosenCar)\n\t\telse:\t\t\t\t\t# If not, remove all conflicting reservations\n\t\t\ts2 = int(res.getStart())\n\t\t\td2 = int(res.getDuration())\n\t\t\tfor i in range(len(self.usedCars[chosenCar])):\n\t\t\t\ts1 = int(self.usedCars[chosenCar][i][0])\n\t\t\t\td1 = int(self.usedCars[chosenCar][i][1])\n\t\t\t\tif (((s1 < s2) and (s1 + d1 >= s2)) or ((s1 > s2) and (s1 <= s2 + d2))):\n\t\t\t\t\tdeletepop.append(i)\n\t\t\t\t\tfor r,c in self.resCars.items():\n\t\t\t\t\t\tif c is chosenCar and int(r.getStart()) == s1 :\n\t\t\t\t\t\t\tdelete.append(r)\n\t\t\tfor res in delete:\t\t\t\t\t\t\t\t# Remove the reservations from the list\n\t\t\t\tdel self.resCars[res]\n\t\t\t\tself.unassigned.append(res)\n\t\t\tfor c,i in enumerate(deletepop):\t\t\t\t# Remove conflicting timeslots\n\t\t\t\tself.usedCars[chosenCar].pop(i-c)\n\n\t\t\ttimeslot = self.checkTime(chosenCar,res.getStart(),res.getDuration())\t# If no further conflicts ...\n\t\t\tif timeslot > 0:\n\t\t\t\tself.addTimeslot(timeslot, res, chosenCar)\t# Add the reservation in the schedule\n\t\tself.reassign() # Try to reassign as much of the unassigned reservations as possible" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Release possible payment method reservation
def _release_payment_method_reservation(transaction: DbTransaction, psd: PaymentServiceDriver,
                                        payment_transaction: PaymentServiceTransaction) -> None:
    if transaction.payment_method_amount:
        if transaction.payment_method_transaction_data:
            try:
                psd.cancel_payment(payment_transaction, transaction.payment_reference,
                                   f"Release payment because of {transaction.state}")
                logger.info("Transaction({transaction.id}) payment released OK")
            except PaymentServiceException as pse:
                logger.error("Failed to release payment: %s", pse)
        else:
            logger.error("Payment data missing, failed to release payment")
[ "def _release_purse_reservation(transaction: DbTransaction) -> None:\n if transaction.purse_reservation_id is not None:\n try:\n delete_reservation(transaction.wallet.purse_id, transaction.purse_reservation_id)\n transaction.purse_reservation_id = None\n transaction.save()\n except ApiException as ae:\n logger.error(\"Failed to delete purse reservation, purse=%s, reservation=%s\",\n transaction.wallet.purse_id, transaction.purse_reservation_id, exc_info=ae)", "def free_payment(self):\n self.payment = Payment(profile=self.invoice.profile,\n amount=self.invoice.total,\n provider=self.provider,\n invoice=self.invoice,\n created=timezone.now()\n )\n self.payment.save()\n self.transaction_submitted = True\n\n self.payment.success = True\n self.payment.transaction = f\"{self.payment.uuid}-free\"\n self.payment.payee_full_name = \" \".join([self.invoice.profile.user.first_name, self.invoice.profile.user.last_name])\n self.payment.save()\n \n self.update_invoice_status(Invoice.InvoiceStatus.COMPLETE)\n\n self.create_receipts(self.invoice.order_items.all())", "def void_payment(self):\n pass", "def release(self):\n self.free = True\n self.guest = None\n self.occupy_time = None", "def release(self):\n\n self.transaction(self.holdingshares, ['Cover', 'Sell'][self.action])\n self.holding = 0\n print \" --- %s: released %s shares at gain of %s ---\" % (self.ticker, self.shares, self.gains)", "def donate(self):", "def test_b_renew_license_after_potential_rights_end_date(self):\n self.assertTrue(self.status.is_active(), \"The license is not active, active state awaited\")\n potential_end = self.status.get_potential_end()\n if not potential_end is None:\n with self.assertRaisesRegexp(IOError, 'POST .* HTTP error 403$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, potential_end+self.ADAY)", "def release_authorized_payment(self, order=None, auth=None, testing=False):\n self.log.warn('Module does not implement released_authorized_payment: %s', self.key)\n return ProcessorResult(self.key, False, _(\"Not Implemented\"))", "def release(self, next_individual_index, next_node, current_time):\n next_individual = self.individuals.pop(next_individual_index)\n next_individual.queue_size_at_departure = len(self.individuals)\n next_individual.exit_date = current_time\n if self.c < 'Inf':\n self.detatch_server(next_individual.server, next_individual)\n self.write_individual_record(next_individual)\n self.change_state_release(next_individual)\n self.release_blocked_individual(current_time)\n self.begin_service_if_possible_release(current_time)\n next_node.accept(next_individual, current_time)", "def _finalize_traveller_purchase(auth_ctx: AuthorizationContext, transaction: DbTransaction, finalization_data: str) \\\n -> None:\n if transaction.payment_method_transaction_data:\n payment_method = get_wallet_payment_method(transaction.wallet, transaction.payment_method_id)\n psd = get_payment_service_driver(get_payment_service(payment_method.payment_service_id))\n payment_transaction = psd.create_payment_service_transaction(transaction.payment_method_transaction_data)\n try:\n psd.reserve_payment(payment_transaction)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.CANCELLED if isinstance(pse, UserInteractionCanceledException) \\\n else TransactionState.DENIED\n _release_purse_reservation(transaction)\n transaction.save()\n raise pse\n else:\n psd = None\n payment_transaction = None\n try:\n 
_finalize_create_mtb_products(transaction)\n except Exception as exc:\n logger.error(\"Failed to create product: %s\", exc)\n transaction.state = TransactionState.ISSUE_ERROR\n _release_purse_reservation(transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n _revert_mtb_products(auth_ctx, transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise exc\n if transaction.purse_reservation_id is not None:\n record = NewPurseRecord(transaction_id=str(transaction.id),\n reservation_id=transaction.purse_reservation_id,\n amount=-float(transaction.purse_amount),\n refundable=False)\n try:\n res = purse_create_record(transaction.wallet.purse_id, record)\n except ApiException as ae:\n logger.error(\"Failed to record purse transaction: %s\", ae)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _release_payment_method_reservation(transaction, psd, payment_transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise ae\n transaction.purse_record_ids.append(res.id)\n transaction.purse_reservation_id = None\n if transaction.payment_method_transaction_data:\n try:\n transaction.payment_reference = psd.finalize_payment(payment_transaction, finalization_data)\n except PaymentServiceException as pse:\n logger.error(\"Failed to reserve payment: %s\", pse)\n transaction.state = TransactionState.DENIED\n _revert_mtb_products(auth_ctx, transaction)\n _revert_purse_record(transaction)\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n raise pse\n _mark_purchased(transaction)\n # TODO purge some, transaction.payment_method_transaction_data\n try:\n if payment_transaction:\n transaction.payment_method_transaction_data = payment_transaction.save_dict()\n transaction.save()\n except Exception as exc:\n # TODO finer error handling\n transaction.state = TransactionState.ERROR\n _revert_mtb_products(auth_ctx, transaction)\n if transaction.purse_record_ids:\n # TODO revert record?\n pass\n if transaction.payment_method_transaction_data is not None:\n # TODO release payment reservation/payment?\n pass\n transaction.save()\n raise exc", "def payBooking(self, selectedBooking):\n selectedBooking.setPaid(True)", "def finish_process_for_mode(self):\n coupon_mode = self.get_coupon_mode()\n if coupon_mode not in ('EDIT', 'PUBLISH'):\n # If we are not in EDIT or PUBLISH mode we need to elect a\n # product to purchase. 
Flyer must be removed if it exists, and\n # we need to respect previous selections (if they came back).\n add_annual_slot_choice = get_selected_product(self.request)[2]\n # Create/Recreate the product_list with the associated prices.\n # If Ad rep in session, default to monthly price unless\n # populated.\n if not self.request.session.get('product_list', False):\n if add_annual_slot_choice is not None or \\\n self.request.session.get('ad_rep_id', False):\n set_selected_product(self.request, 2)\n else:\n set_selected_product(self.request, 2)\n elif coupon_mode == 'PUBLISH' and self.create_new_slot:\n self.this.coupon = self.this.offer['coupon'][self.this.current_coupon]\n self.this.coupon['coupon_type_id'] = 3\n self.coupon.coupon_type_id = 3\n self.coupon.save()\n family_availability_dict = \\\n self.request.session['family_availability_dict']\n publish_business_coupon(family_availability_dict, self.coupon)\n send_coupon_published_email.delay(coupon=self.coupon)", "def close_ride(self,rideID):\n ride=Ride.objects.get(id=rideID)\n offer=Offer.objects.get(id=ride.offer.id)\n offer.status = 'F'\n offer.save()\n self.send_to(self.paymentmanager_port, ('payfee', rideID))", "def test_a_renew_license_at_potential_rights_end_date(self):\n self.assertTrue(self.status.is_active(), \"The license is not active, active state awaited\")\n potential_end = self.status.get_potential_end()\n if not potential_end is None:\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, potential_end)\n license = self.status.update_license()\n self.assertEquals(potential_end, license.get_end(), \"The new end date of the license is not potential_rights.end\")", "def makePayment(self):\n\t\tif len(self.paid) == self.teaserMonths + 1:\n\t\t\tself.rate = self.nextRate\n\t\t\tself.payment = calcPayment(self.owed[-1], self.rate, self.months - self.teaserMonths)\n\t\tMortgage.makePayment(self)", "def test_billing_recurring_cancel(self):\n pass", "def release(self, ):\n\n pass\n\n '''\n // ParcelRelease\n // Release a parcel to public\n // viewer -> sim\n // reliable\n {\n \tParcelRelease Low 212 NotTrusted Unencoded\n \t{\n \t\tAgentData\t\tSingle\n \t\t{\tAgentID\t\t\tLLUUID\t}\n \t\t{\tSessionID\t\tLLUUID\t}\n \t}\n \t{\n \t\tData\t\t\tSingle\n \t\t{\tLocalID\t\t\tS32\t\t}\t// parcel ID\n \t}\n }\n '''", "def handle_rejected(cls, agreement): # pragma: no cover", "def close_vote(self):\n txn_dict = {'from': self.address}\n self.contract.functions.closeVote(self._private_key.export_key()).transact(txn_dict)\n print(\"All decryption keys are published, the vote is closed.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send an email with a receipt for the transaction to the user. Return true if email address found.
def _send_receipt_email(user: User, transaction: DbTransaction, email: str = None) -> bool: if not email: email = user.receipt_email if email: locale = user.locale if locale is None: locale = shared.config.default_locale logger.debug("Send receipt in %s to %s", locale, user.receipt_email) try: t = Template(name='receipt_email', locale=locale, global_variables=shared.jinja_globals) template_env = {'transaction': transaction} subject = t.render(template_env, section='subject') message = t.render(template_env, section='body') content_type = t.content_type except TemplateNotFound: subject = "Receipt from BobCat" message = f"Receipt for transaction: {transaction.id}" content_type = 'text/plain' services.email_sender.send(email_from=None, email_to=email, subject=subject, message=message, content_type=content_type) return True else: return False
[ "def add_email(self):\n if EMAIL_CONFIRMATION:\n from . import EmailAddress\n self.is_active = False\n self.save()\n EmailAddress.objects.add_email(self, self.email)\n return True\n else:\n return False", "def confirmation_email(self):\n import EmailTemplate\n to_email = self.cart['address']['email']\n template = EmailTemplate.EmailTemplate('ORDER_CONFIRMATION', 'XXXXX@XXXXX.com', to_email)\n template.subject('Order Confirmation: {}'.format(self.cart['cart_id']))\n template.add_vars({ 'cart': self.cart })\n template.send()", "def send_mail(self):\n mail_struct = queue_mail_types[self.email_type]\n presend = mail_struct['presend'](self)\n mail = Mail(\n mail_struct['template'],\n subject=mail_struct['subject'],\n categories=mail_struct.get('categories', None)\n )\n self.data['osf_url'] = osf_settings.DOMAIN\n if presend and self.user.is_active and self.user.osf_mailing_lists.get(osf_settings.OSF_HELP_LIST):\n send_mail(self.to_addr or self.user.username, mail, **(self.data or {}))\n self.sent_at = timezone.now()\n self.save()\n return True\n else:\n self.__class__.delete(self)\n return False", "def complete_resend_email(self, actor, email):\n\n try:\n cart_status = self.cart['cart_status']\n if (cart_status['complete_email_resend'] == 0):\n raise CartInvalid(\"Cannot resend email for cart status {}\".format(cart_status['name']))\n c = get_cursor()\n\n # If the support user has changed the email address, save the new email.\n cur_email = self.cart['address']['email']\n if cur_email != email:\n c.execute(\"\"\"\n update address\n set email = %s\n where cart_id = %s\"\"\",\n (email, self.cart['cart_id']))\n self.cart['address']['email'] = email\n\n try:\n self.complete_email()\n self.log(\"Order Complete Email resent to {}\".format(email), actor)\n except Exception as e:\n self.log(\"Could not resend order complete email: {}\".format(e.args[0]))\n\n return { 'cart_logs': self.get_logs() }\n\n except CartInvalid as e:\n raise CartInvalid(e)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def add_email(self, email):\n\n # Check that this address isn't already verified\n owner = self.db.one(\"\"\"\n SELECT p.username\n FROM emails e INNER JOIN participants p\n ON e.participant_id = p.id\n WHERE e.address = %(email)s\n AND e.verified IS true\n \"\"\", locals())\n if owner:\n if owner == self.username:\n raise EmailAlreadyVerified(email)\n else:\n raise EmailTaken(email)\n\n if len(self.get_emails()) > 9:\n raise TooManyEmailAddresses(email)\n\n nonce = str(uuid.uuid4())\n verification_start = utcnow()\n\n try:\n with self.db.get_cursor() as c:\n self.app.add_event(c, 'participant', dict(id=self.id, action='add', values=dict(email=email)))\n c.run(\"\"\"\n INSERT INTO emails\n (address, nonce, verification_start, participant_id)\n VALUES (%s, %s, %s, %s)\n \"\"\", (email, nonce, verification_start, self.id))\n except IntegrityError:\n nonce = self.db.one(\"\"\"\n UPDATE emails\n SET verification_start=%s\n WHERE participant_id=%s\n AND address=%s\n AND verified IS NULL\n RETURNING nonce\n \"\"\", (verification_start, self.id, email))\n if not nonce:\n return self.add_email(email)\n\n base_url = gratipay.base_url\n username = self.username_lower\n encoded_email = encode_for_querystring(email)\n link = \"{base_url}/~{username}/emails/verify.html?email2={encoded_email}&nonce={nonce}\"\n self.app.email_queue.put( self\n , 'verification'\n , email=email\n , link=link.format(**locals())\n , 
include_unsubscribe=False\n )\n if self.email_address:\n self.app.email_queue.put( self\n , 'verification_notice'\n , new_email=email\n , include_unsubscribe=False\n\n # Don't count this one against their sending quota.\n # It's going to their own verified address, anyway.\n , _user_initiated=False\n )", "def test_sendEmail(self):\n resetAddress = 'reset@example.org'\n resetURI = URL.fromString('http://example.org/resetPassword')\n userAddress = 'joe@divmod.com'\n\n resetAttempt = self.reset.newAttemptForUser(userAddress.decode('ascii'))\n _sentEmail = []\n self.reset.sendEmail(resetURI, resetAttempt, userAddress,\n _sendEmail=lambda *args: _sentEmail.append(args))\n\n self.assertEquals(len(_sentEmail), 1)\n [(sentFrom, sentTo, sentText)] = _sentEmail\n self.assertEquals(sentFrom, resetAddress)\n self.assertEquals(sentTo, userAddress)\n\n msg = email.message_from_string(sentText)\n [headerFrom] = msg.get_all('from')\n [headerTo] = msg.get_all('to')\n [headerDate] = msg.get_all('date')\n # Python < 2.5 compatibility\n try:\n from email import utils\n except ImportError:\n from email import Utils as utils\n self.assertEquals(utils.parseaddr(headerFrom)[1], resetAddress)\n self.assertEquals(utils.parseaddr(headerTo)[1], userAddress)\n self.assertTrue(utils.parsedate_tz(headerDate) is not None,\n '%r is not a RFC 2822 date' % headerDate)\n\n self.assertTrue(not msg.is_multipart())\n self.assertIn(flatten(resetURI.child(resetAttempt.key)),\n msg.get_payload())", "def send_receipt(to, cleaner_name, receipt_id):\n\tmessage = (\"{0} has finished cleaning your place! {1}/receipt/{2}\".format(cleaner_name, DOMAIN_NAME, receipt_id))\n\tsend_SMS(to, message)", "def test_users_activation_email_send(self):\n pass", "def send_email_request(self, request,):\n\n assert self.context == 'request'\n\n # Generate text\n from django.template import Context, Template\n from django.template.loader import get_template\n ctx = Context({\n 'prefix': settings.EMAIL_SUBJECT_PREFIX,\n 'request': request,\n 'sender': settings.USER_EMAIL_SIGNATURE,\n })\n tmpl = get_template(self.template)\n body = tmpl.render(ctx)\n subject_tmpl = Template(self.subject_template)\n subject = subject_tmpl.render(ctx)\n\n # Generate recipients\n recipients = []\n for rt in self.recipients:\n if rt == 'recipient':\n recipients.append(request.check_to_email)\n elif rt == 'area':\n recipients.append(request.budget_area.owner_address())\n elif rt == 'admins':\n pass # you don't *actually* have a choice...\n for name, addr in settings.ADMINS:\n recipients.append(addr)\n\n # Send mail!\n from django.core.mail import send_mail\n send_mail(\n subject,\n body,\n settings.SERVER_EMAIL,\n recipients,\n )", "def onePriceBuyInformSellerEmail(*args, **kwargs):\n transaction = kwargs.get('transaction')\n templates = models.NotificationTemplate.objects.filter(name='buyer_one_price_inform_seller')\n if transaction and templates:\n massEmailThread = email.MassEmailThread()\n subject = templates[0].subject.replace('{param1}', transaction.buyer.username).replace('{param2}', transaction.app.app_name)\n message = templates[0].template.replace('{param1}', transaction.seller.username).replace('{param2}', transaction.buyer.username).replace('{param3}', transaction.app.app_name)\n massEmailThread.addEmailData(subject=subject, message=message, recipient_list=[transaction.app.publisher.email])\n massEmailThread.start()\n return None", "def mass_mail_send(self):\n partner_pool = self.env['tenancy.rent.schedule']\n active_ids = partner_pool.search(\n [('start_date', 
'<', datetime.date.today().strftime(\n DEFAULT_SERVER_DATE_FORMAT))])\n for partner in active_ids:\n if partner.rel_tenant_id.parent_id:\n if partner.rel_tenant_id.parent_id[0].email:\n to = '\"%s\" <%s>' % (\n partner.rel_tenant_id.name,\n partner.rel_tenant_id.parent_id[0].email)\n # TODO(email): add some tests to check for invalid email addresses\n # CHECKME: maybe we should use res.partner/email_send\n tools.email_send(tools.config.get('email_from', False),\n [to],\n 'Reminder for rent payment',\n '''Hello Mr %s,\\n\n Your rent QAR %d of %s is unpaid so \\\n kindly pay as soon as possible.\n \\n\n Regards,\n Administrator.\n Property management firm.\n ''' % (\n partner.rel_tenant_id.name,\n partner.amount, partner.start_date))\n return {'type': 'ir.actions.act_window_close'}", "def activation_email(request, user):\n link = request.route_url(\n 'register_activate',\n code='-'.join(\n [text_type(user.pid),\n user.activation.code]))\n # link = '-'.join(['register.activate', text_type(user.pid), user.activation.code])\n emailtext = _(\"Please validate your email and activate your account by visiting: {link}\")\n body = emailtext.format(link=link)\n return {\n \"request\": request,\n \"subject\": _(\"Please activate your account\"),\n \"recipients\": [user.email],\n \"body\": body\n }", "def remindBuyerPay(request, *args, **kwargs):\n transaction = kwargs.get('transaction')\n templates = models.NotificationTemplate.objects.filter(name='seller_remind_buyer_pay')\n if transaction and templates:\n massEmailThread = email.MassEmailThread()\n subject = templates[0].subject.replace('{param1}', transaction.app.app_name)\n message = templates[0].template.replace('{param1}', transaction.buyer.username).replace('{param2}', transaction.app.app_name)\n massEmailThread.addEmailData(subject=subject, message=message, recipient_list=[transaction.buyer.email])\n massEmailThread.start()", "def send_review_reminder(self, invoiceline_instance, **kwargs):\n L.info('Sending email notification to buyer for new invoice')\n instance = invoiceline_instance\n\n self.msg.global_merge_vars = {\n 'USER_FULLNAME': instance.user.profile.fullname,\n 'USER_EMAIL': instance.user.email,\n 'TOTAL': instance.total,\n 'INVOICE_ID': instance.invoice.id,\n 'ORDER_ID': instance.invoice.order_id,\n }\n\n return self._send(to=[instance.user.email],\n template_name=self.ETPL_INVOICE_APPROVE_REMINDER)", "def resend_new_email_activation_link(request):\n user = User.objects.filter(email=request.user.email).first()\n if resend_activation_email(request, user, user.change_email):\n messages.success(request,\n ('A new link has successfuly been sent to {}'.\n format(user.change_email)))\n return redirect('/dashboard')\n messages.error(request, ('Something went wrong!'))\n return redirect('/dashboard')", "def send_reservation_confirm(\n to_email, to_name, date_reservation, restaurant_name, number_seat\n):\n subject = \"Reservation confirmed\"\n body = (\n \"Hi {toName},<br>\"\n \"we are glad to confirm your table for {numberSeat} people \"\n 'at restaurant \"{restaurantName}\" in date {dateReservation}<br> '\n \"<br>See you soon!<br> \"\n )\n body = body.replace(\"{toName}\", to_name)\n body = body.replace(\"{restaurantName}\", restaurant_name)\n body = body.replace(\"{dateReservation}\", date_reservation)\n body = body.replace(\"{numberSeat}\", str(number_seat))\n send_email(subject, body, to_email)", "def post(self):\n api_payload = api.payload\n output = mail_sender.send_confirm_reimbursement_mail(\n 
email_type=MAIL_TYPE.confirm_reimbursement.value,\n user_id=api_payload.get('user_id'),\n from_mail=MAIL_DEFAULT.from_default.value,\n to_mail=api_payload.get('to_mail'),\n subject=api_payload.get('subject'),\n ticket_params=api_payload.get('params'))\n return {'message': 'colocado na fila!'}, 200", "def send_new_account_email(self):\n # system_email =\n standard_msg = \"\"\"Hello, you have received money.\n You can retrieve it at my website\"\"\"\n # fixme: fill with correct test email info\n # server = SMTP_SSL(\"smtp.gmail.com\", 465)\n # server.login(system_email, )\n server.sendmail(system_email, self.email, standard_msg)\n server.quit()", "def send_verify_mail(self):\n\n with open(creds, 'r') as email_infos:\n e_reader = csv.reader(email_infos)\n for row in e_reader:\n email_user = row[0]\n email_password = row[1]\n\n msg = MIMEMultipart()\n msg['From'] = email_user\n msg['To'] = self.email_send\n msg['Subject'] = \"Email verification\"\n\n body = \"Verification code: \" + str(self.verif_code)\n msg.attach(MIMEText(body, 'plain'))\n\n text = msg.as_string()\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(email_user, email_password)\n\n server.sendmail(email_user, self.email_send, text)\n server.quit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove id from the set. Return True if it was removed or there is no set.
def _remove_id_from_optional_set(id: str, id_set: Optional[Set[str]]): if id_set is None: return True try: id_set.remove(id) return True except KeyError: return False
[ "def safe_remove_set(input_set: set, value):\n try:\n input_set.remove(value)\n except KeyError:\n print(\"trying to remove something funky from a set\")\n pass", "def removeById(self, id):\n for i in range(len(self.list)):\n if self.list[i].getId() == id: \n del self.list[i]\n return", "def remove(self, val):\n index = self.dict.get(val)\n if index is not None:\n last_num = self.set_list[-1]\n self.set_list[index] = last_num\n self.dict[last_num] = index\n self.set_list.pop()\n del self.dict[val]\n return True\n else:\n return False", "def remove_student(self,id):\n for index in range(len(self.stu_list)-1,-1,-1):\n if id == self.stu_list[index].id:\n del self.stu_list[index]\n return True\n return False", "def __remove_one_id(id):\n if id != SelectionDataHolder.__LAST_CREATED_ID:\n return\n\n SelectionDataHolder.__KEY_IDS[id.split('|')[0]] -= 1", "def remove(self, element):\n if not self.contains(element):\n raise KeyError(f'Element does not exist in the set: {element}')\n else:\n self.hash.delete(element)\n self.size -= 1", "def remove(self, element: _SetElementT) -> None:\n del self._elements[element]", "def remove(self, val):\n if val in self.s:\n self.s.remove(val)\n return True\n\n return False", "def remove_selection_from_set(self, selection_set, selection_set_id):\n if len(selection_set) < 1:\n return []\n if selection_set_id not in self.__selection_sets:\n return []\n selection_set = self.__make_list_distinct(selection_set)\n\n new_selection_set = [s for s in self.__selection_sets[selection_set_id] if s not in selection_set]\n # Remove the complete set if its empty.\n if len(new_selection_set) < 1:\n selection_diff = self.__selection_sets.pop(selection_set_id)\n else:\n selection_diff = [s for s in self.__selection_sets[selection_set_id] if s not in new_selection_set]\n self.__selection_sets[selection_set_id] = new_selection_set\n\n self.dispatch(self.__SELECTION_SET_REMOVED, sender=self, selection_set_id=selection_set_id, selection_diff=selection_set)\n return selection_diff", "def remove(self, element):\n if self.hash_set.contains(element):\n self.hash_set.delete(element)\n else:\n raise KeyError(f'Item not found: {element}')", "def remove(self, id): \n allS = self.__loadFromFile()\n poz = -1\n for i in range(len(allS)):\n if allS[i].getId()==id:\n poz = i\n if poz == -1:\n raise ValueError(\"No student for the id:\"+id)\n st = allS[poz]\n del allS[poz] \n self.__storeToFile(allS)\n return st", "def rm_ip_set(target_set, del_ip):\n _ipset('del', target_set, del_ip)", "def destroy_set(target_set):\n _ipset('destroy', target_set)", "def erase(self, id):\n\t\tif id not in self.entities:\n\t\t\treturn\n\n\t\tself.entities.pop(id)", "def remove( self, element ):\n assert element in self, \"The element must be in the set.\"\n self._theElements.remove(element)", "def remove(self, rng):\r\n # no mutation unless the operation is successful\r\n rng = RangeSet(rng)\r\n temp = self.copy()\r\n # do the removal on the copy\r\n for rngsetlist in temp._rangesets:\r\n for rngset, value in rngsetlist:\r\n try:\r\n rngset.discard(rng)\r\n except TypeError:\r\n break\r\n temp.popempty()\r\n self._rangesets, self._values = temp._rangesets, temp._values", "def removepoint(self, targetpoint):\n\n self.setsize -= 1\n self.set.remove(targetpoint)", "def removeLine(self, line_id: int) -> bool:\n if line_id not in self.pool:\n return False\n del self.pool[line_id]\n return True", "def remove_by_id(self, id):\n for user in self:\n if user.get_id() == id:\n self.remove(user)\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode all labels with the given batch size. Wrapped by the memory utilization maximizer to automatically reduce the batch size if needed.
def _encode_all_memory_utilization_optimized( encoder: "TextEncoder", labels: Sequence[str], batch_size: int, ) -> torch.Tensor: return torch.cat( [encoder(batch) for batch in chunked(tqdm(map(str, labels), leave=False), batch_size)], dim=0, )
[ "def encode_all(\n self,\n labels: Sequence[str],\n batch_size: Optional[int] = None,\n ) -> torch.FloatTensor:\n return _encode_all_memory_utilization_optimized(\n encoder=self, labels=labels, batch_size=batch_size or len(labels)\n ).detach()", "def next_batch(self, batch_size):\r\n # Get next batch of image (path) and labels\r\n paths = self.generate_empty_lst()\r\n for i in range(self.num_ch):\r\n paths[i] = self.images[i][self.pointer:self.pointer + batch_size]\r\n\r\n labels = self.labels[self.pointer:self.pointer + batch_size]\r\n\r\n # update pointer\r\n self.pointer += batch_size\r\n\r\n # Read images\r\n images = np.ndarray([self.num_ch, batch_size, self.scale_size[0], self.scale_size[1], 3])\r\n\r\n\r\n for i in range(self.num_ch):\r\n # img = cv2.imread(paths[i])\r\n for j in range(len(paths[0])):\r\n\r\n img = Image.open(paths[i][j])\r\n\r\n if self.histogram_eq:\r\n img = ImageOps.equalize(img)\r\n\r\n if self.horizontal_flip and np.random.random() < 0.5:\r\n img = ImageOps.mirror(img)\r\n\r\n img = img.convert(\"RGB\")\r\n img = img.resize((self.scale_size[0], self.scale_size[1]), resample=Image.LANCZOS)\r\n img = np.ndarray.astype(np.array(img), np.float32)\r\n img -= self.mean\r\n images[i][j] = img\r\n\r\n\r\n # Expand labels to one hot encoding\r\n one_hot_labels = np.zeros((batch_size, self.nb_classes))\r\n\r\n for i in range(len(labels)):\r\n one_hot_labels[i][labels[i]-1] = 1\r\n\r\n # return array of images and labels\r\n return images, one_hot_labels", "def encodeAll(self, src_path, data_home, label_file, class_file, dest_path, cnt_max, preprocessor=None):\n with open(label_file, 'r') as label_file:\n label_dict = json.load(label_file)\n\n with open(class_file, 'r') as class_file:\n class_dict = json.load(class_file)\n\n cnt = 1\n\n tfrecord_filename = os.path.join(dest_path, '%d.tfrecords' % (cnt))\n writer = tf.python_io.TFRecordWriter(tfrecord_filename)\n\n #process each json file\n for file in os.listdir(src_path):\n content = open(os.path.join(src_path, file)).read()\n ori_rcd = json.loads(content)\n\n img = vis.read_image(ori_rcd['imgname'], data_home)\n img_size = [ori_rcd['imgsize']['width'],\n ori_rcd['imgsize']['height'],\n ori_rcd['imgsize']['channel']]\n img_size = np.array(img_size)\n\n labels = []\n bboxes = []\n for obj in ori_rcd['objects']:\n labels.append(label_dict[obj['label']])\n object = [obj['x1'], obj['y1'], obj['x2'], obj['y2']]\n bboxes.append(object)\n labels = np.array(labels)\n bboxes = np.array(bboxes)\n\n class_id = label_dict[class_dict[ori_rcd['imgname']]]\n\n # Preprocess image and bounding boxes.\n if None != preprocessor:\n img, img_size, bboxes = preprocessor(img, img_size, bboxes)\n\n # plt.imshow(img)\n # plt.draw()\n # plt.waitforbuttonpress()\n\n feature = {\n 'image_name':_bytes_feature(tf.compat.as_bytes(ori_rcd['imgname'])),\n 'image': _bytes_feature(img.tobytes()),\n 'size': _int64List_feature(img_size),\n 'class': _int64_feature(class_id),\n 'labels': _int64List_feature(labels),\n 'bbox_num': _int64_feature(np.shape(bboxes)[0]),\n 'bboxes': _int64List_feature(bboxes)\n }\n\n # Create an example protocol buffer\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n # Writing the serialized example.\n example_str = example.SerializeToString()\n writer.write(example_str)\n\n print('cnt: %d' % cnt)\n cnt = cnt + 1\n\n #write out records each cnt_max items\n if 0 == cnt % cnt_max:\n writer.close()\n tfrecord_filename = os.path.join( dest_path, '%d.tfrecords' % (cnt))\n writer = 
tf.python_io.TFRecordWriter(tfrecord_filename)\n\n # break\n writer.close()", "def _fit_label_encoder(self, model_path: str):\n\n # Replace countries occurring less than N times with \"OTHER\"\n self.train_df['COUNTRY.OF.ORIGIN'].fillna('OTHER', inplace=True)\n country_counts = pd.DataFrame(self.train_df['COUNTRY.OF.ORIGIN'].value_counts())\n common_countries = set(country_counts[country_counts['COUNTRY.OF.ORIGIN'] > 50].index)\n self.train_df['COUNTRY.OF.ORIGIN.MAPPED'] = self.train_df['COUNTRY.OF.ORIGIN'].apply(\n lambda x: 'OTHER' if x not in common_countries else x)\n\n # Write out label encoder for use in prediction\n label_encoder = LabelEncoder()\n country_labels = label_encoder.fit_transform(self.train_df['COUNTRY.OF.ORIGIN.MAPPED'])\n pickle.dump(label_encoder, open(os.path.join(model_path, LABEL_FILE), \"wb\"))\n\n return country_labels", "def adjust_images_labels_size(images, labels, size):\n\n assert isinstance(size, tuple), 'size must be a tuple.'\n # TODO(panos): add input checks\n\n shape = tf.shape(images)\n spatial_shape = shape[1:3]\n\n upscale_condition = tf.reduce_any(tf.less(spatial_shape, size))\n downscale_condition = tf.reduce_all(tf.greater(spatial_shape, size))\n factor = tf.cast(tf.reduce_max(size / spatial_shape), tf.float32)\n\n def _resize_images_labels(images, labels, size):\n images = tf.image.resize_images(images, size)\n labels = tf.image.resize_images(labels[..., tf.newaxis],\n size,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[..., 0]\n return images, labels\n\n def _true_fn():\n return _resize_images_labels(\n images,\n labels,\n tf.cast(tf.ceil(factor * tf.cast(spatial_shape, tf.float32)), tf.int32))\n\n def _false_fn():\n return images, labels\n\n combined_condition = tf.logical_or(upscale_condition, downscale_condition)\n images, labels = tf.cond(combined_condition, _true_fn, _false_fn)\n\n def _random_crop(images, labels, size):\n # TODO(panos): check images and labels spatial size statically,\n # if it is defined and is the same as size do not add random_crop ops\n # convert images to tf.int32 to concat and random crop the same area\n images = tf.cast(tf.image.convert_image_dtype(images, tf.uint8), tf.int32)\n concated = tf.concat([images, labels[..., tf.newaxis]], axis=3)\n Nb = images.shape[0].value\n crop_size = (Nb,) + size + (4,)\n print('debug:concated,crop_size:', concated, crop_size)\n concated_cropped = tf.random_crop(concated, crop_size)\n # convert images back to tf.float32\n images = tf.image.convert_image_dtype(tf.cast(concated_cropped[..., :3], tf.uint8), tf.float32)\n labels = concated_cropped[..., 3]\n return images, labels\n\n images, labels = _random_crop(images, labels, size)\n\n return images, labels", "def _save_label_encoders(label_encoder_dict: defaultdict, output_dir: str):\n for label, encoder in label_encoder_dict.items():\n with open(os.path.join(output_dir, 'encoders', f\"{label}.json\"), 'w') as f:\n json.dump(list(encoder.classes_), f)", "def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5):\n\n assert isinstance(kernel_size, int) and kernel_size % 2 == 1, \\\n f'The kernel_size should be an odd integer, got {kernel_size}'\n\n padding = (kernel_size - 1) // 2\n\n maximum = F.max_pool2d(\n batch_heatmaps, kernel_size, stride=1, padding=padding)\n maximum_indicator = torch.eq(batch_heatmaps, maximum)\n batch_heatmaps = batch_heatmaps * maximum_indicator.float()\n\n return batch_heatmaps", "def get_train_batch(batch_size=12):\n \n image_paths = glob(os.path.join(data_dir, 'images', '*.jpg'))\n if 
shuffle:\n random.shuffle(image_paths)\n for i in range(0, len(image_paths), batch_size):\n images = []\n labels = []\n names = []\n for path in image_paths[i:i+batch_size]:\n image_name = os.path.basename(path)\n names.append(image_name)\n label_name = image_name[:-4] + '_train_id.png'\n label_path = os.path.join(data_dir, 'labels', label_name)\n label = imageio.imread(label_path)\n image = imageio.imread(path)\n if relabel:\n relabel_vehicles(label)\n relabel_pedestrian(label)\n relabel_background(label)\n if new_labels:\n new_label_20(label)\n new_label_30(label)\n if trim:\n image = image[trim_ind[0]:trim_ind[1]]\n label = label[trim_ind[0]:trim_ind[1]]\n if reshape:\n image = cv2.resize(image, new_shape)\n label = cv2.resize(label, new_shape, interpolation=cv2.INTER_NEAREST)\n if preprocess:\n image = image_preprocessing(image, denoise=denoise)\n label = one_hot_label(label, values)\n images.append(image)\n labels.append(label)\n \n images = np.array(images, dtype=np.uint8)\n labels = np.array(labels, dtype=np.uint8)\n \n yield images, labels, names", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batchify(inputs, batch_size):\n\n inputs = np.asarray(inputs)\n\n pad_size = -len(inputs) % batch_size\n if pad_size:\n padding = np.tile(inputs[:1], [pad_size, 1])\n padded_inputs = np.concatenate([inputs, padding], axis=0)\n else:\n padded_inputs = inputs\n batched_shape = (-1, batch_size) + padded_inputs.shape[1:]\n batched_inputs = np.reshape(padded_inputs, batched_shape)\n return batched_inputs, pad_size", "def encode(self, inputs, masks, context = True):\n with tf.variable_scope(\"encoder\") as scope_encoder:\n \n #compute sequence length\n sequence_lengths = tf.reduce_sum(masks, axis = 1) \n #create a forward cell\n fw_cell = tf.contrib.rnn.GRUCell(self.size)\n\n #pass the cells to bilstm and create the bilstm\n if context:\n bw_cell = tf.contrib.rnn.GRUCell(self.size)\n output, final_state = tf.nn.bidirectional_dynamic_rnn(fw_cell, \\\n bw_cell, inputs, \\\n sequence_length = sequence_lengths, \\\n dtype = tf.float32, \\\n parallel_iterations = 256)\n output = tf.concat([output[0], output[1]], axis = -1)\n final_state = tf.concat([final_state[0], final_state[1]], axis = -1)\n else:\n output, final_state = tf.nn.dynamic_rnn(fw_cell, inputs, \\\n sequence_length = sequence_lengths,\\\n dtype = tf.float32,\\\n parallel_iterations = 256)\n return output, final_state #[batch, max_steps, self.size], [batch, self.size] or 2*self.size if context", "def set_batch_size(self, batch_size=100):\n self._batch_size = batch_size", "def set_batch_size(self, batch_size):\r\n\r\n self.batch_size = batch_size", "def gen_labels_for_chunks(num_chunks, chunk_size,\n n_classes=10, n_unknown_labels=5):\n assert min(num_chunks, chunk_size) > 0\n classes = shuffle(np.arange(n_classes), random_state=SEED)\n n_per_class = chunk_size * (num_chunks // n_classes)\n n_maj_class = chunk_size * num_chunks - n_per_class * (n_classes - 1)\n\n first_labels = classes[0] * np.ones(n_maj_class, dtype=int)\n remaining_labels = np.concatenate([k * np.ones(n_per_class, dtype=int)\n for k in classes[1:]])\n unknown_labels = -1 * np.ones(n_unknown_labels, dtype=int)\n\n labels = np.concatenate([first_labels, remaining_labels, unknown_labels])\n return shuffle(labels, random_state=SEED)", "def encodeProcessedImagesForClassification(autoencoder=None):\n # unless an 
autoencoder was provided, load the default pretrained one\n if autoencoder == None:\n autoencoder = loadAutoencoder()\n def processLabelsFile(labelsFilePath):\n # open the file listing images to encode\n inFile = open(labelsFilePath)\n lines = inFile.read().split('\\n')\n inFile.close()\n # encode each image\n for line in lines:\n # skip empty lines\n if line != \"\":\n # load the image\n imageName, labelString = line.split(',')\n inputPath = os.path.join(processedDatasetPath, imageName)\n outputImage = encodeImage(autoencoder, inputPath)\n # save the encoded image to a file\n outputPath = os.path.join(encodingOutputDirectory, imageName)\n cv.imwrite(outputPath, outputImage)\n # encode all images (training and testing)\n processLabelsFile(\"./labels.txt\")", "def decode_MAP(self, epochs=20, batch_size=1000):\n\n # perform gradient descent\n batches = int(np.ceil(self.m/batch_size))\n ix = np.arange(self.m)\n with self.sess.as_default():\n for e in tqdm(range(epochs)):\n np.random.shuffle(ix)\n loss_epoch = 0.\n for b in range(batches):\n ix_batch = ix[b*batch_size:(b+1)*batch_size]\n _, loss_val = self.sess.run(\n [self.train_op, self.loss],\n feed_dict={self.batch_ix : ix_batch}\n )\n loss_epoch += loss_val*len(ix_batch)\n self.loss_vals.append(loss_epoch / self.m)", "def generate_and_save_bottleneck_features(batch_size):\n\t# Create pretrained VGG16 convolutional base\n\tmodel = PretrainedVGG16NoTop.build(depth=cfg.img_channels,\n\t\t\t\t\t\t\t\t\t width=cfg.img_width,\n\t\t\t\t\t\t\t\t\t height=cfg.img_height)\n\n\t# Initialize ImageDataGenerator and set ImageNet mean (which is subtracted from images)\n\tdatagen = ImageDataGenerator(featurewise_center=True)\n\tdatagen.mean = np.array([103.939, 116.779, 123.68],\n\t\t\t\t\t\t\tdtype=np.float32).reshape(3,1,1)\n\n\t# Generate and save bottleneck features for training data if they do not exist\n\tif not osp.isfile(cfg.bf_train_path):\n\t\tgenerator = datagen.flow_from_directory(\n\t\t\t\t\t\t\tcfg.train_data_dir,\n\t\t\t\t\t\t\ttarget_size=(cfg.img_width, cfg.img_height),\n\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\tclass_mode=cfg.classmode,\n\t\t\t\t\t\t\tshuffle=False)\n\n\t\tprint(\"Creating bottleneck features for training data: \\n{}\\n\".format(cfg.bf_train_path))\n\t\tbottleneck_features_train = model.predict_generator(generator, cfg.nb_train_samples)\n\t\tsave_np_array(cfg.bf_train_path, bottleneck_features_train)\n\telse:\n\t\tprint(\"Using existing bottleneck features for training data: \\n{}\\n\".format(cfg.bf_train_path))\n\n\t# Generate and save bottleneck features for validation data if they do not exist\n\tif not osp.isfile(cfg.bf_val_path):\n\t\tgenerator = datagen.flow_from_directory(\n\t\t\t\t\t\t\tcfg.val_data_dir,\n\t\t\t\t\t\t\ttarget_size=(cfg.img_width, cfg.img_height),\n\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\tclass_mode=cfg.classmode,\n\t\t\t\t\t\t\tshuffle=False)\n\n\t\tprint(\"Creating bottleneck features for val data: \\n{}\\n\".format(cfg.bf_val_path))\n\t\tbottleneck_features_val = model.predict_generator(generator, cfg.nb_val_samples)\n\t\tsave_np_array(cfg.bf_val_path, bottleneck_features_val)\n\telse:\n\t\tprint(\"Using existing bottleneck features for val data: \\n{}\\n\".format(cfg.bf_val_path))", "def integer_encode(in_labels):\n encoder = LabelEncoder()\n encoder.fit(in_labels)\n encoded_out = encoder.transform(in_labels)\n return encoded_out", "def generate_measure_encodings(self, dataset, logdir, batch_size=8):\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, 
num_workers=0)\n\n track_id_to_measure_encodings = defaultdict(lambda: defaultdict(list))\n buffer_dict = defaultdict(lambda: defaultdict(list))\n\n # Threshold of entries before dumping to a file\n buffer_threshold = 100\n buffer_threshold_increment = 100\n\n self.eval()\n with torch.no_grad():\n for idx, batch in enumerate(tqdm(dataloader, desc='Generating measure encodings', total=math.ceil(len(dataset)/batch_size))):\n token_ids, measure_ids, track_ids = batch\n\n token_ids = token_ids.to(self.device)\n batch_size, seq_len = token_ids.shape\n\n token_embeds = self.token_embedding(token_ids)\n\n # Permute into (seq_len, batch, embed_size)\n token_embeds = token_embeds.permute(1, 0, 2)\n\n # The position ids are just 0, 1, and 2 repeated for as long\n # as the sequence length\n pos_ids = torch.tensor([0, 1, 2]).repeat(batch_size, math.ceil(seq_len/3))[:, :seq_len]\n pos_ids = pos_ids.to(self.device)\n pos_embeds = self.pos_embedding(pos_ids)\n pos_embeds = pos_embeds.permute(1, 0, 2)\n\n full_embeds = torch.cat((token_embeds, pos_embeds), dim=2)\n\n lstm_out, _ = self.lstm(full_embeds)\n\n # We need the lstm output to be (batch_size, seq_len, hidden_dim)\n lstm_out = lstm_out.permute(1, 0, 2).cpu().numpy().astype(np.float16).tolist()\n\n track_ids = track_ids.cpu().numpy()\n measure_ids = measure_ids.cpu().numpy()\n\n # First, we add all of the model hidden states, index by track and measure ID\n for batch_idx in range(batch_size):\n for seq_len_idx in range(seq_len):\n track_id = track_ids[batch_idx][seq_len_idx]\n measure_id = measure_ids[batch_idx][seq_len_idx]\n\n # once threshold is reached, dump buffer_dict contents of PRIOR tracks/measures\n # then raise the threshold, empty the buffer, and continue onward\n # this is necessary to keep memory footprint low\n if track_id >= buffer_threshold:\n print(\"Buffer threshold reached! {} tracks\".format(buffer_threshold))\n buffer_threshold += buffer_threshold_increment\n\n print(\"Dumping buffer dict...\")\n for buffer_t_id in buffer_dict: # buffer track\n for buffer_m_id in buffer_dict[buffer_t_id]: # buffer measure\n measure_hidden_states = buffer_dict[buffer_t_id][buffer_m_id]\n\n # Take the average to get compact representation\n track_id_to_measure_encodings[buffer_t_id][buffer_m_id] = torch.mean(torch.tensor(measure_hidden_states), dim=0)\n\n # Convert the track to a normal dict\n track_id_to_measure_encodings[buffer_t_id] = dict(track_id_to_measure_encodings[buffer_t_id])\n\n # De-allocate buffer and start a new one\n del buffer_dict\n buffer_dict = defaultdict(lambda: defaultdict(list))\n\n model_hidden = lstm_out[batch_idx][seq_len_idx]\n buffer_dict[track_id][measure_id].append(model_hidden)\n\n # Final dump of buffer dict\n for track_id in buffer_dict:\n for measure_id in buffer_dict[track_id]:\n measure_hidden_states = buffer_dict[track_id][measure_id]\n\n track_id_to_measure_encodings[track_id][measure_id] = torch.mean(torch.tensor(measure_hidden_states), dim=0)\n\n # Convert the track to a normal dict\n track_id_to_measure_encodings[track_id] = dict(track_id_to_measure_encodings[track_id])\n\n # Convert the whole thing to a normal dict\n track_id_to_measure_encodings = dict(track_id_to_measure_encodings)\n\n # Save measure encodings to logdir\n measure_encodings_path = os.path.join(logdir, 'measure_encodings.pkl')\n\n print(\"Saving measure encodings to {}...\".format(measure_encodings_path))\n with open(measure_encodings_path, 'wb') as file:\n pickle.dump(track_id_to_measure_encodings, file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode all labels (inference mode & batched).
def encode_all( self, labels: Sequence[str], batch_size: Optional[int] = None, ) -> torch.FloatTensor: return _encode_all_memory_utilization_optimized( encoder=self, labels=labels, batch_size=batch_size or len(labels) ).detach()
[ "def encode_labels(labels):\r\n le = preprocessing.LabelEncoder()\r\n norm_labels = le.fit_transform(labels)\r\n return norm_labels", "def _save_label_encoders(label_encoder_dict: defaultdict, output_dir: str):\n for label, encoder in label_encoder_dict.items():\n with open(os.path.join(output_dir, 'encoders', f\"{label}.json\"), 'w') as f:\n json.dump(list(encoder.classes_), f)", "def one_hot_encoding(labels):\n\tencoded_labels = [0]*NUM_CLASSES\n\tfor label in labels:\n\t\tencoded_labels[label] = 1\n\treturn encoded_labels", "def _encode_all_memory_utilization_optimized(\n encoder: \"TextEncoder\",\n labels: Sequence[str],\n batch_size: int,\n) -> torch.Tensor:\n return torch.cat(\n [encoder(batch) for batch in chunked(tqdm(map(str, labels), leave=False), batch_size)],\n dim=0,\n )", "def encode_data(data, labels, model, file_name= \"encoded.hdf5\"):\r\n\r\n model.summary()\r\n print(data.shape)\r\n\r\n print(\"Imagens processadas\")\r\n outputs = model.predict(data, batch_size=128, verbose=1)\r\n print(outputs.shape)\r\n \r\n f = h5py.File(file_name, mode ='w')\r\n f.create_dataset(\"encoded\", outputs.shape, np.float32)\r\n f[\"encoded\"][...] = outputs\r\n f.create_dataset(\"labels\", (outputs.shape[0],7), np.uint8)\r\n f[\"labels\"][...] = labels\r\n f.close()\r\n print('Finalizado')", "def setup_label_coding(self):\n all_labels = set()\n for _key in self.__class__.ORDERED_SUBSET[:-1]:\n _df = self.dfs[_key]\n if _df.empty:\n continue\n assert \"label\" in _df.columns\n _found_labels = set(_df[\"label\"].tolist())\n all_labels = all_labels.union(_found_labels)\n\n # exclude ABSTAIN from self.classes, but include it in the encoding\n all_labels.discard(module_config.ABSTAIN_DECODED)\n self.classes = sorted(all_labels)\n self.label_encoder = {\n **{_label: _i for _i, _label in enumerate(self.classes)},\n module_config.ABSTAIN_DECODED: module_config.ABSTAIN_ENCODED,\n }\n self.label_decoder = {_v: _k for _k, _v in self.label_encoder.items()}\n\n console.print(\n f\"Set up label encoder/decoder with {len(self.classes)} classes.\",\n style=\"green\",\n )\n self.validate_labels()", "def integer_encode(in_labels):\n encoder = LabelEncoder()\n encoder.fit(in_labels)\n encoded_out = encoder.transform(in_labels)\n return encoded_out", "def encodeProcessedImagesForClassification(autoencoder=None):\n # unless an autoencoder was provided, load the default pretrained one\n if autoencoder == None:\n autoencoder = loadAutoencoder()\n def processLabelsFile(labelsFilePath):\n # open the file listing images to encode\n inFile = open(labelsFilePath)\n lines = inFile.read().split('\\n')\n inFile.close()\n # encode each image\n for line in lines:\n # skip empty lines\n if line != \"\":\n # load the image\n imageName, labelString = line.split(',')\n inputPath = os.path.join(processedDatasetPath, imageName)\n outputImage = encodeImage(autoencoder, inputPath)\n # save the encoded image to a file\n outputPath = os.path.join(encodingOutputDirectory, imageName)\n cv.imwrite(outputPath, outputImage)\n # encode all images (training and testing)\n processLabelsFile(\"./labels.txt\")", "def label_encoding(self):\n attributes_labels = np.concatenate((self.train_data.attributes.values,\n self.validation_data.attributes.values,\n self.test_data.attributes.values),\n axis=None)\n attributes_yt = self.mlb.fit_transform(attributes_labels)\n return {\n 'tr_act': torch.tensor(self.le.fit_transform(self.train_data.action.values), device=self.device),\n 'tr_att': 
torch.tensor(attributes_yt[0:len(self.train_data.attributes.values)], device=self.device),\n 'vd_act': torch.tensor(self.le.fit_transform(self.validation_data.action.values), device=self.device),\n 'vd_att': torch.tensor(attributes_yt[len(self.train_data.attributes.values): len(self.train_data.attributes.values) + len(\n self.validation_data.attributes.values)], device=self.device),\n 'tst_act': torch.tensor(self.le.fit_transform(self.test_data.action.values), device=self.device),\n 'tst_att': torch.tensor(attributes_yt[len(self.train_data.attributes.values) + len(self.validation_data.attributes.values):],\n device=self.device)\n }", "def encode_labels(train, test):\r\n\r\n\tcategorical_features = train.select_dtypes(['object']).columns\r\n\r\n\tfor col in categorical_features:\r\n\t\ttotal_values = pd.concat([train[col], test[col]], axis=0)\r\n\t\t\r\n\t\tlbl = LabelEncoder()\r\n\t\t\r\n\t\tlbl.fit(total_values)\r\n\t\ttrain[col] = lbl.transform(train[col])\r\n\t\ttest[col] = lbl.transform(test[col])\r\n\r\n\treturn train, test", "def create_label_encoder(labels):\n from sklearn.preprocessing import LabelEncoder\n encoder = LabelEncoder()\n encoder.fit(labels)\n return encoder", "def generate_encoder(self, paths):\n labels = self.get_labels(paths)\n enc = OneHotEncoder()\n #labels = np.array(labels, dtype=np.float32)\n enc.fit(labels)\n labels = enc.transform(labels).toarray()\n return enc", "def one_hot_encoding(labels, num_classes=10):\n one_hot_labels = []\n for label in labels:\n ohe = [0] * num_classes\n ohe[int(label)] = 1\n one_hot_labels.append(ohe)\n return np.array(one_hot_labels)", "def encode_label(labels):\n y = np.zeros(len(labels))\n for i, l in np.ndenumerate(labels):\n if l == 'realDonaldTrump':\n y[i] = 1\n else:\n y[i] = 0\n return y", "def encodeAll(self, src_path, data_home, label_file, class_file, dest_path, cnt_max, preprocessor=None):\n with open(label_file, 'r') as label_file:\n label_dict = json.load(label_file)\n\n with open(class_file, 'r') as class_file:\n class_dict = json.load(class_file)\n\n cnt = 1\n\n tfrecord_filename = os.path.join(dest_path, '%d.tfrecords' % (cnt))\n writer = tf.python_io.TFRecordWriter(tfrecord_filename)\n\n #process each json file\n for file in os.listdir(src_path):\n content = open(os.path.join(src_path, file)).read()\n ori_rcd = json.loads(content)\n\n img = vis.read_image(ori_rcd['imgname'], data_home)\n img_size = [ori_rcd['imgsize']['width'],\n ori_rcd['imgsize']['height'],\n ori_rcd['imgsize']['channel']]\n img_size = np.array(img_size)\n\n labels = []\n bboxes = []\n for obj in ori_rcd['objects']:\n labels.append(label_dict[obj['label']])\n object = [obj['x1'], obj['y1'], obj['x2'], obj['y2']]\n bboxes.append(object)\n labels = np.array(labels)\n bboxes = np.array(bboxes)\n\n class_id = label_dict[class_dict[ori_rcd['imgname']]]\n\n # Preprocess image and bounding boxes.\n if None != preprocessor:\n img, img_size, bboxes = preprocessor(img, img_size, bboxes)\n\n # plt.imshow(img)\n # plt.draw()\n # plt.waitforbuttonpress()\n\n feature = {\n 'image_name':_bytes_feature(tf.compat.as_bytes(ori_rcd['imgname'])),\n 'image': _bytes_feature(img.tobytes()),\n 'size': _int64List_feature(img_size),\n 'class': _int64_feature(class_id),\n 'labels': _int64List_feature(labels),\n 'bbox_num': _int64_feature(np.shape(bboxes)[0]),\n 'bboxes': _int64List_feature(bboxes)\n }\n\n # Create an example protocol buffer\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n # Writing the serialized example.\n example_str = 
example.SerializeToString()\n writer.write(example_str)\n\n print('cnt: %d' % cnt)\n cnt = cnt + 1\n\n #write out records each cnt_max items\n if 0 == cnt % cnt_max:\n writer.close()\n tfrecord_filename = os.path.join( dest_path, '%d.tfrecords' % (cnt))\n writer = tf.python_io.TFRecordWriter(tfrecord_filename)\n\n # break\n writer.close()", "def encode_multi_class_label(self, df_in, class_label):\n le = LabelEncoder()\n le.fit(df_in[class_label])\n # print(le.classes_)\n df_in[class_label] = le.transform(df_in[class_label])", "def _fit_label_encoder(self, model_path: str):\n\n # Replace countries occurring less than N times with \"OTHER\"\n self.train_df['COUNTRY.OF.ORIGIN'].fillna('OTHER', inplace=True)\n country_counts = pd.DataFrame(self.train_df['COUNTRY.OF.ORIGIN'].value_counts())\n common_countries = set(country_counts[country_counts['COUNTRY.OF.ORIGIN'] > 50].index)\n self.train_df['COUNTRY.OF.ORIGIN.MAPPED'] = self.train_df['COUNTRY.OF.ORIGIN'].apply(\n lambda x: 'OTHER' if x not in common_countries else x)\n\n # Write out label encoder for use in prediction\n label_encoder = LabelEncoder()\n country_labels = label_encoder.fit_transform(self.train_df['COUNTRY.OF.ORIGIN.MAPPED'])\n pickle.dump(label_encoder, open(os.path.join(model_path, LABEL_FILE), \"wb\"))\n\n return country_labels", "def setup_label_coding(self, verbose=True, debug=False):\n all_labels = set()\n for _key in [*self.__class__.PUBLIC_SUBSETS, *self.__class__.PRIVATE_SUBSETS]:\n _df = self.dfs[_key]\n _found_labels = set(_df[\"label\"].tolist())\n all_labels = all_labels.union(_found_labels)\n\n # exclude ABSTAIN from self.classes, but include it in the encoding\n all_labels.discard(module_config.ABSTAIN_DECODED)\n self.classes = sorted(all_labels)\n self.label_encoder = {\n **{_label: _i for _i, _label in enumerate(self.classes)},\n module_config.ABSTAIN_DECODED: module_config.ABSTAIN_ENCODED,\n }\n self.label_decoder = {_v: _k for _k, _v in self.label_encoder.items()}\n\n if verbose:\n self._good(\n f\"Set up label encoder/decoder with {len(self.classes)} classes.\"\n )\n if debug:\n self.validate_labels()", "def dump_labels(self, labels: Labels):\n self.labels_queue.put(labels)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
v[j]-u[i]=c[i,j] if x[i,j]>0, v[0]=0
def calc_u_v(c: np.ndarray, x: np.ndarray) -> (np.array, np.array): assert c.shape == x.shape (m, n) = c.shape # constructing a graph (adjacency list) graph = defaultdict(list) for i in range(m): for j in range(n): if x[i, j] is not None: graph[i].append(m+j) graph[m+j].append(i) # performing BFS run u, v = np.zeros(m), np.zeros(n) visited = [False for _ in range(m+n)] stack = [m + 0] visited[m + 0] = True # v[0] := 0 <=> node no. m+0 is visited while stack: node = stack.pop() for adj in graph[node]: # for all adjacent nodes: if not visited[adj]: visited[adj] = True stack.append(adj) if node < m: # `node` in `u`, `adj` in `v` i = node j = adj - m v[j] = c[i, j] + u[i] else: # `node` in `v`, `adj` in `u` i = adj j = node - m u[i] = v[j] - c[i, j] # performing a check: all components of u, v found? if False in visited: raise Exception("unable to define potentials, perhaps the problem is ill-posed") return u, v
[ "def nonzero_values(x):\n return x[x != 0]", "def proj_cap_ent(d0, v):\n d = d0\n m = len(d)\n if v < 1.0 / m:\n print \"error\"\n # this is more numerically stable than the original pseudo code.\n uu = np.sort(d, kind='quicksort')\n cs = np.cumsum(uu)\n # uu = np.sort(d, kind='quicksort')[::-1]\n # Z = uu.sum()\n\n for i in xrange(m):\n # if Z == 0:\n # break\n Z = cs[m-i-1]\n e = (1 - v * i) / Z\n if e * uu[m-i-1] <= v:\n break\n\n # try:\n # for i in xrange(m):\n # # if Z == 0:\n # # break\n # e = (1 - v * i) / Z\n # if e * uu[i] <= v:\n # break\n # Z -= uu[i]\n # if e < 0:\n # print \"\"\n # except FloatingPointError:\n # print \"Z: %f, sum: %d\" % (Z, uu.sum())\n # except Exception as err:\n # pdb.set_trace()\n if d.max()>1 or d.min()<0:\n print 'wrong'\n d = np.minimum(v, e * d)\n return d", "def jaccard(u, v):\n # The scipy implementation is for binary vectors only. \n # This version is more general.\n return 1.0 - (matching(u, v) / np.sum(np.maximum(u, v)))", "def jaccard(u, v):\n u = u > 0\n v = v > 0\n return (1.0 * (u * v).sum()) / (u + v).sum()", "def Relax(G, u, v):\n\tprint(u.value, v.value)\n\tif v.distance > u.distance + G.weights[(u, v)]:\n\t\tv.distance = u.distance + G.weights[(u, v)]\n\t\tv.predecessor = u", "def m0positiveprior(self,width,x,components):\n\t\tval=components[0].copy().flatten()\n\t\tisnegative=val<0\n\t\tval[~isnegative]=0\n\t\tjacobian=np.zeros((val.size,self.npar))\n\t\tjacobian[:,self.im0]=self.regularizationDerivs[0].reshape((-1,self.im0.size)).copy()\n\t\tjacobian[~isnegative,:]=0\n\t\treturn val/width,val,jacobian", "def relu(z):\n return np.maximum(0, z)", "def zero_crossings(x, y):\n n = len(x)\n x_zc = []\n for i in range(n-1):\n if y[i] == 0.0:\n x_zc.append(x[i])\n elif ( (y[i] > 0.0 and y[i+1] < 0.0)\n or (y[i] < 0.0 and y[i+1] > 0.0) ):\n x_zc.append(\n (y[i] * x[i+1] - y[i+1] * x[i]) / (y[i] - y[i+1]))\n return x_zc", "def test_cvx_simple():\n\n x_ = np.matrix([0.5, 1.5]).T\n\n x = cp.Variable(2)\n c = np.matrix([[2, 1]]).T\n A = np.matrix([[-1, 1],\n [ -1, -1],\n [ 0, -1],\n [ 1, -2],\n ])\n b = np.matrix([[ 1, -2, 0, 4 ]]).T\n\n prob = cp.Problem(\n cp.Minimize( c.T * x ),\n [A * x - b <= 0],\n )\n prob.solve()\n\n print prob.status\n x = np.matrix(x.value)\n print 'x', x\n print 'x*', x_\n assert np.allclose(x, x_)", "def reLU(x):\n \n return np.maximum(0,x)", "def threshold_to_zero(self,mx, threshold):\n high_values_indexes = set(zip(*((np.abs(mx) >= threshold).nonzero())))\n nonzero_indexes = zip(*(mx.nonzero()))\n\n if not sp.isspmatrix_lil(mx):\n mx = mx.tolil() \n\n for s in nonzero_indexes:\n if s not in high_values_indexes:\n mx[s] = 0.0\n mx = mx.tocoo()\n mx.eliminate_zeros()\n return mx", "def relu(data):\n return data * (data > 0)", "def zero_crossings(data):\n pos = data > 0\n npos = ~pos\n return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]", "def PartialPivot(A,v):\r\n\r\n N = len(v)\r\n \r\n # Gaussian elimination\r\n for m in range(N):\r\n heads = A[::,m] #collecting leading elements of the m-th stel in the elimination to ultimately select a good candidate. 
\r\n abs_heads = list(abs(heads))\r\n winning = abs_heads.index(max(abs_heads))\r\n if heads[m] == 0:\r\n A[m, :], A[winning, :] = copy(A[winning, :]), copy(A[m, :])\r\n v[m], v[winning] = copy(v[winning]), copy(v[m])\r\n else:\r\n pass\r\n # Divide by the diagonal element\r\n div = A[m,m]\r\n A[m,:] /= div\r\n v[m] /= div\r\n \r\n # Now subtract from the lower rows\r\n for i in range(m+1,N):\r\n mult = A[i,m]\r\n A[i,:] -= mult*A[m,:]\r\n v[i] -= mult*v[m]\r\n \r\n # Backsubstitution\r\n x = empty(N,float)\r\n for m in range(N-1,-1,-1):\r\n x[m] = v[m]\r\n for i in range(m+1,N):\r\n x[m] -= A[m,i]*x[i]\r\n return x", "def recipr0(x):\n x = np.asarray(x)\n out = np.zeros_like(x, dtype=np.float64)\n nans = np.isnan(x.flat)\n non_zero = ~nans\n non_zero[non_zero] = non_zero[non_zero] & (x.flat[non_zero] != 0)\n out.flat[non_zero] = 1.0 / x.flat[non_zero]\n out.flat[nans] = np.nan\n return out", "def matching(u, v):\n # The scipy implementation is for binary vectors only. \n # This version is more general.\n return np.sum(np.minimum(u, v))", "def somb(x, e):\n\n z = np.ones(x.shape, dtype=float)\n # is this finding where the values are > 0?\n i = np.where(x > 0)\n\n z[i] = 2.0 * (sci.special.jn(1, np.pi*x[i]) - e*scipy.special.jn(1,e*np.pi*x[i])) / (np.pi*x[i])/(1-e**2)\n\n return z", "def test_cphase_zero_paulix(self, wires, res):\n commutation = qml.is_commuting(qml.CPhase(0.0, wires=wires[0]), qml.PauliX(wires=wires[1]))\n assert commutation == res", "def prox_csimplex(z, k):\n\t# safe guard for k\n\tassert 0<=k<=z.size, 'k: k must be between 0 and dimension of the input.'\n\n\tdef f(l):\n\t\tans = 0\n\t\tn = len(z)\n\t\tfor zi in z:\n\t\t\tif zi < l:\n\t\t\t\tans += 1/2*zi**2 - l*k/n\n\t\t\telif zi > 1 + l:\n\t\t\t\tans += 1/2*(1-zi)**2 + l*(1-k/n)\n\t\t\telse:\n\t\t\t\tans += 1/2*l**2 + l*(zi - l - k/n)\n\t\treturn ans\n\n\tdef df(l):\n\t\tans = 0\n\t\tn = len(z)\n\t\tfor zi in z:\n\t\t\tif zi < l:\n\t\t\t\tans += -k/n\n\t\t\telif zi > 1 + l:\n\t\t\t\tans += 1 - k/n\n\t\t\telse:\n\t\t\t\tans += -l + zi - k/n\n\t\treturn ans\n\n\tl0, r = bisect(df, -100500, + 100500, full_output=True)\n\tif not r.converged:\n\t\tprint(\"does not converge\")\n\treturn (z-l0).clip(0, 1)\n\n\t# TODO do the computation here\n\t# Hint: 1. construct the scalar dual object and use `bisect` to solve it.\n\t#\t\t2. obtain primal variable from optimal dual solution and return it.\n\t#" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detect objects on an image that is passed in a request stream.
def detect_objects_image_from_request_body(self, img_name, output_image_name): print(f'{img_name} detected object on an image that is passed in a request stream') method = 'ssd' threshold = 50 includeLabel = True includeScore = True allowedLabels = "person" blockedLabels = "" input_stream = img_name # os.path.join("./detection/", img_name) outPath = output_image_name storage = None # We are using default Cloud Storage request = requests.CreateObjectBoundsRequest(input_stream, method, threshold, includeLabel, includeScore, allowedLabels, blockedLabels, outPath, storage) # print('Call CreateObjectBoundsRequest with params: method: {0}, threshold: {1}, includeLabel: {2}, includeScore: {3}'.format(method, threshold, includeLabel, includeScore)) detectedObjectsList = self._imaging_api.create_object_bounds(request) person_count = len(detectedObjectsList.detected_objects) print('objects detected: {0}'.format(person_count) ) # print(detectedObjectsList) return person_count
[ "def image(obj):\n return match(obj, image_matchers)", "def visualize_detect_objects_image_from_request_body(self, img_name, output_image_name):\n print('Visualize detected object on an image that is passed in a request stream')\n\n def write_to_file(file_path, file_name):\n shutil.copy(file_name, file_path)\n \n print('Image ' + ' is saved to ' + os.path.dirname(file_path))\n\n\n method = 'ssd'\n threshold = 50\n includeLabel = True\n includeScore = True\n allowedLabels = \"person\"\n blockedLabels = \"dog\"\n color = None\n input_stream = img_name\n outPath = None\n storage = None # We are using default Cloud Storage\n\n request = requests.CreateVisualObjectBoundsRequest(input_stream, method, threshold,\n includeLabel, includeScore, allowedLabels, blockedLabels, color, outPath, storage)\n\n print('Call CreateVisualObjectBoundsRequest with params: method: {0}, threshold: {1}, includeLabel: {2}, includeScore: {3}, color: {4}'.format(method, threshold, includeLabel, includeScore, color))\n\n updated_image = self._imaging_api.create_visual_object_bounds(request)\n \n # self._save_updated_sample_image_to_output(updated_image, False, \"jpg\")\n write_to_file(output_image_name, updated_image)\n print()\n\n return", "def image(self, obj):", "def find_instances(img):\n pass", "def imagecheck(tweet):\n\tpass", "def _to_detected_object(yd, img):\n tl = yd[\"topleft\"]\n br = yd[\"bottomright\"]\n bbox = etag.BoundingBox.from_abs_coords(\n tl[\"x\"], tl[\"y\"], br[\"x\"], br[\"y\"], img=img\n )\n return etao.DetectedObject(yd[\"label\"], bbox, confidence=yd[\"confidence\"])", "def picture():\n if request.headers['Content-Type'] == 'application/json':\n imagedata, status = vision.analyse(request.json['image'])\n return Response(imagedata, status=status, mimetype='application/json')\n else:\n app.logger.error('Problem with data, request body looked like this (%s)', request.data)\n return Response(\"payload needs to be in JSON-format\", status=400)", "def http_classify(self, req):\n\n if len(req.files) != 0:\n img = np.fromstring(req.files['file'].read(), np.uint8)\n else:\n img = np.fromstring(req.data, np.uint8)\n\n img = cv2.imdecode(img, cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img, (self.Helpers.confs[\"data\"][\"dim\"],\n self.Helpers.confs[\"data\"][\"dim\"]))\n\n return self.get_prediction(img)", "def post(self):\n\n # Parse the request arguments\n args = parser.parse_args()\n # Validation on 'file' argument in the request\n if args['file'] == \"\":\n self.logger.warning('No file found')\n return InferenceResult('error','No file found').serialize(), HTTPStatus.BAD_REQUEST\n\n # read image file from the stream\n uploaded_file = args['file']\n filename = secure_filename(uploaded_file.filename).lower()\n self.logger.debug(f\"Raw filename='{uploaded_file.filename}', Secure filename='{filename}'\")\n if filename == \"\":\n self.logger.warning(f\"No secure filename found for raw filename='{uploaded_file.filename}'\")\n return InferenceResult('error','No file found').serialize(), HTTPStatus.BAD_REQUEST\n \n file_ext = os.path.splitext(filename)[1]\n self.logger.debug(f\"File extension='{file_ext}'\")\n is_file_ext_valid = file_ext in self.allowed_exts\n is_file_valid_image = file_ext == self.__validate_image__(uploaded_file.stream)\n if (not is_file_ext_valid) or (not is_file_valid_image):\n self.logger.warning(f\"Invalid file extension={is_file_ext_valid}\")\n self.logger.warning(f\"Invalid image file={is_file_valid_image}\")\n return InferenceResult('error','Invalid file').serialize(), 
HTTPStatus.BAD_REQUEST\n \n # Step 1: Save file\n upload_file_path = self.storage_handler.save_file(\n uploaded_file, filename, request.mimetype)\n self.logger.info(f\"Step-1: Uploaded file path='{upload_file_path}'\")\n # Step 2: Detect tea leaves from the uploaded file\n detector = Detector(self.logger, file_path=upload_file_path)\n self.logger.debug(f\"Step-2: Detect options='{detector.options}'\")\n detected_dir = detector.detect()\n self.logger.info(f\"Step-2: Detected dir='{detected_dir}'\")\n # Step 3: Classify each detected tea leaf image\n classifier = TeaLeavesClassifier(detected_dir)\n predictions = classifier.predictions()\n self.logger.info(f\"Step-3: Classifier predictions='{predictions}'\")\n\n # Return response json\n '''\n Result:\n {\n 'status' : '',\n 'msg' : '',\n 'predictions' : {\n 'type' : 'Withered',\n 'categories' : {\n 'Best' : 60.0,\n 'Below Best': 15.0,\n 'Poor' : 25.0\n }\n }\n }\n '''\n return InferenceResult('success','image processed', predictions).serialize(), HTTPStatus.OK", "def is_image_fobj(fobj):\n bf = HunkBlockFile()\n bf_type = bf.peek_type(fobj)\n return bf_type == TYPE_LOADSEG", "def detect(self, src):\n pre = self.preprocess(src)\n seg = self.segment(pre)\n morph = self.morphological(seg)\n hulls = self.create_convex_hulls(morph)\n gate_im = self.bound_gate_using_poles(hulls, src)\n return gate_im", "def test_no_object(self):\n img = \"test_files/test_no_obj.jpg\"\n detection = img_object_detection(img)\n self.assertEqual(detection, None)", "def handler_process_existing_image():\n pass", "def classify_image(img_path, img_result):\n token = get_authorisation()\n headers = {'Authorization': 'Bearer ' + token}\n\n files = {'file': (img_path,open('person.png','rb'),'image/png')}\n\n if img_result:\n print(\"classify_image - if\")\n url = config.DETECTION_URL + \"format:image\"\n r = requests.post(url, headers=headers, files=files)\n #responseJSON = json.loads(r.text)\n #print(r.text))\n if r.status_code == 200:\n with open('output.jpg', 'wb') as f:\n for chunk in r:\n f.write(chunk)\n\n image = Image.open('output.jpg')\n \n #print(responseJSON['num_detections'])\n else:\n print(\"classify_image - else\")\n url = config.DETECTION_URL\n r = requests.post(url, headers=headers, files=files)\n responseJSON = json.loads(r.text)\n print(responseJSON)\n if r.status_code == 200:\n num_people = responseJSON['num_detections']\n return num_people", "def detect(image, args):\n data = {\n 'requests':[\n {\n 'image': {'content': image},\n 'features':[\n {\n 'type': 'FACE_DETECTION',\n 'maxResults': 2,\n },\n {\n 'type': 'TEXT_DETECTION',\n 'maxResults': 1,\n },\n ]\n }\n ]\n }\n r = requests.post('https://vision.googleapis.com/v1/images:annotate?key=' + args.g,\n data=json.dumps(data))\n\n if r.status_code != 200:\n print 'error status ' + str(r.json())\n return None\n else:\n return likelySentiment(r, args)", "def image():\n if request.files.get('image'):\n fname = 'images/{}.jpg'.format(str(time.time()))\n request.files['image'].save(fname)\n result = img_tasks.read_img_detect_circles(fname)\n print(result)\n current_app.logger.info(\"POST {}\".format(result))\n return jsonify({'success': True}), 200\n else:\n current_app.logger.warning(\"IMAGE POST FAILED\")\n return jsonify({'success': False}), 400", "def get_image_prediction():\n\n if request.headers['Content-Type'] == 'application/json;charset=utf-8':\n response = request.get_json()\n filepath = response[\"filepath\"]\n destination = response[\"destination\"]\n return return_predictions_recur(str(filepath), 
str(destination), tfnet)\n else:\n print('header check failed')\n return {403, 'Headers are invalid'}", "def test_image_analysis_by_url(self):\n pass", "def test_api_stream_image(self):\n tmp_file = BytesIO()\n # create a new image\n image = Image.new(\"RGBA\", (1280, 1024), (255, 0, 0, 0))\n image.save(tmp_file, \"png\")\n tmp_file.seek(0)\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n ),\n )\n # Check if returns `Last-Modified` key in headers\n # required for `If-Modified-Since`\n self.assertTrue(\"Last-Modified\" in get_the_response.headers)\n\n last_modified = get_the_response.headers[\"Last-Modified\"]\n\n self.assertEqual(get_the_response.data, tmp_file.getvalue())\n\n # Test `If-Modified-Since` recognized properly\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n ),\n headers={\"If-Modified-Since\": last_modified},\n )\n\n self.assertEqual(get_the_response.status_code, 304)\n\n urlargs = dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"200,200,200,200\",\n size=\"300,300\",\n rotation=\"!50\",\n quality=\"color\",\n image_format=\"pdf\",\n )\n\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n self.assert200(get_the_response)\n\n default_name = \"{name}-200200200200-300300-color-50.pdf\".format(\n name=secure_filename(urlargs[\"uuid\"])\n )\n for dl, name in (\n (\"\", default_name),\n (\"1\", default_name),\n (\"foo.pdf\", \"foo.pdf\"),\n ):\n urlargs[\"dl\"] = dl\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n self.assert200(get_the_response)\n self.assertEqual(\n get_the_response.headers[\"Content-Disposition\"],\n \"attachment; filename={name}\".format(name=name),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualize detected objects on an image that is passed in a request stream.
def visualize_detect_objects_image_from_request_body(self, img_name, output_image_name): print('Visualize detected object on an image that is passed in a request stream') def write_to_file(file_path, file_name): shutil.copy(file_name, file_path) print('Image ' + ' is saved to ' + os.path.dirname(file_path)) method = 'ssd' threshold = 50 includeLabel = True includeScore = True allowedLabels = "person" blockedLabels = "dog" color = None input_stream = img_name outPath = None storage = None # We are using default Cloud Storage request = requests.CreateVisualObjectBoundsRequest(input_stream, method, threshold, includeLabel, includeScore, allowedLabels, blockedLabels, color, outPath, storage) print('Call CreateVisualObjectBoundsRequest with params: method: {0}, threshold: {1}, includeLabel: {2}, includeScore: {3}, color: {4}'.format(method, threshold, includeLabel, includeScore, color)) updated_image = self._imaging_api.create_visual_object_bounds(request) # self._save_updated_sample_image_to_output(updated_image, False, "jpg") write_to_file(output_image_name, updated_image) print() return
[ "def detect_objects_image_from_request_body(self, img_name, output_image_name):\n print(f'{img_name} detected object on an image that is passed in a request stream')\n\n method = 'ssd'\n threshold = 50\n includeLabel = True\n includeScore = True\n allowedLabels = \"person\"\n blockedLabels = \"\"\n input_stream = img_name # os.path.join(\"./detection/\", img_name)\n \n outPath = output_image_name\n storage = None # We are using default Cloud Storage\n\n request = requests.CreateObjectBoundsRequest(input_stream, method, threshold,\n includeLabel, includeScore, allowedLabels, blockedLabels, outPath, storage)\n\n # print('Call CreateObjectBoundsRequest with params: method: {0}, threshold: {1}, includeLabel: {2}, includeScore: {3}'.format(method, threshold, includeLabel, includeScore))\n\n detectedObjectsList = self._imaging_api.create_object_bounds(request)\n person_count = len(detectedObjectsList.detected_objects)\n print('objects detected: {0}'.format(person_count) )\n # print(detectedObjectsList)\n\n return person_count", "def image(self, obj):", "def picture():\n if request.headers['Content-Type'] == 'application/json':\n imagedata, status = vision.analyse(request.json['image'])\n return Response(imagedata, status=status, mimetype='application/json')\n else:\n app.logger.error('Problem with data, request body looked like this (%s)', request.data)\n return Response(\"payload needs to be in JSON-format\", status=400)", "def image_analysis_in_stream(subscription_key):\n client = ComputerVisionClient(\n endpoint=\"https://jishnu-roychoudhury-sgp.cognitiveservices.azure.com/\",\n credentials=CognitiveServicesCredentials(subscription_key)\n )\n\n with open(os.path.join(IMAGES_FOLDER, \"danl.png\"), \"rb\") as image_stream:\n image_analysis = client.analyze_image_in_stream(\n image=image_stream,\n visual_features=[\n VisualFeatureTypes.image_type, # Could use simple str \"ImageType\"\n VisualFeatureTypes.faces, # Could use simple str \"Faces\"\n VisualFeatureTypes.categories, # Could use simple str \"Categories\"\n VisualFeatureTypes.color, # Could use simple str \"Color\"\n VisualFeatureTypes.tags, # Could use simple str \"Tags\"\n VisualFeatureTypes.description # Could use simple str \"Description\"\n ]\n )\n\n print(\"This image can be described as: {}\\n\".format(\n image_analysis.description.captions[0].text))\n\n print(\"Tags associated with this image:\\nTag\\t\\tConfidence\")\n for tag in image_analysis.tags:\n print(\"{}\\t\\t{}\".format(tag.name, tag.confidence))\n\n print(\"\\nThe primary colors of this image are: {}\".format(\n image_analysis.color.dominant_colors))", "def image(obj):\n return match(obj, image_matchers)", "def render(self, image=..., cameraPose=...) 
-> image:\n ...", "def draw_identify(self, result):\n # defaults\n box_width = 100\n box_height = 22\n\n camera_width = self.camera.properties['width']\n camera_height = self.camera.properties['height']\n left = int(camera_width * 0.5 - box_width * 0.5)\n right = int(camera_width * 0.5 + box_width * 0.5)\n top = int(camera_height * 0.5 - box_height * 0.5)\n bottom = int(camera_height * 0.5 + box_height * 0.5)\n\n text = \"Searching...\"\n color = (0, 0, 200)\n text_color = (255, 255, 255)\n\n if result:\n left, top, right, bottom, text, color, text_color = result\n\n # Draw frame\n cv2.rectangle(Data.frame, (left, top), (right, bottom), color, 1)\n\n # Draw a label with a name below the face\n cv2.rectangle(Data.frame, (left, bottom - 20), (right, bottom), color, cv2.FILLED)\n font = cv2.FONT_HERSHEY_PLAIN\n center = int((left + right) * 0.5 * 0.95)\n cv2.putText(Data.frame, text, (center, bottom - 6), font, 1.0, text_color, 1)", "def start_preview_stream(self) -> GoProResp:", "def display(self):\n self.o.display_image(self.image)", "def get_inference_image(self):\n for detection in self.cvOut[0,0,:,:]:\n score = float(detection[2])\n if score > self.Threshold:\n left = int(detection[3] * self.cols)\n top = int(detection[4] * self.rows)\n right = int(detection[5] * self.cols)\n bottom = int(detection[6] * self.rows)\n\n # Draw the bounding-box on the image\n cv2.rectangle(self.result_image,(left, top),(right, bottom), (23, 230, 210), thickness=2)\n cv2.drawMarker(self.result_image,get_rect_centre(left, top,right, bottom),(255,0,0))\n cv2.putText(self.result_image, self.label_dict[int(detection[1])] + \" : \" + str(round(score,4)),\\\n (int(left-10),int(top-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 2)\n\n print(\"[INFO] Result image generated successfully.\")\n return self.result_image", "def CollectImage(self, request, context):\n name, imgPath = take_picture()\n return collect_pb2.ImageInfo(name=name, path=imgPath)", "def preview_capture_example():", "def annotate_objects(annotator, results, labels):\n for obj in results:\n # Convert the bounding box figures from relative coordinates\n # to absolute coordinates based on the original resolution\n ymin, xmin, ymax, xmax = obj['bounding_box']\n xmin = int(xmin * CAMERA_WIDTH)\n xmax = int(xmax * CAMERA_WIDTH)\n ymin = int(ymin * CAMERA_HEIGHT)\n ymax = int(ymax * CAMERA_HEIGHT)\n\n # Overlay the box, label, and score on the camera preview\n annotator.bounding_box([xmin, ymin, xmax, ymax])\n annotator.text([xmin, ymin],'%s\\n%.2f' % (labels[obj['class_id']], obj['score']))", "def callback(self, data):\n try:\n # Conversion to cv2 image using bgr8 encoding \n frame = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n # Showing frame \n cv.imshow(\"Camera_Stream\", frame)", "def send_request(self, img_path):\n\n addr = \"http://\" + self.Helpers.confs[\"server\"][\"ip\"] + \\\n ':'+str(self.Helpers.confs[\"server\"][\"port\"]) + '/Inference'\n headers = {'content-type': 'image/jpeg'}\n\n self.Helpers.logger.info(\"Sending request for: \" + img_path)\n\n _, img_encoded = cv2.imencode('.png', cv2.imread(img_path))\n response = requests.post(\n addr, data=img_encoded.tostring(), headers=headers)\n response = json.loads(response.text)\n\n return response", "def show_callback(self, msg):\n try:\n img = self.bridge.imgmsg_to_cv2(msg)\n if self.update_snapshot:\n self._snapshot = deepcopy(img)\n self.update_snapshot = False\n self._update_snapshot_window = False\n if self.arm_z < 5.0:\n action_point = 
self.get_action_point()\n cv.circle(img, action_point, 2, (0,0,255), -1)\n cv.imshow(\"Live\", img)\n cv.imshow(\"Snapshot\", self._snapshot)\n if self._init:\n cv.setMouseCallback(\"Snapshot\", self.onMouse)\n self._img = deepcopy(img)\n self._init = False\n cv.imshow(\"Highlight\", self._img)\n cv.waitKey(1)\n except CvBridgeError as e:\n print(\"Bridge-Error: {}\".format(e))", "def view_image(row, train_test):\n\n image_name, l, t, r, b, class_idx = row\n class_name = car_dict[class_idx]\n drawn_img = Image.open(\n Path(\"stanford_car\")\n / \"car_data\"\n / train_test\n / class_name\n / image_name\n )\n bbox = ImageDraw.Draw(drawn_img)\n bbox.rectangle([l, t, r, b], outline=\"red\", fill=None)\n drawn_img.show()", "def visualize(spxl, mask):\n cv2.imshow(\"spxl\", spxl)\n cv2.imshow(\"mask\", mask)\n cv2.waitKey(100)", "def annotate_objects(annotator, results, labels):\n for obj in results:\n # Convert the bounding box figures from relative coordinates\n # to absolute coordinates based on the original resolution\n ymin, xmin, ymax, xmax = obj['bounding_box']\n xmin = int(xmin * CAMERA_WIDTH)\n xmax = int(xmax * CAMERA_WIDTH)\n ymin = int(ymin * CAMERA_HEIGHT)\n ymax = int(ymax * CAMERA_HEIGHT)\n\n # Overlay the box, label, and score on the camera preview\n annotator.bounding_box([xmin, ymin, xmax, ymax])\n annotator.text([xmin, ymin], '%s\\n%.2f' % (labels[obj['class_id']], obj['score']))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the response to the GET request to the page of autolab specified by path
def request_autolab(path): if not(path.startswith(AUTOLAB_URL)): path = requests.compat.urljoin(AUTOLAB_URL, path) response = requests.get(path, headers=headers, cookies=cookies) return response
[ "def do_GET(self):\n path = self.path\n name = path[1:] # strip the leading slash\n \n # if the path is the name of a known pokemon, get its html string and construct the response:\n if name in self.pokemon_dictionary:\n self.send_response(http.HTTPStatus.OK)\n self.send_header(\"Content-type\", 'text/html')\n self.end_headers()\n html = self.boilerplate_prefix + self.pokemon_dictionary[name][0].html_body() + self.boilerplate_suffix\n self.wfile.write(html.encode('utf-8')) \n \n else:\n self.send_error(http.HTTPStatus.NOT_FOUND, \"Pokemon Not Found\".format(self.path))", "def soup_autolab(path):\n response = request_autolab(path)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup", "def get_response(self, url):\n self.response = requests.get(url)", "def get(self, path, **kwargs):\n return(self._request('GET', path, params=kwargs))", "def do_GET(self):\n self._set_response(400, 'text/html')\n self.wfile.write(b\"no GET requests\")", "def api_call_get(path):\n try:\n return api_interpret(itapi.call(path))\n except ITTechnicalAPIError as e:\n return api_interpret(e.response, e.status)", "def test_speciess_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/speciess',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get(self, path):\n self.__init__(self.addr, self.port)\n msg = 'GET ' + path + ' HTTP/1.1\\r\\n\\r\\n'\n self.ss.send(msg)\n self.resp = self.ss.recv(1000)\n self.close()", "def get(path, **kwargs):\n return generic_request('GET', path, **kwargs)", "def getpage():\n\tpage = dataent.form_dict.get('name')\n\tdoc = get(page)\n\n\t# load translations\n\tif dataent.lang != \"en\":\n\t\tsend_translations(dataent.get_lang_dict(\"page\", page))\n\n\tdataent.response.docs.append(doc)", "def get_response(self, company_name):\r\n request_url = \"https://en.wikipedia.org/w/api.php?action=query&titles=\"+ company_name +\"&prop=revisions&rvprop=content&format=json\"\r\n print(request_url)\r\n wiki_response = requests.get(request_url)\r\n print(wiki_response)\r\n wiki_response_json = json.loads(wiki_response.text)\r\n # print(wiki_response_json)\r\n wiki_query = wiki_response_json['query']\r\n wiki_query_pages = wiki_query['pages']\r\n\r\n if str(wiki_response) == \"<Response [404]>\":\r\n print(\"404 Error\")\r\n return None\r\n else:\r\n print(\"Page Found\")\r\n return wiki_query_pages", "def on_get(self, req: falcon.Request, resp: falcon.Response, job_id: str):\n path = req.path\n #print('path: '+path)\n #print('job_id: '+job_id)\n logging.info('path: %s, job_id: %s [%d], active_jobs=%d'%(path,job_id,os.getpid(),len(self.active_jobs)))\n if path.startswith(\"/status/\"):\n uid = get_job_id(job_id)\n logging.info('path: %s, job_id: %s [%d], active_jobs=%d'%(path,str(uid),os.getpid(),len(self.active_jobs)))\n if uid in self.active_jobs:\n resp.code = falcon.HTTP_200\n resp.text = str(self.active_jobs[uid].get_status(uid))\n return\n add_error(resp, \"No such job\")\n return\n\n # New version of status that returns a JSON structure rather than a string so we can return\n # error messages. 
We need to do it this way to preserve backwards compatability\n if path.startswith(\"/status2/\"):\n uid = get_job_id(job_id)\n logging.info('path: %s, job_id: %s [%d], active_jobs=%d'%(path,str(uid),os.getpid(),len(self.active_jobs)))\n if uid in self.active_jobs:\n resp.code = falcon.HTTP_200\n resp.text = json.dumps(self.active_jobs[uid].get_status2(uid))\n logging.info(\"status2 response: \"+resp.text)\n return\n add_error(resp, \"No such job\")\n return\n\n if path.startswith(\"/fetch/\"):\n uid = get_job_id(job_id)\n if uid in self.active_jobs:\n self.active_jobs[uid].fetch_results(uid, req, resp)\n return\n add_error(resp, \"No such job\")\n return\n\n if path.startswith(\"/terminate/\"):\n uid = get_job_id(job_id)\n if uid in self.active_jobs:\n self.active_jobs[uid].terminate(uid, req, resp)\n return\n add_error(resp, \"No such job\")\n return\n\n if path.startswith(\"/jobs\"):\n return\n\n #print('no matching path')", "def get(self, path=None):\n # allow to pass a route too\n if path and not isinstance(path, str):\n values = dict(((v, v) for v in path.ordered_variables))\n path = path.url(**values)\n #\n if path and path.startswith('/'):\n path = path[1:]\n path = path or ''\n for api in self:\n if api.match(path):\n return api\n raise Http404", "def GET(self, serverclient):\n path = get_os_path(serverclient.req.url)\n logging.debug(path)\n if os.path.exists(path):\n serverclient.write_file(path)\n else:\n logging.debug(\"404 Not Found\")\n serverclient.write_generic_body(404)", "def get_response(page_url):\n response = requests.get(page_url)\n # print(page_url, response.status_code)\n return response", "def test_api_get_lab_w_extension(self, authenticated_client, lab, lab_path):\n resp = authenticated_client.api.get_lab(lab_path)\n assert resp[\"data\"][\"name\"] == lab[\"name\"]", "def page1(self):\n result = request101.GET('/whatIsMyIPAddress')\n return result", "def do_GET(s):\n session = bpy.context.scene.batchapps_session\n session.log.debug(\"Received AAD request {0}\".format(s.path))\n\n if s.path.startswith('/?code'):\n bpy.context.scene.batchapps_auth.code = s.path\n\n s.send_response(200)\n s.send_header(\"Content-type\", \"text/html\")\n s.end_headers()\n\n s.wfile.write(b\"<html><head><title>Authentication Successful</title></head>\")\n s.wfile.write(b\"<body><p>Authentication successful.</p>\")\n s.wfile.write(b\"<p>You can now return to Blender where your log in</p>\")\n s.wfile.write(b\"<p>will be complete in just a moment.</p>\")\n s.wfile.write(b\"</body></html>\")\n\n else:\n bpy.context.scene.batchapps_auth.code = s.path\n\n s.send_response(401)\n s.send_header(\"Content-type\", \"text/html\")\n s.end_headers()\n\n s.wfile.write(b\"<html><head><title>Authentication Failed</title></head>\")\n s.wfile.write(b\"<body><p>Authentication unsuccessful.</p>\")\n s.wfile.write(b\"<p>Check the Blender console for details.</p>\")\n s.wfile.write(b\"</body></html>\")", "def do_GET(self) -> None:\n logging.info('%s - %s', self.requestline, self.client_address)\n path = urlparse(self.path).path\n if path == '/probe':\n prometheus_client.MetricsHandler.do_GET(self)\n else:\n server.SimpleHTTPRequestHandler.do_GET(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the soup of the page of autolab specified by path
def soup_autolab(path): response = request_autolab(path) soup = BeautifulSoup(response.text, 'html.parser') return soup
[ "def extractor(url,wait_time):\n driver = webdriver.Chrome()\n driver.get(url)\n time.sleep(wait_time) # important\n\n html_doc = driver.page_source # stores the source HTML code in the driver's page_source attribute\n soup = BeautifulSoup(html_doc, 'html.parser')\n abstract = soup.find('div', {'class':\"Abstracts u-font-serif\"}).text\n\n driver.quit()\n return abstract", "def request_autolab(path):\n if not(path.startswith(AUTOLAB_URL)):\n path = requests.compat.urljoin(AUTOLAB_URL, path)\n response = requests.get(path, headers=headers, cookies=cookies)\n return response", "def connect(self, u):\n r = requests.get(u)\n soup = bs(r.text, 'html.parser')\n return soup", "def makesoup(url):\n html = requests.get(url)\n soup = bs(html.content)\n return soup", "def main(url_or_file):\n url_or_file = url_or_file or ih.user_input('url')\n ph.soup_explore(url_or_file)", "def tiebaSpider(url, beginPage, endPage):\n for page in range(beginPage, endPage + 1):\n pn = (page - 1) * 50\n filename = 'page' + str(page) + '.html'\n fullurl = url + '&pn=' + str(pn)\n #print(fullurl)\n html = loadPage(fullurl, filename)\n print(html)", "def _fetch_url(self, word):\n\t\t\n\t\tif os.path.isfile(RAW_DATA_PATH + word + \".html\"):\n\t\t\thtml_file = open(RAW_DATA_PATH + word + \".html\", \"r\")\n\t\t\tcontent = html_file.read()\n\t\t\thtml_file.close()\n\n\t\telse:\n\t\t\ttry:\n\t\t\t\turl = \"http://dictionary.reference.com/browse/\" + word\n\t\t\t\tcontent = urllib2.urlopen(url).read()\n\t\t\texcept urllib2.URLError as err:\n\t\t\t\tprint \"Word not in database, please connect to the internet.\"\n\t\t\t\texit()\n\n\t\t#Saving html content just in case I'll need to make some changes in the future.\n\t\tcontent_file = open(RAW_DATA_PATH + word + \".html\", \"w\")\n\t\tcontent_file.write(content)\n\t\tcontent_file.close()\n\t\t\n\t\treturn BeautifulSoup(content)", "def get_html(url): \n \n return requests.get(url)", "def get_soup(url, driver=None, for_task=None):\n\n if driver:\n\n if for_task == settings.TASKS[1]:\n id_name = 'resume_body'\n else:\n id_name = 'job-content'\n\n browser.wait_till(driver, 'id', id_name)\n page = driver.page_source\n\n else:\n page = requests.get(url, headers=headers).text\n\n return BeautifulSoup(page, 'html.parser')", "def fetchUsages(self, url, artifact, page=None):\n soup = UrlHandler.getSoup(url)\n\n artifact_root = self._separateV(artifact, getRoot = True)[0]\n scala = re.search(r'_[0-9]\\.[0-9][0-9]', artifact_root)\n if scala:\n artifact_root = artifact_root[:-5]\n usages = []\n previous = ''\n scope = False\n end = False\n next_page = 0\n current_page = 0\n\n if not page:\n print('Page 1')\n else:\n usages_page_link = url + '?p=' + page\n print(\"Continued on page \" + page)\n soup = UrlHandler.getSoup(usages_page_link)\n current_page = int(page)\n if self._fileManager:\n self._fileManager.setCurrentPage(artifact, page)\n page = None\n\n while not end:\n\n for tag in soup.find_all('a'):\n link = tag.get('href')\n\n if scope:\n\n if '?p=' in link:\n next_page = int(link[3:])\n\n if next_page > current_page:\n scope = False\n usages_page_link = url + link\n soup = UrlHandler.getSoup(usages_page_link)\n current_page = int(link[3:])\n print('Page',current_page)\n if self._fileManager:\n self._fileManager.setCurrentPage(artifact, current_page)\n break\n\n elif '/tags' in link:\n end = True\n scope = False\n\n break\n\n elif '/artifact/' in link:\n if link == previous:\n if link not in usages:\n usages.append(link[10:])\n if self._fileManager:\n 
self._fileManager.writeUsage(artifact,link[10:])\n\n previous = link\n\n if artifact_root in link:\n scope = True\n\n return usages", "def ChooseScraper(self, url):", "def openURL(self, url):\n response = requests.get(url)\n self.soup = BeautifulSoup(response.text, 'html.parser')", "def get_bsobj(page):\n bsobj = soup(page,\"lxml\")\n return bsobj", "def fetch( self ) :\n self.client.staticwiki( self )\n return self.text", "def get_soup(session, project):\n resp = session.get('https://intranet.hbtn.io/projects/{}'.format(project))\n if 200 <= resp.status_code < 300:\n return BeautifulSoup(resp.content, features='html.parser')\n return None", "def fetch_page(name):\n\n params = {\"action\": \"parse\", \"format\": \"json\", \"page\": name}\n rv = requests.get(WIKIMEDIA_API_URL, params=params)\n if rv.status_code != 200:\n print(f\"Unexpected HTTP code: {rv.status_code}\\n{rv}\")\n return None\n\n rv.encoding = \"utf-8\"\n data = rv.json()\n try:\n body = data[\"parse\"][\"text\"][\"*\"]\n title = data[\"parse\"][\"title\"]\n except ValueError:\n print(\"Something is wrong with the server response\")\n raise\n\n return title, body", "def main():\n \n URL = \"http://tldp.org/guides.html\"\n res = requests.get(URL)\n soup = bs(res.text, 'html.parser')\n base_url = 'https://tldp.org/'\n links = (base_url + link.a['href'] for link in soup.find_all('li') if link.a.text.strip() == \"PDF\")\n for link in links:\n os.system(f\"wget -nc {link}\") #using wget to download", "def apollo15_lsj_scrape_index():\n lsj_base_link = 'https://www.hq.nasa.gov/alsj/a15/'\n lsj_base_page = lsj_base_link+'a15.html'\n\n headers = {'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'}\n\n # Make a soup from the page HTML\n r = requests.get(lsj_base_page, headers = headers)\n html_doc = r.text\n soup = BeautifulSoup(html_doc,\"lxml\")\n\n # Extract everything under \"<h2>The Journal</h2>\"\n log_links = []\n\n stuff = soup.find_all(['h2','a'])\n \n switch = False\n for s in stuff:\n if s.name=='h2':\n if s.text=='The Journal':\n switch=True\n else:\n switch=False\n if s.name=='a' and switch:\n if 'Flight Journal' not in s.text:\n link_loc = lsj_base_link+\"/\"+s.attrs['href'] \n print(link_loc)\n print(\"Found link:\")\n print(\" Target: %s\"%(link_loc))\n log_links.append(link_loc)\n\n if not os.path.exists(SCRAPE_DIR):\n os.mkdir(SCRAPE_DIR)\n\n # Follow those links!!!\n # Save each page to disk\n for i,link in enumerate(log_links):\n\n dest = os.path.join(SCRAPE_DIR, os.path.basename(link))\n\n if not os.path.exists(dest):\n\n print(\"Scraping...\")\n print(\" Link: %s\"%(link))\n print(\" Target file: %s\"%(dest))\n\n r = requests.get(link, headers=headers)\n html_doc = r.content.decode('ISO-8859-1')\n soup = BeautifulSoup(html_doc, \"lxml\")\n\n with open(dest,'w') as f:\n f.write(soup.text)\n\n print(\"Done.\\n\")\n\n else:\n\n print(\"Skipping %s, file already exists...\"%(dest))\n\n print(\"Done scraping Apollo 15 Lunar Surface Journals.\")", "def browse_current_page(self, soup):\n products = soup.select('div.image_container a')\n for result in products:\n url_product = self.urlpath(result['href'])\n ExtractProduct(url_product, self.extracted_dir.directory)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all divs that are cards in the soup
def get_cards(soup): return soup.findAll("div", {"class": "card"})
[ "def get_clubs(soup):\n return soup.findAll('div', {'class': 'box'})", "def card_list(search_url):\n card_list = []\n card_link_re = re.compile('^\\/cards\\/[0-9].*')\n \n main_url = \"https://www.hearthpwn.com\"\n \n raw_html = simple_get(main_url+search_url)\n if raw_html is not None:\n html = BeautifulSoup(raw_html, 'html.parser')\n for link in html.aside.find_all('a'):\n href = str(link.get('href'))\n if card_link_re.match(href): \n try:\n count = int(link['data-count'])\n if count == 2:\n card_list.append(href)\n except:\n log(\"data-count error. Likely extraneous card. Skipping...\")\n continue\n card_list.append(href)\n #log(href)\n else:\n log(\"error: card_list simple_get returned None\")\n log(\"Found {0} cards in deck.\".format(len(card_list)))\n return card_list", "def get_divs(bsobj):\n\n all_divs = bsobj.find_all(\"div\",attrs={\"class\":\"list-grid-item\"})\n if all_divs != []:\n return all_divs\n else:\n return None", "def get_cards(self):\n for c in sorted(self.cards, key=lambda card: card.data['house']):\n for i in range(self.data['_links']['cards'].count(c.key)):\n c.data['is_legacy'] = c.key in self.data.get('set_era_cards',{}).get('Legacy',[])\n c.data['bonus_icons'] = []\n for bonus_card in self.data.get(\"bonus_icons\", []):\n if bonus_card[\"card_id\"] == c.key:\n c.data['bonus_icons'] = bonus_card['bonus_icons']\n yield c", "def __get_book_divs(self, page_number):\n print('[*] Now on page: ', page_number)\n params = {'page': page_number,\n 'display_quantity': self.display_quantity}\n response = requests.get(self.site_root, params=params)\n soup = BeautifulSoup(response.text, 'lxml')\n book_divs = soup.find('div', {'id': 'bibList'})\n return book_divs", "def scrape(self, card_name):\n raise NotImplementedError", "def __parse_divs(self, book_divs):\n books = []\n for book_div in book_divs.findAll('div', {'class': 'info'}):\n book = {}\n try:\n book['title'] = book_div.find('span', {'class': 'title'}).text\n except AttributeError:\n book['title'] = 'N/A'\n try:\n book['author'] = book_div.find('span', {'class': 'author'}).a.text\n # Remove the year, if present\n if len(book['author'].split(',')) >= 3:\n book_author = book['author'].split(',')\n book_author = book_author[:2]\n #del book_author[-1]\n book['author'] = ','.join(book_author)\n except AttributeError:\n book['author'] = 'N/A'\n books.append(book)\n return books", "def parse_card(self, response):\n output = {\n 'title': response.css('.card-details .caption::text').extract()[0],\n 'class': 'all',\n }\n\n infobox = response.css('.card-details .infobox')\n infobox = infobox.css('::text').extract()\n\n for i, infobox_line in enumerate(infobox):\n if 'Type:' in infobox_line:\n output['type'] = infobox[i+1].strip()\n if 'Rarity:' in infobox_line:\n output['rarity'] = infobox[i+1].strip()\n if 'Set:' in infobox_line:\n output['set'] = infobox[i+1].strip()\n if 'Class:' in infobox_line:\n output['class'] = infobox[i+1].strip()\n if 'Classes:' in infobox_line:\n output['class'] = 'Multi'\n if 'Used in' in infobox_line:\n usage = re.sub(r'\\s+', ' ', infobox_line).strip()\n usage_tokens = usage.split(' ')\n output['usage_pct'] = usage_tokens[2]\n\n yield output", "def get_all_container_nodes(self):\n return self.html_doc.find_all(constants.WOOLIES_CONTAINER_DIV[\"tag\"],constants.WOOLIES_CONTAINER_DIV[\"class\"])", "def get_list(soup, class_):\n feature_list = [feature.text.strip('\\n\\t') for feature in soup.find_all('div', class_)]\n return feature_list", "def cards(self):\n\t\treturn [btn.card for btn in 
self._buttonsSelected]", "def get_products(self):\n\n while True:\n soup = BeautifulSoup(\n self.get_page(),\n \"html.parser\",\n )\n not_found = soup.find(\n \"span\",\n attrs={\"class\": \"a-size-medium a-color-base\"},\n )\n\n if not_found is not None:\n return None\n\n product_divs = soup.find_all(\n \"div\",\n attrs={\n \"class\": \"sg-col-4-of-12 s-result-item s-asin sg-col-4-of-16 sg-col sg-col-4-of-20\"\n },\n )\n\n if len(product_divs) > 0:\n break\n\n return product_divs", "def find(self):\n print 'Start searching ...'\n soup = BeautifulSoup(self._content) # wrap the content with BeautifulSoup\n for pin in soup.find_all('div', 'item'): # find all pins\n self._getPinInfo(pin) # for each pin, call storePin to extract info\n print 'Cong! DONE!'", "def GetCards(self):\n return self.cards", "def _fetch_comics_available():\n response = requests.get(URL_COMICS)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n comic_data = []\n comic_contents = soup.find_all('div', {'class': 'cartoon-content'})\n for content in comic_contents:\n name = content.find('span', {'class': 'title'}).get_text()\n uri_part = content.find('meta', {'itemprop': 'contentUrl'})['content']\n url = f\"{URL_BASE}{uri_part}\"\n comic_data.append({'name': name, 'url': url})\n return comic_data", "def collectContainer(self, content : bs4.element.NavigableString) -> List:\r\n return [(lambda container_location: content.find('div', container_location))(container_location)\r\n for container_location in [{'class': 'container left'}, {'class': 'container right'}]]", "def get_cards(self):\r\n return self.deck", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)", "def query_cards(self, query: CardQuery):\n count = 0\n for _, card in self.card_database.cards.items():\n study_data = self.study_database.get_card_study_data(card)\n if query.matches(card, study_data):\n yield card\n count += 1\n if query.max_count is not None and count >= query.max_count:\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the title of the card, where card is a BeautifulSoup object
def get_card_title(card): card_title = card.find("span", {"class": "card-title"}) assert card_title is not None card_title = slugify(card_title.string) return card_title
[ "def extract_book_title(entry: bs4.BeautifulSoup) -> str:\n try:\n return entry.find(\"div\", attrs={\"class\": \"headsummary\"}).find(\"h1\").text.strip()\n except Exception:\n return \"\"", "def get_review_title(full_review):\n title_div = full_review.find_all(\"div\", class_=REVIEW_TITLE_CLASS)[0]\n title = title_div.get_text()\n return title", "def _extract_title(self):\n return self._get_child('title').text", "def get_title_data(html_soup):\n return str(soup.title.string)", "def _find_title(self):\n html = bs4.BeautifulSoup(str(self.content), \"html.parser\")\n if html.title:\n return html.title.text\n\n return str(self.url)", "def get_article_title(article: WebElement):\n return article.find_element_by_css_selector(\"div.post-container>h2>a\").text", "def find_title(self, html):\n return \"\"", "def get_article_title(self, soup):\r\n # checks if title can be found\r\n try:\r\n title = soup.find('h1', class_='page-header__title').text\r\n except:\r\n raise self.InvalidArticle('Cannot find title')\r\n\r\n return title", "def get_journal_title(self, soup):\n title = soup.find(\"h3\", class_=\"feed-item-title\")\n if not title:\n return \"default\"\n title = soup.find(\"a\")\n if not title:\n return \"default\"\n return title.decode_contents()", "def title(self) -> str:\n return self.__soup.title.text # type: ignore", "def title(self):\n return self.getAttribute('title')", "def scrape(self, card_name):\n raise NotImplementedError", "def find_article_title(soup):\n title = soup.find('h1')\n if title:\n return clean_data(title.text)\n return None", "def parseChapterTitle(self, chapter: Chapter, chapterSoup: BeautifulSoup) -> str:\n # The chapter title has already been set, so return that instead\n return chapter.title", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def title(self):\n return self._book_dict[\"title\"]", "def _get_title(video):\n title = video.get('title', {}).get('value')\n if not title:\n title = video.get('summary', {}).get('value', {}).get('title', {}).get('value')\n return title", "def fetch_title( f ):\n return f.Info['/Title']", "def parseMangaTitle(self, mangaSoup: BeautifulSoup) -> str:\n return mangaSoup.find('ul', 'manga-info-text').find('h1').text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get tasks from a card
def get_tasks(card): tasks = [] anchors = card.findAll("a", {"class": "collection-item"}, href=True) for anchor in anchors: task_title = slugify(anchor.string) task_link = anchor['href'] tasks.append((task_title, task_link)) return tasks
[ "async def get_fetch_tileables(self, task_id: str) -> List[Tileable]:", "def get_task(self, task_name):", "def tasks(**_):\n for task in filter(bool, get_all_tasks()):\n print(task)", "def get_card():\n card = '{\"id\": \"5f835afcf6400f7c70f9597e\", \"checkItemStates\": [], \"closed\": false, \"dateLastActivity\": \"2020-10-11T19:20:28.624Z\", \"desc\": \"This is a test task for a new item\", \"descData\": null, \"dueReminder\": null, \"idBoard\": \"5fd503d7e780f63e718bf593\", \"idList\": \"5fd503e2db852a6e5a2fddab\", \"idMembersVoted\": [], \"idShort\": 63, \"idAttachmentCover\": null, \"idLabels\": [], \"manualCoverAttachment\": false, \"name\": \"New Test Task\", \"pos\": 32768, \"shortLink\": \"iUihslfg\", \"isTemplate\": false, \"dueComplete\": false, \"due\": \"2020-10-30T00:00:00.000Z\", \"labels\": [], \"shortUrl\": \"https://trello.com/c/iUihslfg\", \"start\": null,\"url\": \"https://trello.com/c/iUihslfg/63-new-test-task\", \"cover\": {\"idAttachment\": null, \"color\": null, \"idUploadedBackground\": null, \"size\": \"normal\", \"brightness\": \"light\"}, \"idMembers\": [], \"email\": null, \"badges\": {\"attachmentsByType\": {\"trello\": {\"board\": 0, \"card\": 0}}, \"location\": false, \"votes\": 0, \"viewingMemberVoted\": false, \"subscribed\": false, \"fogbugz\": \"\", \"checkItems\": 0, \"checkItemsChecked\": 0, \"checkItemsEarliestDue\": null, \"comments\": 0, \"attachments\": 0, \"description\": true, \"due\": \"2020-10-30T00:00:00.000Z\", \"dueComplete\": false, \"start\": null}, \"subscribed\": false, \"idChecklists\": []}'\n return json.loads(card)", "def getTasks(self, queue):\n work_items = requests.get(self.client.baseurl\n + queue.get('queueElements'),\n auth = self.client.cred)\n if not work_items.json():\n print (\"'%s' queue is empty!\"%queue['name'])\n else:\n return work_items.json()['queueElements']", "def sde_get_tasks(self):\n\n if not self.sde_plugin:\n raise AlmException('Requires initialization')\n\n try:\n if self.config['selected_tasks']:\n return self.sde_plugin.get_task_list()\n else:\n return self.sde_plugin.get_task_list(priority__gte=self.config['sde_min_priority'])\n except APIError, err:\n logger.error(err)\n raise AlmException('Unable to get tasks from SD Elements. Please ensure'\n ' the application and project are valid and that the user has'\n ' sufficient permission to access the project. Reason: %s' % (str(err)))", "def test_get_tasks_for_tag(self):\n pass", "def test_get_tasks_for_project(self):\n pass", "def test_get_tasks_for_user_task_list(self):\n pass", "async def tasks_all(self, ctx):\n if isinstance(ctx.channel, discord.TextChannel):\n await ctx.send(\"This is a long list. I'm going to send it to your DM. 
To view items \"\n \"in the Council Chat, please request them individually (`++tasks suggestions`).\")\n # Suggestions\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Suggestions!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Suggestions\", color=discord.Color.blurple())\n flag = 0\n for row in values:\n if len(row) < 9:\n embed.add_field(name=f\"Suggestion from {row[1]}\\n{row[7]}\",\n value=f\"{row[3][:500]}\\nDated {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Council Nominations\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Council!A2:J\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Nominations\", color=discord.Color.dark_gold())\n for row in values:\n if row[8] == \"\":\n embed.add_field(name=f\"Council Nomination for {row[3]}\\n{row[9]}\",\n value=f\"Submitted by {row[1]}\\nDated {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Verification Requests\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Verification!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Verification Requests\", color=discord.Color.dark_blue())\n for row in values:\n if len(row) < 9 or row[8] in (\"1\", \"2\", \"3\", \"4\"):\n status = \"has not been addressed\"\n try:\n if row[8] == \"1\": status = \" is awaiting a scout\"\n if row[8] == \"2\": status = \" is currently being scouted\"\n if row[8] == \"3\": status = \" is awaiting the post-scout survey\"\n if row[8] == \"4\": status = \" is awaiting a decision by Council\"\n except:\n self.bot.logger.debug(\"row is shorter than 9\")\n embed.add_field(name=f\"Verification for {row[1]} {status}.\\n{row[7]}\",\n value=f\"Leader: {row[3]}\\nDated {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks update <Task ID> to change the status.\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Other Submissions\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Other!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Other Items\", color=discord.Color.gold())\n for row in values:\n if len(row) < 9:\n if len(row[6]) > 1:\n assigned_to = f\"Assigned to: {self.guild.get_member(int(row[6])).display_name}\"\n else:\n assigned_to = \"Unassigned\"\n embed.add_field(name=f\"Other Comment from {row[1]}\\n{row[7]}\",\n value=(f\"{row[3][:500]}\\n{assigned_to}\\n\"\n f\"Dated {row[0]}\"),\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Tasks (Individual Action Items)\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Tasks!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Action Items\", color=discord.Color.dark_magenta())\n for row in values:\n if len(row) < 9:\n if len(row[6]) > 1:\n assigned_to = f\"Assigned to: {self.guild.get_member(int(row[6])).display_name}\"\n else:\n assigned_to = \"Unassigned\"\n embed.add_field(name=f\"{assigned_to}\\n{row[7]}\",\n value=f\"{row[1]}\\nDated: {row[0]}\",\n inline=False)\n 
embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n if flag == 0:\n await ctx.send(\"No incomplete tasks at this time! Well done!\")", "def search_tasks(self, md5):\n if not self._cursor:\n return None\n\n if not md5 or len(md5) != 32:\n return None\n\n try:\n self._cursor.execute(\"SELECT * FROM queue \" \\\n \"WHERE md5 = ? \" \\\n \"AND status = 1 \" \\\n \"ORDER BY added_on DESC;\",\n (md5,))\n except sqlite3.OperationalError, why:\n return None\n\n tasks = []\n for row in self._cursor.fetchall():\n task_dict = self._get_task_dict(row)\n if task_dict:\n tasks.append(task_dict)\n\n return tasks", "def tasks(self):\n tasks = self.user.tasks()\n return [task for task in tasks if task.categoryId == self['id']]", "def get_tasks(self, query: domain.Query) -> list:\n tasks = {}\n\n sql, values = self._filter( # fetch task data\n query,\n self._T_TSK,\n job_id=True,\n layer_id=True,\n task_id=True,\n state=True,\n key=True,\n user_id=True,\n )\n self._cur.execute(sql, values)\n for row in self._cur.fetchall():\n data = {k[k.index(\"_\") + 1:]: v for k, v in row.items()}\n tasks[data[\"task_id\"]] = data\n\n records = collections.defaultdict(list)\n sql, values = self._filter( # fetch record data\n query,\n self._T_REC,\n task_id=True,\n )\n self._cur.execute(sql, values)\n for row in self._cur.fetchall():\n data = {k[k.index(\"_\") + 1:]: v for k, v in row.items()}\n records[data[\"task_id\"]].append(data)\n\n results = []\n for task_id, task_data in tasks.items():\n t = domain.Task.decode(task_data)\n t.result = str(bytes(t.result), encoding=\"utf8\") if t.result else None\n t.records = records.get(task_id, [])\n results.append(t)\n\n return results", "def get_task_by_id(self,task_id): \n return self.tasks.get_task_by_id(task_id = task_id)", "def test_get_subtasks_for_task(self):\n pass", "def get_tasks():\n return render_template(\"tasks.html\", tasks=mongo.db.tasks.find())", "def get_results(self):\n for t in self.task:\n print t.get()", "def test_get_tasks_for_section(self):\n pass", "def get_queue(self, task_name):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the filename from the download link response
def get_filename(response): if 'Content-Disposition' in response.headers: filename_matches = re.findall( r'filename="(.*)"', response.headers['Content-Disposition']) assert len(filename_matches) == 1 return filename_matches[0] else: return response.url.split("/")[-1]
[ "def _get_http_response_filename(resp: Response, link: Link) -> str:\n filename = link.filename # fallback\n # Have a look at the Content-Disposition header for a better guess\n content_disposition = resp.headers.get(\"content-disposition\")\n if content_disposition:\n filename = parse_content_disposition(content_disposition, filename)\n ext: Optional[str] = splitext(filename)[1]\n if not ext:\n ext = mimetypes.guess_extension(resp.headers.get(\"content-type\", \"\"))\n if ext:\n filename += ext\n if not ext and link.url != resp.url:\n ext = os.path.splitext(resp.url)[1]\n if ext:\n filename += ext\n return filename", "def get_response_filename(response):\n\n disp = response.headers.get('Content-Disposition')\n if disp:\n return parse_content_disposition(disp)['filename']\n return posixpath.basename(urllib.parse.urlsplit(response.url)[2])", "def parse_file_name_from_url(response):\r\n split_url = urlsplit(response.url)\r\n filename = split_url.path.split(\"/\")[-1:][0]\r\n return filename", "def _getFilenameFromContentDisposition(self, response):\n try:\n with response as r:\n fname = ''\n if \"Content-Disposition\" in r.headers.keys():\n fname = re.findall(\n \"filename=(.+)\", r.headers[\"Content-Disposition\"])[0]\n else:\n fname = r.url.split(\"?\")[0].split(\"/\")[-1]\n\n return fname.replace(\"\\\"\", \"\")\n except Exception:\n return \"file.unknown\"", "def filename(self):\n return self.url.filename", "def download_url():", "def get_download_filename(self):\n title = self.contentnode.title\n filename = \"{} ({}).{}\".format(title, self.get_preset(), self.get_extension())\n valid_filename = get_valid_filename(filename)\n return valid_filename", "def download(self, hashkey, output):\n url = self['endpoint'] + 'file?hashkey=%s' % hashkey\n\n self['logger'].info('Fetching URL %s' % url)\n fileName, header = self['requests'].downloadFile(output, str(url)) #unicode broke pycurl.setopt\n self['logger'].debug('Wrote %s' % fileName)\n return fileName", "def get_filename_from_url(url):\n split = url.split('/') # this splits a string on a character and returns a list\n name = split[-1]\n return name", "def extract_filename_from_url(log, url):\n ## > IMPORTS ##\n import re\n # EXTRACT THE FILENAME FROM THE URL\n try:\n log.debug(\"extracting filename from url \" + url)\n reEoURL = re.compile('([\\w\\.]*)$')\n filename = reEoURL.findall(url)[0]\n # log.debug(filename)\n if(len(filename) == 0):\n filename = 'untitled.html'\n if not (re.search('\\.', filename)):\n filename = filename + '.html'\n except Exception as e:\n filename = None\n # print url\n log.warning(\"could not extracting filename from url : \" + str(e) + \"\\n\")\n\n return filename", "def _get_wheelname_from_link(self, wheel_link: str) -> str:\n logger.debug(\"getting filename for %s\", wheel_link)\n match = WHEEL_FILENAME_RE.search(wheel_link)\n if match:\n return match.group(\"filename\")\n else:\n return \"\"", "def getDownloadUrl(response):\n newUrl = re.findall('\"ExportUrlBase\":(\"[^\"]+\")', response.text)\n if len(newUrl) == 0: return ''\n assert len(newUrl) == 1, 'Multiple download URLs should not happen'\n return constants.HOST_URL + json.loads(newUrl[0]) + 'CSV'", "def filename(self) -> str:\n return os.path.splitext(\n os.path.basename(\n unquote(\n urlparse(\n self.original_url\n ).path\n )\n )\n )[0] + \".png\"", "def download(self, hashkey=None, name=None, output=None):\n # FIXME: option for temp file if output=None\n if hashkey:\n url = self['endpoint'] + 'file?hashkey=%s' % hashkey\n else:\n url = 
self['endpoint'] + 'file?inputfilename=%s' % name\n\n self['logger'].info('Fetching URL %s' % url)\n fileName, header = self['requests'].downloadFile(output, str(url)) #unicode broke pycurl.setopt\n self['logger'].debug('Wrote %s' % fileName)\n return fileName", "def getRequestsFileName(request):\n content_disposition = request.headers.get(\"Content-Disposition\")\n if content_disposition is not None:\n attributes = (x.strip() for x in content_disposition.split(\";\")\n if x.startswith(\"filename=\"))\n for attr in attributes:\n _, filename = attr.split(\"=\")\n return filename.strip('\"')\n return None", "def get_file_url(self):\n return self.raw['url']", "def _download_from_url(self, url):\n ext = get_file_extension(url)\n if \"?\" in url:\n ext = get_file_extension(os.path.splitext(url.split(\"?\")[0]))\n filepath = \"/tmp/%s.%s\" % (uuid.uuid4().hex, ext)\n request.urlretrieve(url, filepath)\n return filepath", "def get_download_file_name(self):\n # Use 'unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.get_url_name()\n else:\n term = 'unknown'\n\n return 'syllabus-{course}-{term}-{instructors}{ext}'.format(\n course=self.course_instance.course.get_url_name(),\n term=term,\n instructors='_'.join([i.last_name for i in self.instructors]),\n ext=self.file_ext)", "def get_download_link(self):\n return f'{self.link}$value'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scrape submissions, handouts, and writeups from a course
def process_course(course): course_title, course_link = course print() print("PROCESSING COURSE ", course) soup = soup_autolab(course_link) assns = get_assns(soup) for assn in assns: process_assn(assn, course_title)
[ "def scraper(page):\n\n # Initialize empty lists\n titles = []\n urls = []\n techs = []\n instructors = []\n\n # Start scraper and get course blocks\n soup = BeautifulSoup(page, 'html')\n div = soup.findAll(\"div\", { \"class\": \"course-block\"})\n\n # Loop over all courses\n for element in div:\n a = element.find(\"a\", { \"class\": \"course-block__link\"})\n\n # Get url\n url = 'https://www.datacamp.com' + a.get('href')\n\n # Get tech\n if a.contents[1].get(\"class\")[1] == 'course-block__technology--r':\n tech = 'R'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--python':\n tech = 'Python'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--sql':\n tech = 'SQL'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--git':\n tech = 'Git'\n elif a.contents[1].get(\"class\")[1] == 'course-block__technology--shell':\n tech = 'Shell'\n\n # Get title\n title = [element.get_text() for element in a.select(\"h4\")][0]\n\n # Get instructor\n instructor_div = element.find(\"div\", { \"class\": \"course-block__author-body\"})\n instructor = [element.get_text() for element in instructor_div.select(\"p\")][0]\n\n # Write information in lists\n titles.append(title)\n urls.append(url)\n techs.append(tech)\n instructors.append(instructor)\n\n # Write ordered dictionary and return it\n courses = OrderedDict({'Course': titles,\n 'URL': urls,\n 'Tech': techs,\n 'Instructor': instructors})\n\n return courses", "def search_courses(session):\n page = session.get(URL)\n bs = BeautifulSoup(page.text, 'lxml')\n colleges = get_college(bs)\n for college in colleges:\n terms = get_term(session, bs, college)\n for term in terms[1:]:\n majors = get_majors(session, bs, college, term)\n for major in majors:\n for career in CAREER:\n doc_ref = db.collection('colleges').document(college) \\\n .collection('majors').document(major) \\\n .collection('terms').document(term) \\\n .collection('career').document(career)\n\n values = get_param_for_courses(bs, college, term, career, major)\n page = session.post(URL, data=values, headers=headers)\n bs1 = BeautifulSoup(page.text, 'lxml')\n try:\n get_courses(bs1, doc_ref)\n except AttributeError as ex:\n print('No course found')\n time.sleep(randint(0, 1))", "def scrape_course(department, course_number, term):\n # This first part extracts the user input to build the url string that will be processed in the scraper.\n\n\n year = term[1:3]\n season = term[0]\n if season == 'F':\n term_num = '01'\n year = str(int(year) + 1)\n if season == 'W':\n term_num = '02'\n if season == 'S':\n term_num = '03'\n\n url = 'http://catalog.oregonstate.edu/CourseDetail.aspx?Columns=afghijklmnopqrstuvwyz'\\\n '{&SubjectCode=' + department + '&CourseNumber=' + str(course_number) + '&Term=20' + year + term_num\n\n # This part scrapes the website\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html5lib\")\n trs = soup.find_all('tr')\n headers = []\n\n # This takes care of the headers for the table\n for tr in trs:\n for header in tr.find_all('b'):\n stripped_header = header.text\n headers.append(stripped_header)\n\n # This part takes care of the body of the table\n locater = []\n j = 0\n table_data = []\n for tr in trs:\n for td in tr.find_all('td'):\n stripped_td = td.text\n for element in td.find_all('font'):\n if element.text == term:\n locater.append(j)\n j += 1\n table_data.append(element.text)\n\n # This takes care of the course description\n course_description = soup.find('h3').text\n course_description = 
course_description.replace('.', '').replace('(4)','').replace('\\n', ' ').replace(' ', '')\\\n .replace(department+ ' ' + str(course_number), \"\")\n\n\n\n # This part pulls specific items from our table\n turms = []\n crns = []\n instructors = []\n calendars = []\n for element in locater:\n turm = table_data[element]\n crn = table_data[element + 1]\n section = table_data[element + 2]\n Cr = table_data[element + 3]\n pass_no = table_data[element + 4]\n instructor = table_data[element + 5]\n day_time_date = table_data[element + 6]\n day_time_date = day_time_date.replace('\\n', '')\n day_time_date = day_time_date.replace(' ', '')\n location = table_data[element + 7]\n #print(turm, crn, instructor, day_time_date)\n turms.append(turm)\n crns.append(crn)\n instructors.append(instructor)\n calendars.append(day_time_date)\n days = []\n times = []\n\n if len(calendars) > 1:\n for i in range(len(calendars)):\n days.append(calendars[i][0:2])\n times.append(calendars[i][2:11])\n else:\n calendars = str(calendars)\n days = calendars[1:4].replace(\"'\", '')\n times = calendars[4:13]\n table_headings = headers[0], headers[1], headers[5], headers[6]\n class_table = [list(zip(turms, crns, instructors, calendars))]\n formatted_table = [table_headings, class_table]\n\n\n\n description = 'Place holder'\n c = Course(department, course_number, term, course_description, turms, crns, instructors, days, times)\n\n return c", "def mine_utm_courses():\n course_data = {}\n\n utm_api_url = \"https://student.utm.utoronto.ca/timetable/timetable?yos=&subjectarea=\"\n\n for i in tqdm(range(1, utm_max_subjects + 1), desc=\"UTM\"):\n request_url = utm_api_url + str(i) + \"&session=\" + session\n\n response = requests.get(request_url).text\n soup = BeautifulSoup(response, 'html5lib')\n results = soup.find_all(\"div\", id=re.compile(\"-span$\"))\n\n for result in results:\n course_title = result.find(\"h4\").text.strip()\n\n info = re.search('(.*) - (.*)', course_title)\n course_code = info.group(1)\n\n if course_code in course_data:\n continue\n\n course_title = info.group(2)\n\n course_description = result.find(\"div\", class_=\"alert alert-info infoCourseDetails infoCourse\")\n\n key_terms = [\"Exclusion:\", \"Prerequisite:\", \"Corequisite:\"]\n\n for term in key_terms:\n index = str(course_description).find(term)\n if index > -1:\n course_description = str(course_description)[:index]\n\n course_description = BeautifulSoup(str(course_description), 'html5lib').text.strip()\n\n exclusions = result.find(\"strong\", text=re.compile(\"Exclusion:\"))\n prerequisites = result.find(\"strong\", text=re.compile(\"Prerequisites:\"))\n corequisites = result.find(\"strong\", text=re.compile(\"Corequisites:\"))\n\n if exclusions:\n exclusions = str(exclusions.next_sibling).strip()\n\n if prerequisites:\n prerequisites = str(prerequisites.next_sibling).strip()\n\n if corequisites:\n corequisites = str(corequisites.next_sibling).strip()\n\n course_data[course_code] = {\"Title\": course_title,\n \"Description\": course_description,\n \"Exclusions\": exclusions,\n \"Prerequisites\": prerequisites,\n \"Corequisites\": corequisites}\n\n with open('./data/utm_courses.pickle', 'wb') as handle:\n pickle.dump(course_data, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def get_courses(bs, doc_ref):\n courses = bs.find(id=\"ACE_$ICField$4$$0\").tr.find_next_siblings('tr')\n for course in courses:\n title = course.find('a', {'class': 'PSHYPERLINK PTCOLLAPSE_ARROW'}).parent\n sections = course.find_all('table', {'class': 'PSLEVEL1GRIDNBONBO'})\n for 
section in sections:\n section = section.find('tr').find_next_sibling('tr')\n tds = section.find_all('td')\n\n doc_ref.collection('courses').document(title.get_text().strip().split('-')[0]) \\\n .collection('sections').document(tds[0].get_text().strip()).set({\n 'section': tds[1].get_text().split()[0].split('-')[1].strip(),\n 'time': tds[2].get_text().strip(),\n 'Instructor': tds[4].get_text().strip(),\n 'Status': tds[6].img['alt']\n }\n )", "def mine_utsg_courses():\n st_george_api_url = \"https://timetable.iit.artsci.utoronto.ca/api/20209/courses?org=\"\n\n course_data = {}\n\n for subject in tqdm(st_george_subjects, desc=\"UTSG\"):\n request_url = st_george_api_url + subject\n results = json.loads(requests.get(request_url).text)\n\n for key in results:\n\n course_code = results[key]['code']\n\n if course_code in course_data:\n continue\n\n course_title = results[key]['courseTitle']\n course_description = BeautifulSoup(results[key]['courseDescription'], 'html5lib').text.strip()\n exclusions = results[key]['exclusion']\n prerequisites = results[key]['prerequisite']\n corequisites = results[key]['corequisite']\n\n course_data[course_code] = {\"Title\": course_title,\n \"Description\": course_description,\n \"Exclusions\": exclusions,\n \"Prerequisites\": prerequisites,\n \"Corequisites\": corequisites}\n\n with open('./data/utsg_courses.pickle', 'wb') as handle:\n pickle.dump(course_data, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def scraper():\n\t# init BeautifulSoup on webpage\n\tcourse_catalog = \"file:///tmp/guest-LSLA0H/Desktop/hw09/fall2014.html\"\n\tpage = urllib2.urlopen(course_catalog)\n\tsoup = BeautifulSoup(page)\n\tsubjects = \"\"\n\t# find all <ul>\n\ttable = soup.find(\"td\")\n\t# while writing to file keep it open\n\twith open('subjectList.csv', 'w') as csv_file:\n\t\t# find all <li> and write the text inside to the file\n\t\tfor row in table.findAll(\"submenulinktext2\"):\n\t\t\tsubjects = row.findAll(text=True)\n\t\t\tsubject_writer = csv.writer(csv_file, delimiter=\" \")\n\t\t\tsubject_writer.writerow(subjects)", "def course(course_code):\n base_url = (\n f\"{settings.UQ_BASE_URL}/programs-courses/course.html?course_code={course_code}\"\n )\n soup = helpers.get_soup(base_url)\n\n if soup is None or soup.find(id=\"course-notfound\"):\n return None\n\n course_summary_raw = soup.find(id=\"course-summary\")\n\n course_summary = None\n if course_summary_raw:\n course_summary = (\n course_summary_raw.get_text().replace('\"', \"\").replace(\"'\", \"''\")\n )\n\n # handle edge-case (see STAT2203)\n if \"\\n\" in course_summary:\n course_summary = course_summary.split(\"\\n\")[0]\n\n title = soup.find(id=\"course-title\")\n if title:\n title = title.get_text()[:-11].replace(\"'\", \"''\")\n\n course_details = {\n \"course_code\": course_code,\n \"title\": title,\n \"description\": course_summary,\n \"units\": int(soup.find(id=\"course-units\").get_text()),\n \"semester_offerings\": [\"false\", \"false\", \"false\"],\n }\n\n parent_description_elem = soup.find(\n id=\"description\").contents[1].get_text()\n invalid_match = \"This course is not currently offered, please contact the school.\"\n # case for deprecated courses w/ no units (e.g. 
COMP1500) or other determining factors\n if course_details[\"units\"] < 1 or invalid_match in parent_description_elem:\n logfile = open(settings.INVALID_COURSES_FILEPATH, \"w\")\n logfile.write(course_code + \"\\n\")\n return None\n\n try:\n course_details[\"raw_prereqs\"] = soup.find(\n id=\"course-prerequisite\").get_text()\n except AttributeError:\n course_details[\"raw_prereqs\"] = None\n\n try:\n course_details[\"incompatible_courses\"] = (\n soup.find(id=\"course-incompatible\")\n .get_text()\n .replace(\" and \", \", \")\n .replace(\" or \", \", \")\n .replace(\" & \", \", \")\n .replace(\"; \", \", \")\n .split(\", \")\n )\n\n except AttributeError:\n course_details[\"incompatible_courses\"] = None\n\n raw_semester_offerings = str(soup.find_all(id=\"course-current-offerings\"))\n\n if \"Semester 1, \" in raw_semester_offerings:\n course_details[\"semester_offerings\"][0] = \"true\"\n if \"Semester 2, \" in raw_semester_offerings:\n course_details[\"semester_offerings\"][1] = \"true\"\n if \"Summer Semester, \" in raw_semester_offerings:\n course_details[\"semester_offerings\"][2] = \"true\"\n try:\n course_details[\"course_profile_id\"] = soup.find(class_=\"profile-available\")[\n \"href\"\n ].split(\"=\")[-1]\n except TypeError:\n course_details[\"course_profile_id\"] = 0\n\n return course_details", "def collect_courses_data(url, data):\n soup = BeautifulSoup(requests.get(url).content, 'html.parser')\n courses_urls = generate_courses_urls_list(soup)\n\n for course_url in courses_urls:\n course_code, course_data = get_course_data(course_url)\n data[course_code] = course_data", "def parse_course(c):\n course_model = {}\n # Data in first row\n course_model['code'] = str(c.contents[2].contents[0].contents[0].string).strip()\n course_model['name'] = str(c.contents[3].contents[0].contents[0].string).strip()\n course_model['credits'] = str(c.contents[4].contents[0].contents[0].string).strip()\n\n row = c.next_sibling\n \n course_model['times'] = []\n \n # Data in following rows of the same course\n while (type(row) == Tag and row.has_attr('bgcolor') and row['bgcolor'] != \"LightBlue\" and not (row['bgcolor'] == \"White\" and not row.has_attr('align'))):\n row_data = row.contents\n\n # Prerequisite row, if present\n if str(row_data[2].contents[0].string).find(\"Prerequisite:\") != -1:\n course_model['prereq'] = str(row_data[3].contents[0].string).strip()\n\n # Special note row, if present\n elif str(row_data[2].contents[0].string).find(\"Special Note:\") != -1:\n course_model['special_note'] = str(row_data[3].contents[0].string).strip()\n\n # Row containing a session, name, time, location, prof\n elif len(row_data) > 5 and row_data[2].contents[0].contents and len(row_data) > 5 and (str(row_data[2].contents[0].contents[0].string) == \"/1\" or str(row_data[2].contents[0].contents[0].string) == \"/2\" or str(row_data[2].contents[0].contents[0].string) == \"/3\" or str(row_data[2].contents[0].contents[0].string) == \"/4\"):\n time = parse_time(row_data)\n course_model['times'].append(time)\n\n row = row.next_sibling\n return course_model", "def download_all():\n\n course_id = prompt_for_course_id()\n assignment = prompt_for_assignment(course_id)\n students = get_students(course_id)\n\n for student in sorted(students.items(),\n key=lambda student: student[1]):\n download_submission(course_id, assignment, student[1],\n student[0])", "def run(*args):\n request_tokens = {'thought', 'comment', 'workload', 'feedback'}# Words indicative\n # of asking for feedback\n\n # Words added by inspection, 
e.g. people tends to refer to ppl taking that\n # class, hence not asking for feedback\n undesired_tokens = {'teacher', 'lecturer', 'prof', 'manual',\n 'switch', 'people', 'guys', 'grade', 'offer', 'regist'} \n # Set with ids of courses with 'book' in its name because if\n # 'return_courses' identified courses but the word 'book' is in the post\n # text, it most likely concerns asking for books\n BOOK_IDS = {course.id for course in \n Course.objects.filter(name__contains='book')}\n i = 0\n out = []\n # f = open('post_identification_test.txt', 'w')\n for post_inst in Post.objects.filter(text__contains='?'):\n\n textlower = post_inst.text.lower()\n # Check if undesired words are in there\n if any(word in textlower for word in undesired_tokens):\n continue\n \n # Preliminary condition of any request_token (see above)\n # must be satisfied\n if any(word in textlower for word in request_tokens):\n identified_courses = return_courses(textlower)\n if identified_courses == set():\n continue # No courses identified so go to next post\n \n \n # If no 'book' course is identifed, yet 'book' is in the post,\n # skip it because most likely asking for books\n if (BOOK_IDS.intersection(identified_courses) == set() and\n 'book' in textlower):\n continue\n post_inst.request=True\n i += 1\n for course_id in identified_courses:\n post_inst.courses.add(course_id)\n post_inst.save()\n out.append(post_inst)\n # f.write(''.join([\"Identified: \", ', '.join(identified_courses), ' in\\n',post_inst.text,'\\n--------------------------------------------------------\\n']).encode('utf-8'))\n else:\n continue # If none of keywords in post, likely not relevant\n print \"Found\", i, \"posts that ask for course reviews\"", "def college_transfer_scrape(schools):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools", "def return_courses(posttext):\n global coursename_id_dic, coursenames, coursename_relwords_dic\n global courseword_frequency, courseword_abbrevs, abbrevs\n textlower = posttext # post.text.lower()\n processed_text = BEREND(textlower) # BEREND(post.text)\n out = set()\n out.update(set(cname for cname in coursenames if cname in textlower))\n abbrevs_found = [a for a in abbrevs if a in processed_text]\n # Get relevant course words (filtered stopwords)\n plausible_courses = set()\n for abb in abbrevs_found:\n plausible_courses.update(abbrev_courses_dic[abb])\n for coursename in plausible_courses:\n # THIS CONDITION SHOULD BE INCLUDED IN COMMENTS ONLY USING ALREADY RELEVANT\n # COURSES, OTHERWISE GOING TO BE TO HARD\n # FIrst check if any of associated course words is\n # unique (i.e. satisfies presence of this course\n # (e.g. gastronomy occurs only once in all courses, hence no need for\n # full name) \n # for course_word in coursename_relwords_dic[coursename]:\n # if (courseword_frequency[course_word] == 1 and\n # any(abbrev in processed_text for abbrev in\n # courseword_abbrevs[course_word])):\n # out.add(coursename)\n # continue\n\n\n # Else check if course_cond is satisifed, i.e. any of alterantives versions\n # of course words are identified in right order\n course_cond = coursename_course_cond_dic[coursename]\n span = len(course_cond)\n for i in range(len(processed_text)-span):\n window = processed_text[i:i+span]\n if all(window[j] in course_cond[j] for j in range(span)):\n out.add(coursename)\n continue\n # Filter such that e.g. 
both 'philosophy' and 'philosophy in literature'\n # have been identified, it keeps only the longest match to prevent overmatching\n # Take ids of those courses by using mapping from coursename_id_dic\n out = set(coursename_id_dic[el] for el in out if\n all(el==sec_el or el not in sec_el for sec_el in out))\n return out", "def get_courses(url, campus, dept_link):\n client = http.client.HTTPSConnection(url.netloc)\n client.request('GET', '%s%s' % (url.path, dept_link))\n response = client.getresponse()\n if response.status != 200:\n logging.warning('Error reading category (%s): %d %s', dept_link,\n response.status, response.read())\n return\n\n tree = lxml.html.fromstring(response.read())\n client.close()\n\n items = tree.xpath('/html/body/a/p')\n courses = []\n for i in items:\n course = parse_course(i, campus)\n if not course:\n logging.warning('Unable to parse course: %s', lxml.html.tostring(i))\n continue\n\n courses.append(course)\n\n return courses", "def crawl_competitions():\n\n READ_SHELL_COMMAND = shell('kaggle competitions list')\n information = []\n for file in READ_SHELL_COMMAND.output():\n information.append(file)\n\n result = \"\"\n link_perfix = 'https://www.kaggle.com/c/'\n for index, value in enumerate(information):\n if index == 1 :\n continue\n value = value.replace(\"userHasEntered\",\"\").replace(\"True\",\"\").replace(\"False\",\"\")\n result += value + \"\\n\"\n if index >1:\n link = \"Link: \" + link_perfix + value.split(\" \")[0] + \"\\n\"\n result +=link\n\n\n return result", "def feedCourseResponse(courseIds):\n responseList = []\n td = datetime.datetime.now() + datetime.timedelta(hours=5, minutes=30)\n curDate = td.date()\n curTime = td.time()\n for courseId in courseIds:\n dueAssignments, dueExams, recentNotes = 0, 0, 0\n course = courseId.get()\n if course is None:\n print \"Invalid courseId\"\n continue\n assignmentIds = course.assignmentIds\n for assignmentId in assignmentIds:\n assignment = assignmentId.get()\n try:\n if assignment is None:\n continue\n a = assignment.dueDate\n date, month, year = int(a[0:2]), int(a[3:5]), int(a[6:10])\n dueDate = datetime.date(year, month, date)\n if(curDate > dueDate):\n continue\n a = assignment.dueTime\n hour, minute = int(a[0:2]), int(a[3:5])\n dueTime = datetime.time(hour, minute)\n if(curDate == dueDate and dueTime < curTime):\n continue\n dueAssignments = dueAssignments + 1\n except:\n print \"assignmentdate parse error\"\n examIds = course.examIds\n for examId in examIds:\n exam = examId.get()\n try:\n if exam is None:\n continue\n a = exam.dueDate\n date, month, year = int(a[0:2]), int(a[3:5]), int(a[6:10])\n dueDate = datetime.date(year, month, date)\n if(curDate > dueDate):\n continue\n a = exam.dueTime\n hour, minute = int(a[0:2]), int(a[3:5])\n dueTime = datetime.time(hour, minute)\n if(curDate == dueDate and dueTime < curTime):\n continue\n dueExams = dueExams + 1\n except:\n print \"Examdate parse error\"\n noteBookIds = course.noteBookIds\n for noteBookId in noteBookIds:\n noteBook = noteBookId.get()\n if noteBook is None:\n continue\n a = str(noteBook.lastUpdated)\n date, month, year = int(a[8:10]), int(a[5:7]), int(a[0:4])\n lastUpdated = datetime.date(year, month, date)\n if(curDate - lastUpdated).days > 7:\n continue\n recentNotes = recentNotes + 1\n responseList.append(FeedCourseResponse(courseId=courseId.urlsafe(),\n courseName=course.courseName,\n dueAssignments=dueAssignments,\n dueExams=dueExams, date=course.date,\n startTime=course.startTime,\n endTime=course.endTime,\n colour=course.colour,\n 
recentNotes=recentNotes,\n professorName=course.professorName,\n elective=course.elective,\n courseCode=course.courseCode))\n return responseList", "def nav_to_courses_from_requisites(self, soup):\n breadcrumbs = soup.find('span', {'id': 'portlet-breadcrumbs'})\n if breadcrumbs is None:\n raise Exception('failed to navigate to courses from requisite page')\n for bread in breadcrumbs.find_all('a'):\n if bread.text == 'Results':\n postback = bread.get('href')\n payload = self.dc.prepare_payload({}, postback)\n self.dc.http_post(self.COURSEURL, data=payload, params=self.QUERYPARAMS)\n break", "def get_seat_info(course_code: str, course_number: int, section: str, s_term: str):\n\n #: Get the month number of the selected term (for use in the url).\n term = {\"Winter\": \"01\", \"Summer\": \"05\", \"Fall\": \"09\"}[s_term]\n\n now = datetime.today()\n\n #: No need to check courses in the past, thus if a request is made\n #: for a term whose month is before ours, assume that the user\n #: actually wants to know about next year's term. For example, if\n #: a used asks about the Winter term in December 2017, assume they want\n #: January 2018's term, and not January 2017's term.\n if now.month > int(term):\n add_year = 1\n else:\n add_year = 0\n\n #: Create the term_in variable used in the url. It is YYYYMM format.\n term_in = str(datetime.today().year + add_year) + term\n\n #: The url with which to find the course with.\n course_search_url = \"https://www.uvic.ca/BAN1P/bwckctlg.p_disp_listcrse\" \\\n \"?term_in={}&subj_in={}&crse_in={}&schd_in=\" \\\n .format(term_in, course_code, course_number)\n\n #: Get the html page for the generated url, and create a soup.\n response = None\n while not response:\n try:\n response = requests.get(course_search_url)\n except Exception as e:\n print(\"Unable to connect to internet to get request. Trying again in 10 seconds.\")\t\t\n sleep(10)\n\t\t\n soup = BeautifulSoup(response.content, \"lxml\")\n\n #: Get all th tags with a class that is ddtitle.\n course_samples = soup.find_all(\"th\", \"ddtitle\")\n\n #: Loop through all of the found th tags, until we find one whose\n #: string has the requested section in it. 
Then grab the url in the\n #: href assosiated with that, this is the url of the page that has\n #: seat information in it.\n seat_capacity_url = None\n for th in course_samples:\n\n #: th.a.string will look something like:\n #: \tSENG 275 - B01 - ######\n #: thus, if the requested section (eg B01) is in\n #: this string, then this is the correct th.\n if section in th.a.string:\n #: If this is the correct th, then grab the href assosiated\n #: with it, as this is the link to the page with seat info.\n seat_capacity_url = \"https://www.uvic.ca\" + th.a.attrs['href']\n\n #: break to save time.\n break\n\n #: If there was no found url, then the section was not in any of the th's.\n #: This means that either something has changed with the website, or more likely\n #: the class does not have a section matching the requested section.\n if seat_capacity_url is None:\n print(\"{} does not match any existing sections for {} {} in the {} term.\" \\\n .format(section, course_code, course_number, s_term))\n return {\"Seats\": {\n \"Capacity\": 0,\n \"Actual\": 0,\n \"Remaining\": 0\n },\n \"Waitlist\": {\n \"Capacity\": 0,\n \"Actual\": 0,\n \"Remaining\": 0\n }\n }\n\n #: Get the html page for the url with seat capacities, and create a soup.\n response = requests.get(seat_capacity_url)\n soup = BeautifulSoup(response.content, \"lxml\")\n\n #: Find the span that is the label for Seats, and find its parent's parent.\n #: This is the row of the table holding the information on the class's seat\n #: information.\n seats_label = soup.findAll(\"span\", string=\"Seats\")[0]\n seats_row = seats_label.parent.parent\n\n #: Get the numbers for capacity, actual, and remaining seats from the\n #: table.\n cells = seats_row.findAll('td') #: Get each cell in the table.\n seats = { #: cells[x].string holds the value for that cell in the table.\n \"Capacity\": int(cells[0].string),\n \"Actual\": int(cells[1].string),\n \"Remaining\": int(cells[2].string)\n }\n\n #: Find the span that is the label for Waitlist seats, and find its parent's\n #: parent. this is the row of the table holding the information on the class's\n #: waitlist seats information.\n waitlist_label = soup.findAll(\"span\", string=\"Waitlist Seats\")[0]\n waitlist_row = waitlist_label.parent.parent\n\n #: Get the numbers for capacity, actual, and remaining for waitlist seats from\n #: the table.\n cells = waitlist_row.findAll('td')\n waitlist_seats = { #: cells[x].string holds the value for that cell in the table.\n \"Capacity\": int(cells[0].string),\n \"Actual\": int(cells[1].string),\n \"Remaining\": int(cells[2].string)\n }\n\n #: Return the dictionary specified in this method's description.\n return {\"Seats\": seats, \"Waitlist\": waitlist_seats}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute Lasso path with Celer and primal extrapolation.
def path(self, X, y, alphas, coef_init=None, return_n_iter=True, **kwargs):
    return celer_primal_path(
        X, y, alphas=alphas, coef_init=coef_init, max_iter=self.max_iter,
        return_n_iter=return_n_iter, max_epochs=self.max_epochs, p0=self.p0,
        verbose=self.verbose, tol=self.tol, prune=self.prune,
        weights=self.weights)
[ "def _lifter(self, cepstra, L=22):\n if L > 0:\n nframes, ncoeff = np.shape(cepstra)\n n = np.arange(ncoeff)\n lift = 1 + (L / 2) * np.sin(np.pi * n / L)\n return lift * cepstra\n else:\n # values of L <= 0, do nothing\n return cepstra", "def cost2_lasslop(p, Rg, et, VPD, NEE):\n return np.sum((NEE-lasslop(Rg, et, VPD, p[0], p[1], p[2], p[3]))**2)", "def pathloss(coordinates):\n pl1=46.3+33.9*np.log10(fc)-13.82*np.log10(h)\n pl21=(44.9-6.55*np.log10(h))\n pl22=np.log10(get_distance(coordinates))\n pl2=pl21*pl22\n pl=pl1+pl2\n return pl", "def cost_lasslop(p, Rg, et, VPD, NEE):\n return np.sum(np.abs(NEE-lasslop(Rg, et, VPD, p[0], p[1], p[2], p[3])))", "def _estimate_path(self, multiplier, pc_vel, pc_acc):\n # check for duplicates\n self.min_pair_dist, self.t_sum = _check_waypts(\n self.waypts, pc_vel.vlim, pc_acc.alim\n )\n if self.min_pair_dist < JNT_DIST_EPS: # issue a warning and try anyway\n logger.warning(\n \"Duplicates found in input waypoints. This is not recommended,\"\n \" especially for the beginning and the end of the trajectory. \"\n \"Toppra might throw a controllability exception. \"\n \"Attempting to optimise trajectory anyway...\"\n )\n # initial x for toppra's path, essentially normalised time on x axis\n # rescale by given speed limits.\n # only applies to ParametrizeSpline.\n self.path_length_limit = 100 * self.t_sum # empirical magic number\n # t_sum is the minimum time required to visit all given waypoints.\n # toppra generally needs a smaller number for controllabiility.\n # It will find that the needed total path length > t_sum in the end.\n x_max = 1 if multiplier is None else multiplier * self.t_sum\n x = np.linspace(0, x_max, self.waypts.shape[0])\n logger.debug(\n f\"t_sum = {self.t_sum}, t_sum_multiplier = {multiplier}, \"\n f\"estimated path length: {x_max}\"\n )\n # specifying natural here doensn't make a difference\n # toppra only produces clamped cubic splines\n return ta.SplineInterpolator(x, self.waypts, bc_type=\"clamped\")", "def test_Lonepair(self):\n warnings.filterwarnings('ignore', category=CharmmPSFWarning)\n psf = CharmmPsfFile('systems/chlb_cgenff.psf')\n crd = CharmmCrdFile('systems/chlb_cgenff.crd')\n params = CharmmParameterSet('systems/top_all36_cgenff.rtf',\n 'systems/par_all36_cgenff.prm')\n plat = Platform.getPlatformByName('Reference')\n system = psf.createSystem(params)\n con = Context(system, VerletIntegrator(2*femtoseconds), plat)\n con.setPositions(crd.positions)\n init_coor = con.getState(getPositions=True).getPositions()\n # move the position of the lonepair and recompute its coordinates\n plp=12\n crd.positions[plp] = Vec3(0.5, 1.0, 1.5) * angstrom\n con.setPositions(crd.positions)\n con.computeVirtualSites()\n new_coor = con.getState(getPositions=True).getPositions()\n \n self.assertAlmostEqual(init_coor[plp][0]/nanometers, new_coor[plp][0]/nanometers)\n self.assertAlmostEqual(init_coor[plp][1]/nanometers, new_coor[plp][1]/nanometers)\n self.assertAlmostEqual(init_coor[plp][2]/nanometers, new_coor[plp][2]/nanometers)", "def _sqrtlasso_wrapper(self):\n if self.verbose:\n print(\"Sparse logistic regression. \\n\")\n print(self.penalty.upper() + \"regularization via active set identification and coordinate descent. 
\\n\")\n\n return self._decor_cinterface(_PICASSO_LIB.SolveSqrtLinearRegression)", "def compute_spline_varying_alim(self):\n # avoid going over limit taking into account toppra's precision\n pc_vel = constraint.JointVelocityConstraint(\n self.vlim - np.sign(self.vlim) * V_LIM_EPS\n )\n # Can be either Collocation (0) or Interpolation (1).\n # Interpolation gives more accurate results with\n # slightly higher computational cost\n pc_acc = constraint.JointAccelerationConstraint(\n self.alim_coeffs.reshape(-1, 1)\n * (self.alim - np.sign(self.alim) * A_LIM_EPS),\n discretization_scheme=constraint.DiscretizationType.Interpolation,\n )\n # Since scaling to a shorter path length improves siedel stability,\n # prefer short path, try unity next, finally use 1 * t_sum\n # which is unlikely to succceed but worth a try anyways if it got there\n t_sum_multipliers = [0.03, None, 1]\n for multiplier in t_sum_multipliers:\n path = self._estimate_path(multiplier, pc_vel, pc_acc)\n if self.qlim is not None:\n while self.resplines_allowed > 0:\n # If the joint limit checker detects that the spline\n # violates joint limits, it will add additional waypts\n # to keep the spline within joint limits\n if self.joint_limits_obeyed(path, multiplier):\n break\n logger.info(\"Path violates joint limits. Re-estimating.\")\n logger.debug(f\"waypts = {self.waypts}\")\n path = self._estimate_path(multiplier, pc_vel, pc_acc)\n self.resplines_allowed -= 1\n # Use the default gridpoints=None to let\n # interpolator.propose_gridpoints calculate gridpoints\n # that sufficiently covers the path.\n # this ensures the instance is controllable and avoids error:\n # \"An error occurred when computing controllable velocities.\n # The path is not controllable, or is badly conditioned.\n # Error: Instance is not controllable\"\n # If using clamped as boundary condition, the default gridpoints\n # error1e-3 is OK and we don't need to calculate gridpoints.\n # Boundary condition \"natural\" especially needs support by\n # smaller error.\n try:\n instance = algo.TOPPRA(\n [pc_vel, pc_acc],\n path,\n solver_wrapper=\"seidel\",\n parametrizer=\"ParametrizeSpline\",\n )\n return self._compute_and_check_traj(\n instance, multiplier == t_sum_multipliers[-1]\n )\n except RuntimeError:\n logger.error(f\"t_sum_multiplier = {multiplier} failed\")\n if multiplier == t_sum_multipliers[-1]:\n raise # raise on failure with the last candidate\n raise RuntimeError # for linter, never gets here", "def get_L(P1, P2, target_eps=1.0,ncomp=500, error_tol=1e-5):\n\n L=1.0\n error_term=1.0\n\n lambd_pow_array=np.linspace(-3,2,20)\n\n while error_term > error_tol:\n\n\n # increase until the error goes under 'error_tol'\n L=1.05*L\n\n err_temp=1.0\n\n #Compute the lambda-divergence \\alpha^+\n\n for l_pow in lambd_pow_array:\n\n lambda_sum_plus=0\n lambd=L*10**l_pow\n k=ncomp\n for i in range(0,len(P1)):\n lambda_sum_plus+=(P1[i]/P2[i])**lambd*P1[i]\n alpha_plus=np.log(lambda_sum_plus)\n\n #Compute the lambda-divergence \\alpha^-\n lambda_sum_minus=0\n k=ncomp\n for i in range(0,len(P1)):\n lambda_sum_minus+=(P2[i]/P1[i])**lambd*P2[i]\n alpha_minus=np.log(lambda_sum_minus)\n\n #Evaluate the bound of Thm. 10\n # T1=(2*np.exp((ncomp+1)*alpha_plus) - np.exp((ncomp)*alpha_plus) - np.exp(alpha_plus) )/(np.exp(alpha_plus) - 1)\n # T2=(np.exp((ncomp+1)*alpha_minus) - np.exp(alpha_minus) )/(np.exp(alpha_minus) - 1)\n # error_term= (T1+T2)*(np.exp(-lambd*L)/(1-np.exp(-lambd*L)))\n\n #Evaluate the bound of Thm. 
10, stabilised version, rough upper bound\n\n #assuming L \\geq 3, (1 - exp(-L^2/2))^{-1} < 1.02\n T1=(2*np.exp((ncomp+1)*alpha_plus - lambd*L)*1.02)/(np.exp(alpha_plus) - 1)\n T2=(np.exp((ncomp+1)*alpha_minus - lambd*L)*1.02)/(np.exp(alpha_minus) - 1)\n\n # print('nominator : ' + str(2*np.exp((ncomp+1)*alpha_plus - lambd*L)*0.6))\n # print('denominator : ' + str(np.exp(alpha_plus) - 1))\n\n if (T1+T2) < err_temp:\n err_temp=(T1+T2)\n\n error_term=err_temp\n\n print('L: ' + str(L))\n return L, error_term", "def lasso_path(X, y, factor=0.95, n_alphas = 10, **kwargs):\n alpha_max = np.abs(np.dot(X.T, y)).max()\n alpha = alpha_max\n model = Lasso(alpha=alpha)\n weights = []\n alphas = []\n for _ in range(n_alphas):\n model.alpha *= factor\n model.fit(X, y, **kwargs)\n\n alphas.append(model.alpha)\n weights.append(model.w.copy())\n\n alphas = np.asarray(alphas)\n weights = np.asarray(weights)\n return alphas, weights", "def lp_heuris(self, useCplex=False):\n s = api.get_Supplies()\n d = api.get_Demands()\n cap = api.get_Capacities()\n c = api.get_Tcosts() + api.get_Fcosts() / cap\n err = 0\n\n if useCplex and api.CplexOk:\n cpx = cplex.Cplex()\n ub = lambda arc: cap[arc] if cap[arc] < min(s[arc // self.n], d[arc % self.n]) \\\n else cplex.infinity\n cpx.objective.set_sense(cpx.objective.sense.minimize)\n ubnd = [float(ub(arc)) for arc in range(self.narcs)]\n x = cpx.variables.add(obj=c, ub=ubnd)\n # Supply constraints\n lhs = [cplex.SparsePair(x[i * self.n:(i + 1) * self.n], [1] * self.n) for i in self.m]\n cpx.linear_constraints.add(lin_expr=lhs, senses=['E'] * self.m, rhs=s)\n # Demand constraints\n lhs = [cplex.SparsePair([x[i * self.n + j] for i in range(self.m)], [1] * self.m) \\\n for j in range(self.n)]\n cpx.linear_constraints.add(lin_expr=lhs, senses=['E'] * self.n, rhs=d)\n cpx.solve()\n if cpx.solution.is_primal_feasible():\n flows = (np.array(cpx.solution.get_values()) + 0.001).astype(int)\n api.set_flows(flows)\n err = api.set_base()\n else:\n err = 777\n cpx.end()\n else:\n G = nx.DiGraph()\n G.add_nodes_from([i for i in range(self.nnodes)])\n G.add_edges_from([(i, j) for i in range(self.m) \\\n for j in range(self.m, self.nnodes)])\n ndem = {k: {'demand': -s[k] if k < self.m else d[k - self.m]} for k in G.nodes}\n ecost = {e: {'weight': int(round(c[e[0] * self.n + e[1] - self.m] * 1000))} for e in G.edges}\n ecap = {e: {'capacity': cap[e[0] * self.n + e[1] - self.m]} for e in G.edges\n if cap[e[0] * self.n + e[1] - self.m] < min(s[e[0]], d[e[1] - self.m])}\n nx.set_node_attributes(G, ndem)\n nx.set_edge_attributes(G, ecost)\n if ecap: nx.set_edge_attributes(G, ecap)\n flowCst, flowDict = nx.network_simplex(G)\n flows = np.array([flowDict[e // self.n][self.m + e % self.n] \\\n for e in range(self.narcs)], dtype=int)\n api.set_flows(flows)\n objval = api.comp_cost()\n err = api.set_base()\n return err", "def find_path(self, T):\n if T.ntype == 'exNode':\n if T.size <= 1: return self.e\n else:\n self.e = self.e + c_factor(T.size)\n return self.e\n else:\n # Threshold for the hyperplane for splitting data at a given node.\n q = T.q \n # Direction curve for the hyperplane for splitting data at a given node.\n d = T.d \n self.e += 1\n \n if (self.alpha != 1):\n if self.innerproduct(self.x, d, self.deriv_x, self.deriv_D[T.dd]) - q < 0:\n self.path_list.append('L')\n return self.find_path(T.left)\n else:\n self.path_list.append('R')\n return self.find_path(T.right)\n else:\n if self.innerproduct(self.x, d, self.step) - q < 0:\n self.path_list.append('L')\n return 
self.find_path(T.left)\n else:\n self.path_list.append('R')\n return self.find_path(T.right)", "def compute_forward_cost(image, energy):\n\n image = color.rgb2gray(image)\n H, W = image.shape\n\n cost = np.zeros((H, W))\n paths = np.zeros((H, W), dtype=np.int)\n\n # Initialization\n cost[0] = energy[0]\n for j in range(W):\n if j > 0 and j < W - 1:\n cost[0, j] += np.abs(image[0, j+1] - image[0, j-1])\n paths[0] = 0 # we don't care about the first row of paths\n\n ### YOUR CODE HERE\n for row in range(1,H):\n # 先获取之前的像素相邻三个能量值\n upL = np.insert(cost[row - 1, 0:W - 1], 0, 1e10, axis=0)\n upM = cost[row - 1, :]\n upR = np.insert(cost[row - 1, 1:W], W - 1, 1e10, axis=0)\n # 拼接可以使用np.concatenate,但是np.r_或np.c_更高效\n # upchoices = np.r_[upL, upM, upR].reshape(3, -1)\n # upchoices = np.concatenate((upL, upM, upR), axis=0).reshape(3, -1)\n\n # I(i,j+1)\n I_i_j_P = np.insert(image[row,0:W-1],0,0,axis=0)\n # I(i,j-1)\n I_i_j_M = np.insert(image[row,1:W],W-1,0,axis=0)\n # I(i-1.j)\n I_M = image[row-1,:]\n\n C_V = abs(I_i_j_P - I_i_j_M)\n C_V[0] = 0\n C_V[-1] = 0\n\n C_L = C_V + abs(I_M - I_i_j_P)\n C_L[0] =0\n\n C_R =C_V + abs(I_M - I_i_j_M)\n C_R[-1] = 0\n\n upchoices = np.concatenate((upL+C_L, upM+C_V, upR+C_R), axis=0).reshape(3, -1)\n\n cost[row] = energy[row] + np.min(upchoices,axis=0)\n paths[row] = np.argmin(upchoices, axis=0) - 1 #-1,0,1分别表示左中右\n ### END YOUR CODE\n\n # Check that paths only contains -1, 0 or 1\n assert np.all(np.any([paths == 1, paths == 0, paths == -1], axis=0)), \\\n \"paths contains other values than -1, 0 or 1\"\n\n return cost, paths", "def computePath(self, cells):\t# Cells = map.cells\r\n incYpos = decYpos = incXpos = decXpos = True\t# Default: vehicle can go to any direction\r\n right = left = straight = False\t\t\t# Default: each road is Low-traffic\r\n nextHigh = [] # List of cells with high-traffic\r\n nextLow = [] # List of cells with low-traffic\r\n #startHigh = []\t# List of start cells with high traffic\r\n #startLow = []\t# List of start cells with low traffic\r\n nextPath = \"\"\r\n indPrint = 0\t# Index for testing, printing\r\n rand = Random()\r\n secondCell = False # Determines whether it currently compute the vehicle's second cell\r\n\r\n \r\n # Randomly choose the vehicle's starting cell based on the road's status.\r\n #print \"\"\r\n if rand.randint(0, 4) > 0:\r\n random = rand.randint(0, len(const.busyStart)-1)\r\n xpos = const.busyStart[random][0]\r\n ypos = const.busyStart[random][1]\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n else:\r\n random = rand.randint(0, len(const.lowStart)-1)\r\n xpos = const.lowStart[random][0]\r\n ypos = const.lowStart[random][1]\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n\r\n # Randomly compute the vehicle's path based on the road's status\r\n while True:\r\n if incXpos:\t# when a vehicle goes on incremented x-axis\r\n if ((xpos+1) < len(cells)) and cells[xpos+1][ypos] and isinstance(cells[xpos+1][ypos], cell.RoadCell):\t# at a road cell \r\n if not cells[xpos+1][ypos].end:\r\n self.path.append(cells[xpos+1][ypos])\r\n xpos += 1\r\n #print xpos, ypos\r\n secondCell = True\r\n incXpos = True\r\n decXpos = False\r\n incYpos = False\r\n decYpos = False\r\n elif cells[xpos+1][ypos].end: # If the vehicle reach an end point\r\n if secondCell:\r\n self.path.append(cells[xpos+1][ypos])\r\n xpos += 1\r\n #print xpos, ypos\r\n\t break\r\n elif xpos + 1 < len(cells) and cells[xpos+1][ypos] and isinstance(cells[xpos+1][ypos], cell.IntersectionCell): # at an intersection cell\r\n xpos += 
1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n secondCell = True\r\n \r\n # to get the traffic condition of going straight, left, or right\r\n if cells[xpos+1][ypos+1] and cells[xpos+1][ypos+1].busy:\r\n nextHigh.append(\"right\")\r\n else:\r\n nextLow.append(\"right\")\r\n if cells[xpos+2][ypos-2] and cells[xpos+2][ypos-2].busy:\r\n nextHigh.append(\"left\")\r\n else:\r\n nextLow.append(\"left\")\r\n if cells[xpos+3][ypos] and cells[xpos+3][ypos].busy:\r\n nextHigh.append(\"straight\")\r\n else:\r\n nextLow.append(\"straight\")\r\n \r\n if len(nextLow) == 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n elif len(nextHigh) == 0:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n elif rand.randint(0, 4) > 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n else:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n \r\n ####CHOOSE WHICH WAY###\r\n if nextPath == \"right\":\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n incXpos = False\r\n incYpos = True\r\n elif nextPath == \"left\":\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n incXpos = False\r\n decYpos = True\r\n elif nextPath == \"straight\":\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n if decXpos:\t# when a vehicle goes on decremented x-axis\r\n if (xpos - 1 >= 0) and cells[xpos-1][ypos] and isinstance(cells[xpos-1][ypos], cell.RoadCell):\t# at a road cell\r\n if not cells[xpos-1][ypos].end:\r\n self.path.append(cells[xpos-1][ypos])\r\n xpos -= 1\r\n #print xpos, ypos\r\n secondCell = True\r\n incXpos = False\r\n decXpos = True\r\n incYpos = False\r\n decYpos = False\r\n elif cells[xpos-1][ypos].end: # If the vehicle reach an end point\r\n if secondCell:\r\n self.path.append(cells[xpos-1][ypos])\r\n xpos -= 1\r\n #print xpos, ypos\r\n break\r\n elif xpos - 1 >= 0 and cells[xpos-1][ypos] and isinstance(cells[xpos-1][ypos], cell.IntersectionCell):\t# at an intersection cell\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n secondCell = True\r\n \r\n\t\t # to get the traffic condition of going straight, left, or right\r\n\t\t if cells[xpos-1][ypos-1] and cells[xpos-1][ypos-1].busy:\r\n\t\t nextHigh.append(\"right\")\r\n\t\t else:\r\n\t\t nextLow.append(\"right\")\r\n\t\t if cells[xpos-2][ypos+2] and cells[xpos-2][ypos+2].busy:\r\n\t\t nextHigh.append(\"left\")\r\n\t\t else:\r\n\t\t nextLow.append(\"left\")\r\n\t\t if cells[xpos-3][ypos] and cells[xpos-3][ypos].busy:\r\n\t\t nextHigh.append(\"straight\")\r\n\t\t else:\r\n nextLow.append(\"straight\")\r\n \r\n if len(nextLow) == 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n elif len(nextHigh) == 0:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n elif rand.randint(0, 4) > 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n else:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n \r\n ####CHOOSE WHICH WAY###\r\n if nextPath == \"right\":\r\n xpos -= 1\r\n 
self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n decXpos = False\r\n decYpos = True\r\n elif nextPath == \"left\":\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n decXpos = False\r\n incYpos = True\r\n elif nextPath == \"straight\":\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n if incYpos:\t# when a vehicle goes on incremented y-axis\r\n if (ypos + 1 < len(cells[0])) and cells[xpos][ypos+1] and isinstance(cells[xpos][ypos+1], cell.RoadCell):\t# at a road cell \r\n if not cells[xpos][ypos+1].end:\r\n self.path.append(cells[xpos][ypos+1])\r\n ypos += 1\r\n #print xpos, ypos\r\n secondCell = True\r\n incXpos = False\r\n decXpos = False\r\n incYpos = True\r\n decYpos = False\r\n elif cells[xpos][ypos+1].end: # If the vehicle has reached an end point.\r\n if secondCell:\r\n\t self.path.append(cells[xpos][ypos+1])\r\n\t ypos += 1\r\n\t #print xpos, ypos\r\n break\r\n elif ypos + 1 < len(cells[0]) and cells[xpos][ypos+1] and isinstance(cells[xpos][ypos+1], cell.IntersectionCell):\t# at an intersection cell\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n secondCell = True\r\n \r\n\t\t # to get the traffic condition of going straight, left, or right\r\n\t\t if cells[xpos-1][ypos+1] and cells[xpos-1][ypos+1].busy:\r\n\t\t\tnextHigh.append(\"right\")\r\n\t\t else:\r\n\t\t\tnextLow.append(\"right\")\r\n\t\t if cells[xpos+2][ypos+2] and cells[xpos+2][ypos+2].busy:\r\n\t\t\tnextHigh.append(\"left\")\r\n\t\t else:\r\n\t\t\tnextLow.append(\"left\")\r\n\t\t if cells[xpos][ypos+3] and cells[xpos][ypos+3].busy:\r\n\t\t\tnextHigh.append(\"straight\")\r\n\t\t else:\r\n nextLow.append(\"straight\")\r\n \r\n if len(nextLow) == 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n elif len(nextHigh) == 0:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n elif rand.randint(0, 4) > 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n else:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n \r\n ####CHOOSE WHICH WAY###\r\n if nextPath == \"right\":\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n incYpos = False\r\n decXpos = True\r\n elif nextPath == \"left\":\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n incYpos = False\r\n incXpos = True\r\n elif nextPath == \"straight\":\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n if decYpos:\t# when a vehicle goes on decremented y-axis\r\n if (ypos - 1 >= 0) and cells[xpos][ypos-1] and isinstance(cells[xpos][ypos-1], 
cell.RoadCell):\t# at a road cell \r\n if not cells[xpos][ypos-1].end:\r\n\t self.path.append(cells[xpos][ypos-1])\r\n\t ypos -= 1\r\n\t #print xpos, ypos\r\n\t secondCell = True\r\n incXpos = False\r\n decXpos = False\r\n incYpos = False\r\n decYpos = True\r\n elif cells[xpos][ypos-1].end: # if end point\r\n if secondCell:\r\n self.path.append(cells[xpos][ypos-1])\r\n ypos -= 1\r\n #print xpos, ypos\r\n break\r\n elif ypos - 1 >= 0 and cells[xpos][ypos-1] and isinstance(cells[xpos][ypos-1], cell.IntersectionCell):\t# at an intersection cell\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n secondCell = True\r\n \r\n # to get the traffic condition of going straight, left, or right\r\n\t\t if cells[xpos+1][ypos-1] and cells[xpos+1][ypos-1].busy:\r\n\t\t \tnextHigh.append(\"right\")\r\n\t\t else:\r\n\t\t \tnextLow.append(\"right\")\r\n\t\t if cells[xpos-2][ypos-2] and cells[xpos-2][ypos-2].busy:\r\n\t\t \tnextHigh.append(\"left\")\r\n\t\t else:\r\n\t\t \tnextLow.append(\"left\")\r\n\t\t if cells[xpos][ypos-3] and cells[xpos][ypos-3].busy:\r\n\t\t \tnextHigh.append(\"straight\")\r\n\t\t else:\r\n nextLow.append(\"straight\")\r\n \r\n if len(nextLow) == 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n elif len(nextHigh) == 0:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n elif rand.randint(0, 4) > 0:\r\n nextPath = nextHigh[rand.randint(0, len(nextHigh) - 1)]\r\n else:\r\n nextPath = nextLow[rand.randint(0, len(nextLow) - 1)]\r\n \r\n ####CHOOSE WHICH WAY###\r\n if nextPath == \"right\":\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos += 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n decYpos = False\r\n incXpos = True\r\n elif nextPath == \"left\":\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n xpos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n decYpos = False\r\n decXpos = True\r\n elif nextPath == \"straight\":\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n ypos -= 1\r\n self.path.append(cells[xpos][ypos])\r\n #print xpos, ypos\r\n # Remove the content of next possible cells in current position.\r\n for x in nextHigh:\r\n nextHigh.remove(x)\r\n for x in nextLow:\r\n nextLow.remove(x)", "def _lcalc(self):\n # this will require using Rubinstein's L-calc\n raise NotImplementedError", "def all_rail_pathless_to_roadway(self):\n\n for pathless_rail_od in self.fn.rail.iter_od_pairs(pathless=True):\n msg = \"pathless rail od is derived to roadway.\"\n print pathless_rail_od.id, msg\n self.od_pathless_to_roadway(pathless_rail_od)", "def paschen_curve_model(pressure, distance, A, B, gamma):\n return B * pressure * distance / (log(A * pressure * distance) - \n log(log(1 + 1 / gamma)))", "def interpolate_slopes_with_step(self, ch_nodes, ch_dists, interp_pt_elevs):\n ch_z = self._grid.at_node[\"topographic__elevation\"][ch_nodes]\n assert (\n ch_z[0] >= interp_pt_elevs[-1]\n ), \"Highest interp_pt_elev must be below top channel node\"\n interp_pt_x = np.interp(interp_pt_elevs, ch_z[::-1], ch_dists[::-1])\n interp_pt_S = np.empty_like(interp_pt_elevs)\n # now a downwind map of the slopes onto the nodes\n # slopes are defined positive\n z_diff = interp_pt_elevs[:-1] - 
interp_pt_elevs[1:]\n x_diff = interp_pt_x[1:] - interp_pt_x[:-1]\n np.divide(z_diff, x_diff, out=interp_pt_S[:-1])\n interp_pt_S[-1] = interp_pt_S[-2]\n # Map S back onto nodes\n ch_S = np.interp(ch_z, interp_pt_elevs, interp_pt_S)\n\n return ch_S", "def runLasso():\n X,y=preprocess()\n Lasso(X,y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
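The `path` document above forwards to `celer_primal_path`, celer's working-set Lasso path solver. As a rough usage sketch only, assuming the `celer` package is installed and that its `Lasso` estimator follows the usual scikit-learn `alpha` / `fit` / `coef_` conventions, a small path can be traced by refitting over a grid of regularization strengths:

import numpy as np
from celer import Lasso  # assumes the celer package is installed

rng = np.random.RandomState(0)
X = rng.randn(50, 20)
y = X @ rng.randn(20) + 0.01 * rng.randn(50)

# grid of alphas scaled off the usual alpha_max heuristic
alphas = np.logspace(0, -2, 5) * np.max(np.abs(X.T @ y)) / len(y)
coefs = []
for alpha in alphas:
    # refit at each alpha; a real path solver would warm-start instead
    clf = Lasso(alpha=alpha, max_iter=100).fit(X, y)
    coefs.append(clf.coef_.copy())
coefs = np.array(coefs)  # shape (n_alphas, n_features)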
Show conducto login profiles recognized on this computer.
def profile_list():
    conf = api.Config()
    for profile in conf.profile_sections():
        data = conf._profile_general(profile)
        try:
            _print_profile(profile, data)
        except KeyError:
            print(
                log.format(
                    f"Invalid or incomplete profile '{profile}'",
                    color="red",
                    bold=False,
                )
            )
[ "def list_profiles():\n marker = {True: \"(*)\", False: \" \"}\n print(\" Available profiles:\")\n for profile in os.listdir(SCRIPT_DIRECTORY):\n print(\" {} {}\".format(marker[profile == DEFAULT_PROFILE], profile))", "async def profiles(self, ctx):\n if ctx.invoked_subcommand is None:\n await self.show(ctx)", "def cli(ctx, **kwds):\n profile_names = profiles.list_profiles(ctx, **kwds)\n print(profile_names)", "def listProfiles(args=None):\n com = comar.Link() #communicating with comar deamon\n com.localize() #set language for translated replies\n links = queryLinks(com)\n profiles = queryProfiles(com)\n \n profiles.sort(key=lambda x: x.devname + x.name) #profiles are sorted by device_name + name\n \n name_title = \"\" # _(\"Profile\")\n state_title = \"\" # _(\"Status\")\n addr_title = \"\" # _(\"Address\")\n \n #name_size and state_size are set to the maximum length of name/state of profiles\n # -for ljust operations in output format-\n name_size = max(max(map(lambda x: len(x.name), profiles)), len(name_title))\n state_size = max(max(map(lambda x: len(x.get_state()), profiles)), len(state_title))\n \n cstart = \"\"\n cend = \"\"\n link_list = links.items()\n link_list.sort(key=lambda x: x[1].name)\n profile_names_list=[]\n for script, link in link_list:\n link_profiles = filter(lambda x: x.script == script, profiles)\n if len(link_profiles) > 0:\n print \"%s:\" % link.name\n for profile in link_profiles:\n line = \" %s%s%s | %s%s%s | %s%s%s\" % (\n cstart,\n profile.name.ljust(name_size),\n cend, cstart,\n profile.get_state().center(state_size),\n cend, cstart,\n profile.get_address(),\n cend\n )\n print line\n profile_names_list.append(profile.name) \n return profile_names_list # returns all profile_names defined on comp.", "async def com_show_cprofile(ctx):\n\n await ctx.channel.trigger_typing()\n\n user = ctx.message.author.id\n claimed_profiles = ph.show_claimed_profiles(user)\n\n if ph.error != '':\n await ctx.channel.send(\n ctx.message.author.mention + ' ' + ph.error\n )\n\n else:\n await ctx.channel.send(\n ctx.message.author.mention + \" Your claimed profiles are:\\n\" +\n \"```\" + '\\n'.join(claimed_profiles) +\n \"```\"\n )", "def connectedUserView(self):\n highestCount = \"NaN\"\n try:\n output = subprocess.check_output(\"ntpdc -n -c monlist | awk '{if(NR>2)print $1}' | uniq | wc -l\", shell=True) # Gets all the connected clients from ntp\n except subprocess.CalledProcessError as e:\n output = e.output\n returncode = e.returncode\n print returncode\n \n try:\n highestCount = subprocess.check_output(\"ntpdc -n -c monlist | awk '{if(NR>2)print $4}' | sort -nrk1,1 | line\", shell=True) # Gets the highest connections from connected clients\n except subprocess.CalledProcessError as e:\n output = e.output\n returncode = e.returncode\n print returncode\n theStr = \"Con users: {:>6}\".format(output)\n theStr += \"Hi cons: {:>8}\".format(highestCount)\n self.writeLCD(theStr)", "def print_profiles(profiles):\n\n print \"Available profiles for the pods are the following:\"\n\n for profile in profiles:\n print \" %s\" % (profile)", "def profile(ctx):\n if ctx.invoked_subcommand is None:\n config = ctx.obj.configuration\n\n default = config.default_profile_name()\n names = config.profiles()\n for profile_name in names:\n profile = config.profile(profile_name)\n if profile_name == default:\n click.echo(\"Profile: %s (default)\" % profile_name)\n else:\n click.echo(\"Profile: %s\" % profile_name)\n click.echo(\"User: %s\" % profile['user'])\n click.echo(\"URL: %s\" % profile['url'])\n 
click.echo()", "def list_profiles(config: Config):\n profile: Optional[str] = config.profile\n click.echo(\"\\nBonsai configuration file(s) found at {}\".format(config.file_paths))\n click.echo(\"\\nAvailable Profiles:\")\n if profile:\n if profile == \"DEFAULT\":\n click.echo(\" DEFAULT\" + \" (active)\")\n else:\n click.echo(\" DEFAULT\")\n\n # Grab Profiles from bonsai config and list each one\n sections = config.section_list()\n for section in sections:\n if section == profile:\n click.echo(\" \" + section + \" (active)\")\n else:\n click.echo(\" \" + section)\n else:\n click.echo(\"No profiles found please run 'bonsai configure'.\")", "def print_pod_profiles(pods, remote):\n for pod in pods:\n profile = remote.get_system(pod)['profile']\n print \"%s: %s\" % (pod, profile)", "def list_connected_users(self):\n\n connected_users = self.chatty_server.connected_users\n\n self.write(\" CONNECTED USERS:\", message_type=\"status\")\n for user, status in connected_users.items():\n self.write(\" %s (%s)\" % (user, status), message_type=\"status\")", "def print_profile_information(config: Config):\n try:\n profile: Any = config.profile\n profile_info = config.section_items(profile)\n except NoSectionError:\n profile_info = config.defaults().items()\n\n click.echo(\"\\nBonsai configuration file(s) found at {}\".format(config.file_paths))\n click.echo(\"\\nProfile Information\")\n click.echo(\"--------------------\")\n if profile_info:\n for key, val in profile_info:\n click.echo(key + \": \" + str(val))\n else:\n click.echo(\"No profiles found please run 'bonsai configure'.\")", "def seccomp_profiles(self):\n return self._seccomp_profiles", "def acc_status():\n print(\"\\nAccount Information\")\n for info in acc_info:\n print(info, \":\", acc_info[info])", "def profiles(self) -> Sequence[str]:\n return pulumi.get(self, \"profiles\")", "def os_profile(self) -> 'outputs.OSProfileResponse':\n return pulumi.get(self, \"os_profile\")", "def display_cred(cls):\n return cls.cred_list", "def infoProfile (args):\n profile_name = \"\"\n if ( len(args) == 0 ):\n profile_name = raw_input('%s -> ' % _(\"Enter name of profile\"))\n else: \n profile_name=\" \".join(args)\n com = comar.Link()\n com.localize() \n com.Net.Link.connectionInfo(name=profile_name)\n\n global found\n found = False\n for reply in collect(com):\n if reply.command == \"result\":\n found = True\n profile = Profile(reply.script, profile_name)\n profile.parse( reply.data )\n print\n profile.print_info()\n if ( not found ) :\n print _(\"No such profile\")", "def list(self, architecture):\n return self._list(\"/archs/%s/profiles\" % architecture.id,\n \"profiles\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
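`profile_list` above reads profile sections out of an INI-style config through `api.Config()`. As a loose illustration of that pattern only, with an invented file location and invented section names (not conducto's actual config layout), the standard-library equivalent looks like:

import configparser
from pathlib import Path

# hypothetical config location and layout, for illustration only
config_path = Path.home() / ".example_tool" / "config"

parser = configparser.ConfigParser()
parser.read(config_path)

for section in parser.sections():
    if not section.startswith("profile "):
        continue  # skip non-profile sections such as [general]
    data = dict(parser[section])
    print(section, data.get("url", "<no url>"))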
Add a new login profile for a specified Conducto URL.
def profile_add(url, default=False, token=None):
    if token is not None:
        os.environ["CONDUCTO_TOKEN"] = token
    profile = _profile_add(url, default=default)
    conf = api.Config()
    data = conf._profile_general(profile)
    _print_profile(profile, data)
[ "def set_access_profile_url(self, access_profile, obj, profile, snmp_ro_community):\n if profile is None:\n raise CommandError(\"Script name must contain profile when using URLs\")\n url = URL(obj)\n access_profile.profile = profile\n access_profile.scheme = Script.get_scheme_id(url.scheme)\n access_profile.address = url.host\n if url.port:\n access_profile.port = url.port\n access_profile.user = url.user\n if \"\\x00\" in url.password:\n # Check the password really the pair of password/enable password\n p, s = url.password.split(\"\\x00\", 1)\n access_profile.password = p\n access_profile.super_password = s\n else:\n access_profile.password = url.password\n access_profile.path = url.path\n if snmp_ro_community:\n access_profile.snmp_ro = snmp_ro_community", "def add_provider(request):\n try:\n web_provider = request.user.get_profile()\n except:\n web_provider = None\n return {'web_provider': web_provider}", "def new_profile() -> Profile:\n return Profile(\n last_name='',\n email='',\n picture='')", "async def addprofile(ctx):\n await ctx.send(\n ctx.message.author.mention + ' Please fill out this google form to ' +\n 'add a profile to the profile pool.' +\n '\\nhttps://docs.google.com/forms/d/e/1FAIpQLScJpN7TuiklZjTzVhTLJyYXPF_9K2jhIqdwEBbHAiUV5Z1qLg/viewform?usp=sf_link'\n )", "def create_user_profile():\n response = put_firehose_record(\n validation_schema=profile_schema, delivery_stream_name='user_profiles')\n return response", "def create(self, customer_id: int, type_: str, value: str) -> int:\n response = self.base_post_request(\n f\"{self.base_url}/{customer_id}/social-profiles\", type=type_, value=value\n )\n\n return self.process_result_with_status_code(response, 201)", "def profile_add_online_profiles(\n context: Context, supplier_alias: str, online_profiles: Table\n):\n actor = get_actor(context, supplier_alias)\n company = get_company(context, actor.company_alias)\n profiles = [row[\"online profile\"] for row in online_profiles]\n facebook = PROFILES[\"FACEBOOK\"] in profiles\n linkedin = PROFILES[\"LINKEDIN\"] in profiles\n twitter = PROFILES[\"TWITTER\"] in profiles\n\n # Step 1 - Update links to Online Profiles\n response, new_details = profile.edit_online_profiles.update_profiles(\n actor, company, facebook=facebook, linkedin=linkedin, twitter=twitter\n )\n context.response = response\n\n # Step 2 - Check if Supplier is on FAB Profile page\n profile.edit_company_profile.should_be_here(response)\n\n # Step 3 - Update company's details stored in context.scenario_data\n update_company(\n context,\n company.alias,\n facebook=new_details.facebook,\n linkedin=new_details.linkedin,\n twitter=new_details.twitter,\n )\n logging.debug(\n \"%s set Company's Online Profile links to: Facebook=%s, LinkedId=%s, \"\n \"Twitter=%s\",\n supplier_alias,\n new_details.facebook,\n new_details.linkedin,\n new_details.twitter,\n )", "def initialize_user_profile(sender, request, user, **kwargs):\n authz_token = get_authz_token(request)\n user_profile_client_pool.initializeUserProfile(authz_token)\n log.debug(\"initialized user profile for {}\".format(user.username))", "def create_new_openid(self, identity_url, username):\n self._openid[identity_url] = username", "def create_user(uform, pform):\n user = uform.save()\n profile = pform.save(commit=False)\n profile.user = user\n profile.save()", "def _update_profile_data_in_config(self, config, profile, url = None,\r\n username = None, key_file = None,\r\n cert_file = None, client_id = None):\r\n if url is not None:\r\n config[profile][\"url\"] = 
url\r\n self._add_timestamp_to_profile_data_in_config(config, profile)\r\n if username is not None:\r\n config[profile][\"username\"] = username\r\n self._add_timestamp_to_profile_data_in_config(config, profile)\r\n if key_file is not None:\r\n config[profile][\"key_file\"] = key_file\r\n self._add_timestamp_to_profile_data_in_config(config, profile)\r\n if cert_file is not None:\r\n config[profile][\"cert_file\"] = cert_file\r\n self._add_timestamp_to_profile_data_in_config(config, profile)\r\n if client_id is not None:\r\n config[profile][\"client_id\"] = client_id\r\n self._add_timestamp_to_profile_data_in_config(config, profile)", "def handle_profile_for_cli(ctx):\n profile = cloudless.profile.load_profile(ctx.obj['PROFILE'])\n if not profile:\n click.echo(\"Profile: \\\"%s\\\" not found.\" % ctx.obj['PROFILE'])\n click.echo(\"Try running \\\"cldls --profile %s init\\\".\" % ctx.obj['PROFILE'])\n sys.exit(1)\n ctx.obj['PROVIDER'] = profile[\"provider\"]\n ctx.obj['CREDENTIALS'] = profile[\"credentials\"]\n ctx.obj['CLIENT'] = cloudless.Client(provider=ctx.obj['PROVIDER'],\n credentials=ctx.obj['CREDENTIALS'])", "def _getOrgProfileCreateUrl(program):\n return '/gsoc/org/profile/create/%s' % program.key().name()", "def add_new_user (username, password, title):\n storage_format = f\"{username}|{password}|{title}\"\n append_new_line(users_credentials, storage_format)", "def get_profile_url(self):\n return API_URL + 'users/' + self.account.extra_data['id']", "def add(state: 'SoState', profile: 'SoProfile') -> \"void\":\n return _coin.SoProfileElement_add(state, profile)", "def dso_quay_add_user(url, login_username, login_password, verbose, usernames,\n passwords):\n with quay.Quay(\n url, login_username, login_password, verbosity=verbose\n ) as api:\n for username, password in zip(usernames.split(','),\n passwords.split(',')):\n new_user = api.add_user(username, password)\n if new_user is not None:\n print(f'{username} added')\n else:\n print(f'{username} ok')", "def load_profile(self, profile):\n self.config.set_current_profile(profile)\n # send the at conn str\n self._configure_connection(profile)", "def SoProfileElement_add(state: 'SoState', profile: 'SoProfile') -> \"void\":\n return _coin.SoProfileElement_add(state, profile)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the local agent for the default or specified profile.
def profile_start_agent(id=None):
    if id is not None:
        os.environ["CONDUCTO_PROFILE"] = id
    token = api.Config().get_token_from_shell()
    if os.getenv("CONDUCTO_OS", "").startswith("Windows"):
        os.environ["WINDOWS_HOST"] = "plain"
    start_status = agent_utils.launch_agent(token=token)
    if start_status.startswith("running"):
        print(f"Agent for profile {api.Config().default_profile} is already running")
    else:
        print(f"Agent launched for profile {api.Config().default_profile}")
    config = api.Config()
    if config.default_profile != config.get("general", "default"):
        print("To set this profile as the default for command line usage:")
        print(f"conducto-profile set-default {config.default_profile}")
[ "def start_seattle():\n starter_file_path = [SEATTLE_FILES_DIR + os.sep + get_starter_file_name()]\n if OS == \"WindowsCE\":\n windows_api.launch_python_script(starter_file_path)\n else:\n if SILENT_MODE:\n p = subprocess.Popen(starter_file_path,stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n else:\n p = subprocess.Popen(starter_file_path)\n\n p.wait()", "def start_launch(self, context, **kwargs):\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerunOf=self._cfg.rerun_of,\n **kwargs\n )\n if not self._skip_analytics:\n send_event(self.agent_name, self.agent_version)", "def _profile(self):\n if self.args.profile == \"console\":\n self._console_profile(self._main)\n else:\n cProfile.runctx(\"self._main()\", locals(), locals(), self.args.profile)", "def load_profile(params):\n #load conf according to execenv/devenv switch\n if params[u'execenv']:\n conf = config.ExecEnvConfigFile(params[u'conf'])\n else:\n conf = config.DevEnvConfigFile(params[u'conf'])\n\n profile = None\n if params[u'first_prof']:\n #load first profile\n profiles = conf.load()\n if len(profiles) == 0:\n logger.fatal(u'No profile exists. Unable to start.')\n sys.exit(1)\n profile = profiles[profiles.keys()[0]]\n\n elif params[u'prof'] is None:\n #show profile wizard\n profile = conf.select_profile()\n\n else:\n #profile selected from command line\n profiles = conf.load()\n if params[u'prof'] not in profiles.keys():\n logger.fatal(u'Profile \"%s\" does not exist.' % params[u'prof'])\n sys.exit(1)\n profile = profiles[params[u'prof']]\n\n logger.debug(u'Selected profile: %s' % profile)\n return profile", "def start(self):\n if self.step_id is not None:\n self.start_local()\n else:\n super(SlurmJob, self).start()", "def load_profile(self, profile):\n self.config.set_current_profile(profile)\n # send the at conn str\n self._configure_connection(profile)", "def start():\n site = _env.hostout.options.get('hostname')\n if _env.hostout.options.get('remote-sudo') == 'true':\n _sudo('supervisorctl start %s:*' % site)\n else:\n _run('supervisorctl start %s:*' % site)", "def start():\n with local_basedir():\n local('screen -dmS hueweb ./runserver.py')", "def setDefaultProfile( self, profile ):\n self._defaultProfile = profile", "def main():\n utils.vip_main(LightAgent, version=__version__)", "def start_leader_follower(self):\n # pylint: disable=attribute-defined-outside-init\n self.runner = make_runner(\n runner_type=RunnerType.LEADER_FOLLOWER,\n abort_on_error=False,\n installer_set=self.installer_set,\n use_auto_certs=False,\n selenium_worker=\"none\",\n selenium_driver_args=[],\n runner_properties=RunProperties(\n enterprise=True,\n encryption_at_rest=False,\n ssl=False,\n ),\n )\n self.runner.starter_prepare_env()\n self.runner.starter_run()\n self.runner.finish_setup()\n self.runner.leader_starter_instance.detect_arangosh_instances(\n self.runner.leader_starter_instance.cfg, self.runner.leader_starter_instance.cfg.version\n )\n self.runner.follower_starter_instance.detect_arangosh_instances(\n self.runner.follower_starter_instance.cfg, self.runner.leader_starter_instance.cfg.version\n )\n self.starter = self.runner.leader_starter_instance", "def cmd_start(self, and_wait=False):\n # starts actor system, director, and source authority\n self.asys.tell(self.director, 'start')\n r = self('refresh')\n if not r and and_wait:\n import time\n while 
True:\n time.sleep(3600)\n return r", "def start_profiler(state):\n if core.is_profiler_enabled():\n return\n if state not in ['CPU', 'GPU', \"All\"]:\n raise ValueError(\"The state must be 'CPU' or 'GPU' or 'All'.\")\n if state == \"GPU\":\n prof_state = core.ProfilerState.kCUDA\n elif state == \"CPU\":\n prof_state = core.ProfilerState.kCPU\n else:\n prof_state = core.ProfilerState.kAll\n core.enable_profiler(prof_state)", "def profile(ctx):\n if ctx.invoked_subcommand is None:\n config = ctx.obj.configuration\n\n default = config.default_profile_name()\n names = config.profiles()\n for profile_name in names:\n profile = config.profile(profile_name)\n if profile_name == default:\n click.echo(\"Profile: %s (default)\" % profile_name)\n else:\n click.echo(\"Profile: %s\" % profile_name)\n click.echo(\"User: %s\" % profile['user'])\n click.echo(\"URL: %s\" % profile['url'])\n click.echo()", "def set_default_profile(self, profile_name):\n\n if profile_name in self._profileman.profile_names:\n self.set_config_value(ks_ini.DEFAULT_PROFILE, profile_name)", "def launch ():\n get_network_info()\n core.registerNew(job_aware_switch)", "def start_profiler(prefix, pcts=False):\n gevent_profiler.set_stats_output('%s-stats.txt' % prefix)\n gevent_profiler.set_summary_output('%s-summary.txt' % prefix)\n gevent_profiler.print_percentages(pcts)\n\n gevent_profiler.set_trace_output(None) #'%s-trace.txt' % prefix) @TODO make optional\n gevent_profiler.attach()", "def setup_skywalking():\n config.agent_instance_name = f'{config.agent_instance_name}-child({os.getpid()})'\n\n agent.start()\n # append pid-suffix to instance name\n logger.info(f'Apache SkyWalking Python agent started in pre-forked worker process PID-{os.getpid()}. '\n f'Service {config.agent_name}, instance name: {config.agent_instance_name}')", "def handle_profile_for_cli(ctx):\n profile = cloudless.profile.load_profile(ctx.obj['PROFILE'])\n if not profile:\n click.echo(\"Profile: \\\"%s\\\" not found.\" % ctx.obj['PROFILE'])\n click.echo(\"Try running \\\"cldls --profile %s init\\\".\" % ctx.obj['PROFILE'])\n sys.exit(1)\n ctx.obj['PROVIDER'] = profile[\"provider\"]\n ctx.obj['CREDENTIALS'] = profile[\"credentials\"]\n ctx.obj['CLIENT'] = cloudless.Client(provider=ctx.obj['PROVIDER'],\n credentials=ctx.obj['CREDENTIALS'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop the local agent for the default or specified profile.
def profile_stop_agent(id=None):
    if id is not None:
        os.environ["CONDUCTO_PROFILE"] = id
    container_name = agent_utils.agent_container_name()
    running = container_utils.get_running_containers()
    if f"{container_name}-old" in running:
        cmd = ["docker", "stop", f"{container_name}-old"]
        subprocess.run(cmd, stdout=subprocess.PIPE)
    if container_name in running:
        cmd = ["docker", "stop", container_name]
        subprocess.run(cmd, stdout=subprocess.PIPE)
    else:
        config = api.Config()
        print(f"No agent running for profile {config.default_profile}")
[ "def stop_cluster(self, profile):\n self.check_profile(profile)\n data = self.profiles[profile]\n if data['status'] == 'stopped':\n raise web.HTTPError(409, u'cluster not running')\n data = self.profiles[profile]\n cl = data['controller_launcher']\n esl = data['engine_set_launcher']\n if cl.running:\n cl.stop()\n if esl.running:\n esl.stop()\n # Return a temp info dict, the real one is updated in the on_stop\n # logic above.\n result = {\n 'profile': data['profile'],\n 'profile_dir': data['profile_dir'],\n 'status': 'stopped'\n }\n return result", "def cmd_stop_traffic(self, options, extra_vars):\n \n # uses the inventory created in ~/vars/f5aws/env/<env name> created by `init`\n inventory = '%s/%s/inventory/hosts' % (self.settings['env_path'], options.env_name)\n self.run_playbooks(['stop_traffic.yml'], inventory, options, extra_vars)\n print 'Stopped jmeter client.'", "def stop(self, instance: RuntimeInstance.Params, env: RuntimeEnvironment.Params, **kwargs):", "def stop():\n site = _env.hostout.options.get('hostname')\n if _env.hostout.options.get('remote-sudo') == 'true':\n _sudo('supervisorctl stop {0:s}:*'.format(site))\n else:\n _run('supervisorctl stop {0:s}:*'.format(site))", "def stop(self):\n self.logger.debug('Server - td-agent-bit - stop call.')\n self.change_service_status(\"stop\")", "def stop(status=\"\"):\n raise StopScript(status)", "def stop(opp):\n opp.services.call(ha.DOMAIN, SERVICE_OPENPEERPOWER_STOP)", "def stop(self):\n Multipass.stop(self.name)", "def StopSlave(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def stop(self):\n\t\tPopen([\"screen\", \"-S\", self.name, \"-X\", \"quit\"])", "def stop_vm():\n send_vm_command(VM_STOP)", "def test_multiple_stop():\n p = profiler.Profiler()\n p.start()\n p.stop()\n p.stop()", "def execute(self, env, args):\n\n env.task.stop()", "def listener_stop(context, name):\n context.execute_cmd(lambda: cmd_listener_stop(context, name))", "def stop(self):\n logger.info(\"Stopping Follw\")\n self.terminate = True\n self.location.stop()", "def stop(target):\n print('\\033[93m'+\" Stopping scripts on {}..\".format(target)+'\\033[0m')\n execute_remote(target, \"pkill -f remote_launch\")\n\n return True", "def test_stop(self):\n guest_name = \"some guest\"\n parameters_stop = {}\n self._mock_virsh.return_value.is_defined.return_value = True\n self._mock_virsh.return_value.is_running.return_value = True\n self._hyp.login()\n self._hyp.stop(guest_name, parameters_stop)\n self._mock_virsh.return_value.shutdown.assert_called_with(\n guest_name, timeout=mock.ANY)", "def stop_script(name):\n job = JOBS.get(name, None)\n try:\n runnable = job.job_func\n runnable.stop()\n except:\n pass\n try:\n schedule.cancel_job(job)\n except:\n pass\n print('Canceled script {}'.format(name))", "def kill(self):\n\n self.running = False\n\n try:\n # teardown robot\n self.strategy.teardown()\n except Exception:\n # method not implemented by strategy\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count number of boxes based on the product packaging
def count_boxes(packages: List[dict]) -> int:
    volume = sum([p["width"]*p["length"]*p["height"] for p in packages])
    weight = sum([p["weight"] for p in packages])
    return max(math.ceil(volume/BOX_VOLUME), math.ceil(weight/BOX_WEIGHT))
[ "def test_team_builder_config_product_groups_count_get(self):\n pass", "def quantity_size():", "def number_of_products():\n return NUMBER_OF_PRODUCTS", "def count_bag_contents(rules, outer):\n \n contents = rules[outer]\n \n # we count ourself\n count = 1\n \n for inner_bag in contents:\n qty = inner_bag[\"qty\"]\n inner_count = count_bag_contents(rules, inner_bag[\"color\"])\n count += (qty * inner_count)\n return count", "def boxcounts(self):\n return (self.boxes, self.counts)", "def __len__(self):\n\n count = 0\n\n for part in CPE.CPE_PART_KEYS:\n elements = self.get(part)\n for elem in elements:\n for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED:\n comp = elem.get(ck)\n if not isinstance(comp, CPEComponentUndefined):\n count += 1\n\n return count", "def get_calc_unique_pieces(self):\n # TODO: Make this work with rebrickable inventories\n count = db.run_sql(\"SELECT COUNT(bl_inventories.quantity) FROM bl_inventories JOIN parts\"\n \" ON bl_inventories.piece_id = parts.id\"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return count", "def _get_count(self) -> \"size_t\" :\n return _core.ProductPreferencesCollection__get_count(self)", "def products_nb(self, cleaned_scraped, category):\n products_nb = 0\n for product in cleaned_scraped:\n if category == product[\"categories\"]:\n products_nb += 1\n if products_nb < MIN_PROD:\n cat_filled = False\n else:\n print(category, \"filled\")\n cat_filled = True\n return cat_filled", "def size_calc(self):\n #rospy.loginfo(\"box_size: {}\".format(self.box_size))\n width = self.flag_box[1][0] - self.flag_box[0][0]\n height = self.flag_box[1][1] - self.flag_box[0][1]\n # self.box_size = width*height\n #print(\"AREA\", width*height)\n box_area = width*height\n if box_area <= 320 and box_area >= 250:\n self.count += 1\n else:\n self.count == 0\n print(\"COUNT\", self.count)\n self.box_x = (self.flag_box[0][0]+self.flag_box[1][0])/2\n #rospy.loginfo(\"x: {} , y: {}\".format(self.box_x, box[0][1]))", "def test_count_packages(self):\n self._create_finished_release()\n result = orlo.queries.count_packages(**self.INCLUSIVE_ARGS).all()\n self.assertEqual(1, result[0][0])", "def content_pack_count(self):\n return self._content_pack_count", "def get_calc_piece_count(self):\n\n count = db.run_sql(\"SELECT SUM(bl_inventories.quantity) FROM bl_inventories \"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return count", "def getSizeInv(self, productBox):\n sizesSelectors = productBox.css(\"p.product-sizes>span\")\n sizes = {}\n for sizesSelector in sizesSelectors:\n inv = int(sizesSelector.attrib['data-stock'])\n if inv != 0:\n sizes[sizesSelector.css(\"::text\").get().strip().replace(\".\", \"_\")] = inv\n return sizes", "def numeroElements(self):\n count=0\n for c in self._components:\n count+=1\n return count", "def count(self):\r\n return sum([item.quantity for item in self.items])", "def shell_module_size(shell_blueprint):\n size = 0\n for i in range(0, len(shell_blueprint)):\n if shell_blueprint[i] in TailParts:\n break\n size += 1\n return size", "def _get_count(self) -> \"size_t\" :\n return _core.Palettes__get_count(self)", "def _get_count(self) -> \"size_t\" :\n return _core.MaterialLibraries__get_count(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the delivery cost for a specific address and list of products
def get_pricing(products: List[dict], address: dict) -> int:
    return count_boxes([p["package"] for p in products]) * get_shipping_cost(address)
[ "def cost_of(amount, item, hours, products):\n for items in products:\n if items[0] == item:\n return float(items[2]) * float(amount) * float(hours)", "def __get_order_cost(self, order_qty, pack_costs):\n total_cost = 0.00\n\n for pack, qty in order_qty.iteritems():\n total_cost += qty * pack_costs[pack]\n\n return total_cost", "def generate_shipping_price(location, days, drop_location):\n\n base_location_cost = 0\n for i in range(len(location)):\n base_location_cost += ord(location.lower()[i]) - 97\n\n for i in range(len(drop_location)):\n base_location_cost += ord(drop_location.lower()[i]) - 97\n\n #age_multiplier = 1\n # Select economy is car_type is not found\n \"\"\"if car_type not in car_types:\n car_type = car_types[0]\n \"\"\"\n return days * ((10 + base_location_cost))", "def total_cost(price, state, tax=.05):\n\n if state == 'CA':\n tax = .07\n total_cost = price + price * tax\n return total_cost", "def get_cost(prices, item, quantity):\n return quantity * prices[item]", "def calculate_order_total_cost(*, order: models.OrderBase, status: str) -> dict:\n from apps.orders.models import OrderAddOn, OrderCourse, OrderMenuItem\n\n total_estimated_cost = 0\n cost_food_internal = 0\n cost_food_external = 0\n cost_beverage = 0\n cost_labor = 0\n cost_rentals = 0\n\n if status == \"review\":\n logistics_set = order.logistics.all()\n addons_set = order.addons_staff.all().filter(\n logistics__isnull=True, package__isnull=True\n )\n elif status == \"is_active\":\n logistics_set = order.logistics.is_active()\n addons_set = order.addons_staff.is_active().filter(\n logistics__isnull=True, package__isnull=True\n )\n elif status == \"is_draft\":\n logistics_set = order.logistics.is_draft()\n addons_set = order.addons_staff.is_draft().filter(\n logistics__isnull=True, package__isnull=True\n )\n elif status == \"add_package\":\n logistics_set = order.logistics.is_active()\n addons_set = order.addons_staff.is_active().filter(\n logistics__isnull=True, package__isnull=True\n )\n else:\n logistics_set = order.logistics.all()\n addons_set = order.addons_staff.all().filter(\n logistics__isnull=True, package__isnull=True\n )\n\n # ––– PACKAGES ––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n for logistics in logistics_set:\n\n if status == \"review\":\n packages = logistics.packages.all()\n addons_logistics = logistics.addons_staff.all().filter(package__isnull=True)\n elif status == \"is_active\":\n packages = logistics.packages.is_active()\n addons_logistics = logistics.addons_staff.is_active().filter(\n package__isnull=True\n )\n elif status == \"is_draft\":\n packages = logistics.packages.is_draft()\n addons_logistics = logistics.addons_staff.is_draft().filter(\n package__isnull=True\n )\n elif status == \"add_package\":\n packages = logistics.packages.is_draft()\n addons_logistics = logistics.addons_staff.is_draft().filter(\n package__isnull=True\n )\n else:\n packages = logistics.packages.all()\n addons_logistics = logistics.addons_staff.all().filter(package__isnull=True)\n\n for orderpackage in packages:\n\n if status == \"review\":\n courses = orderpackage.courses.all()\n menu_items = orderpackage.items.all()\n addons = orderpackage.addons_staff.all().filter(logistics__isnull=True)\n elif status == \"is_active\":\n courses = orderpackage.courses.is_active()\n menu_items = orderpackage.items.is_active()\n addons = orderpackage.addons_staff.is_active().filter(\n logistics__isnull=True\n )\n elif status == \"is_draft\":\n courses = 
orderpackage.courses.is_draft()\n menu_items = orderpackage.items.is_draft()\n addons = orderpackage.addons_staff.is_draft().filter(\n logistics__isnull=True\n )\n elif status == \"add_package\":\n courses = orderpackage.courses.is_draft()\n menu_items = orderpackage.items.is_draft()\n addons = orderpackage.addons_staff.is_draft().filter(\n logistics__isnull=True\n )\n else:\n courses = orderpackage.courses.all()\n menu_items = orderpackage.items.all()\n addons = orderpackage.addons_staff.all().filter(logistics__isnull=True)\n\n package_estimated_cost = 0\n combined_per_person_cost = 0\n combined_fixed_cost = 0\n\n # ––– PACKAGE –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n package_estimated_cost += (\n orderpackage.price_numeric * logistics.guest_count\n ) + orderpackage.price_numeric_fixed\n combined_per_person_cost += orderpackage.price_numeric\n combined_fixed_cost += orderpackage.price_numeric_fixed\n\n \"\"\"\n (3) possibilities:\n - if selection_quantity == 0 // NO CHOICE\n - elif selection_quantity == 999 // ARBITRARY NUMBER OF CHOICES\n – else selection_quantity specific\n if there are courses -> then selection_quantity refers to courses\n else selection_quantity refers to menu_items\n \"\"\"\n\n # for now, over_limit costs refer only to menu selections\n if menu_items.count() > orderpackage.selection_quantity:\n package_estimated_cost += orderpackage.price_over_limit_numeric * (\n (menu_items.count() - orderpackage.selection_quantity)\n * logistics.guest_count\n )\n combined_per_person_cost += orderpackage.price_over_limit_numeric * (\n menu_items.count() - orderpackage.selection_quantity\n )\n\n # course\n if courses:\n for course in courses:\n package_estimated_cost += (\n course.course.price_numeric_per_person * logistics.guest_count\n ) + course.course.price_numeric_fixed\n combined_per_person_cost += course.course.price_numeric_per_person\n combined_fixed_cost += course.course.price_numeric_fixed\n else:\n pass\n\n # menu item\n for menu_item in menu_items:\n # if menu item modification\n if menu_item.modifications.count() > 0:\n per_person_cost = menu_item.menu_item.price_numeric_per_person\n for mod in menu_item.modifications.all():\n per_person_cost += mod.price_numeric_per_person\n package_estimated_cost += (\n per_person_cost * logistics.guest_count\n ) + menu_item.menu_item.price_numeric_fixed\n combined_per_person_cost += per_person_cost\n combined_fixed_cost += menu_item.menu_item.price_numeric_fixed\n else:\n package_estimated_cost += (\n menu_item.menu_item.price_numeric_per_person\n * logistics.guest_count\n ) + menu_item.menu_item.price_numeric_fixed\n combined_per_person_cost += (\n menu_item.menu_item.price_numeric_per_person\n )\n combined_fixed_cost += menu_item.menu_item.price_numeric_fixed\n\n \"\"\"\n package_estimated_cost to this point is only package, not addons\n since (conceivably) cost_type for a package_addon could set differently than that of package,\n apply package_estimated_cost to cost_type totals, then do so separately/individually for each addon\n \"\"\"\n\n if orderpackage.package.cost_type.name == \"Food (Internal)\":\n cost_food_internal += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Food (External)\":\n cost_food_external += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Alcohol and NA Beverages\":\n cost_beverage += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Labor\":\n cost_labor += package_estimated_cost\n elif 
orderpackage.package.cost_type.name == \"Equipment and Rentals\":\n cost_rentals += package_estimated_cost\n\n # package addons\n for addon in addons:\n addon_cost = (\n addon.price_numeric * logistics.guest_count\n ) + addon.price_numeric_fixed\n package_estimated_cost += addon_cost\n\n if addon.cost_type.name == \"Food (Internal)\":\n cost_food_internal += addon_cost\n elif addon.cost_type.name == \"Food (External)\":\n cost_food_external += addon_cost\n elif addon.cost_type.name == \"Alcohol and NA Beverages\":\n cost_beverage += addon_cost\n elif addon.cost_type.name == \"Labor\":\n cost_labor += addon_cost\n elif addon.cost_type.name == \"Equipment and Rentals\":\n cost_rentals += addon_cost\n\n total_estimated_cost += package_estimated_cost\n\n # ––– ADDONS (per logistics) ––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n\n for orderaddon in addons_logistics:\n orderaddon_cost = (\n orderaddon.price_numeric * orderaddon.logistics.guest_count\n ) + orderaddon.price_numeric_fixed\n total_estimated_cost += orderaddon_cost\n if orderaddon.cost_type.name == \"Food (Internal)\":\n cost_food_internal += orderaddon_cost\n elif orderaddon.cost_type.name == \"Food (External)\":\n cost_food_external += orderaddon_cost\n elif orderaddon.cost_type.name == \"Alcohol and NA Beverages\":\n cost_beverage += orderaddon_cost\n elif orderaddon.cost_type.name == \"Labor\":\n cost_labor += orderaddon_cost\n elif orderaddon.cost_type.name == \"Equipment and Rentals\":\n cost_rentals += orderaddon_cost\n\n # ––– ADDONS (per order) ––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n\n for orderaddon in addons_set:\n total_estimated_cost += orderaddon.price_numeric_fixed\n if orderaddon.cost_type.name == \"Food (Internal)\":\n cost_food_internal += orderaddon.price_numeric_fixed\n elif orderaddon.cost_type.name == \"Food (External)\":\n cost_food_external += orderaddon.price_numeric_fixed\n elif orderaddon.cost_type.name == \"Alcohol and NA Beverages\":\n cost_beverage += orderaddon.price_numeric_fixed\n elif orderaddon.cost_type.name == \"Labor\":\n cost_labor += orderaddon.price_numeric_fixed\n elif orderaddon.cost_type.name == \"Equipment and Rentals\":\n cost_rentals += orderaddon.price_numeric_fixed\n\n # return total_estimated_cost\n return {\n \"total_estimated_cost\": total_estimated_cost,\n \"cost_food_internal\": cost_food_internal,\n \"cost_food_external\": cost_food_external,\n \"cost_beverage\": cost_beverage,\n \"cost_labor\": cost_labor,\n \"cost_rentals\": cost_rentals,\n }", "def single_cost(self, coordinates):\n\n # set default price of a single family home\n self.single = 285000 \n\n # calculate percentage of extra housing worth per extra square meter space\n self.percentage_single = self.single * 0.03\n\n # make default total price of all single family houses\n self.total_single = self.single_amount * self.single\n\n # retrieve coordinates\n coordinateslist = coordinates\n\n # check the outline of every single family house for extra free space\n for coordinate in coordinateslist:\n single = 1 \n bungalow = 2 \n maison = 3 \n\n # free space is calculated by checking the distance between house and its surroundings.\n distance = 3 \n\n x_coordinaat = coordinate[0]\n y_coordinaat = coordinate[1]\n\n # check for free space around house until another house is found\n check = True\n while check == True:\n\n # the distance between house and its surroundings is increased by one for each run to check for more free space\n # the free space 
around a house is checked on each side (up, down, left and right)\n x = x_coordinaat - distance\n x_ver = x_coordinaat + 8 + distance\n y = y_coordinaat - distance\n y_ver = y_coordinaat + 8 + distance\n\n # reset coordinates when out of boundary, because extra free space is able to 'go over' the boundaries\n if x < 0:\n x = 0 \n\n if x_ver > 160:\n x_ver = 160\n\n if y < 0:\n y = 0\n\n if y_ver > 180:\n y_ver = 180\n\n # remove current house from the gridmap\n self.neighbourhood[(y_coordinaat - 2):(y_coordinaat + 10),(x_coordinaat - 2):(x_coordinaat + 10)] = 0\n \n # check for other house in given range of free space\n try:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n\n # when no house is found, recalculate total price and add one extra meter of free space\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1\n \n # check for IndexError to be sure a coordinate does not go out of range\n except IndexError:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1\n\n # redraw house on the gridmap\n self.neighbourhood[(y_coordinaat - 2):(y_coordinaat + 10),(x_coordinaat - 2):(x_coordinaat + 10)] = 5\n self.neighbourhood[y_coordinaat:(y_coordinaat + 8),x_coordinaat:(x_coordinaat + 8)] = 1\n\n return self.total_single", "def recompute(self):\n\n try:\n finance = self.cart['finance']\n\n # Compute the product costs and get shipping class quantities to compute\n # shipping charges.\n shipping_classes = dict()\n prod_cost = Decimal(0)\n for line_item in self.cart['line_items']:\n prod_cost += line_item['price'] * line_item['quantity']\n product = Statics.products.get_id(line_item['product_id'])\n lab_product = Statics.lab_products.get_id(product['lab_product_id'])\n shipping_class_id = lab_product['shipping_class_id']\n if shipping_class_id not in shipping_classes:\n shipping_classes[shipping_class_id] = line_item['quantity']\n else:\n shipping_classes[shipping_class_id] += line_item['quantity']\n\n\n selected_shipping_cost = Decimal(0)\n state_id = self.cart['address']['ship_state_id']\n finance['shipping_costs'] = dict()\n if state_id:\n shipping_totals = Lab.shipping_compute(shipping_classes, state_id)['shipping_totals']\n for shipping_cost in shipping_totals:\n shipping_id = shipping_cost['shipping_id']\n total = shipping_cost['total']\n finance['shipping_costs'][shipping_id] = total\n if shipping_id == finance['shipping_id']:\n selected_shipping_cost = Decimal(total)\n\n # Handle promo (if it exists). We will be computing the value of the reward\n # portions of the promo. 
Note that with the exception of the shipping_credit,\n # you can't stack multiple rewards into a promo.\n prod_credit = Decimal(0.00)\n shipping_credit = Decimal(0.00)\n finance['promo_incomplete'] = False\n if hasattr(self, 'promo'):\n promo = self.promo.get_full()\n if promo['rew_percent']:\n prod_credit = Decimal((prod_cost * -promo['rew_percent']) / Decimal(100.0)).quantize(Decimal('.01'), rounding=ROUND_HALF_EVEN)\n elif promo['rew_dollar']:\n if promo['rew_dollar'] <= prod_cost:\n prod_credit = -promo['rew_dollar']\n else:\n prod_credit = -prod_cost\n finance['promo_incomplete'] = True\n elif promo['rew_product_id']:\n quantity = promo['rew_product_quantity']\n product_id = promo['rew_product_id']\n percent = promo['rew_product_percent']\n dollar = promo['rew_product_dollar']\n # We're going to build a list of prices here for every product\n # in the cart that matches this promo. Note that this list will\n # contain line_item quantity * matching line_items elements. Later\n # we will apply the promo to the correct number of items in the list.\n prices = []\n for line_item in self.cart['line_items']:\n if line_item['product_id'] == product_id:\n for i in range(line_item['quantity']):\n prices.append(line_item['price'])\n # put the highest prices first\n prices.sort()\n prices.reverse()\n if quantity > 0:\n prices = prices[0:quantity]\n if percent > 0:\n total = sum(prices)\n prod_credit = Decimal((total * -percent) / Decimal(100.0)).quantize(Decimal('.01'), rounding=ROUND_HALF_EVEN)\n elif dollar > 0:\n prices = map(lambda x: max(-dollar, -x), prices)\n prod_credit = sum(prices)\n else:\n print \"promo_id {} (type product) lacks a reward type.\".format(promo['promo_id'])\n elif promo['rew_promo_category_id']:\n quantity = promo['rew_promo_category_quantity']\n promo_category_id = promo['rew_promo_category_id']\n percent = promo['rew_promo_category_percent']\n dollar = promo['rew_promo_category_dollar']\n # We're going to build a list of prices here for every product\n # in the cart that matches this promo category. Note that this list will\n # contain line_item quantity * matching line_items elements. 
Later\n # we will apply the promo to the correct number of items in the list.\n prices = []\n for line_item in self.cart['line_items']:\n li_promo_category_id = Statics.products.get_id(line_item['product_id'])['promo_category_id']\n if li_promo_category_id == promo_category_id:\n for i in range(line_item['quantity']):\n prices.append(line_item['price'])\n # put the highest prices first\n prices.sort()\n prices.reverse()\n if quantity > 0:\n prices = prices[0:quantity]\n if percent > 0:\n total = sum(prices)\n prod_credit = Decimal((total * -percent) / Decimal(100.0)).quantize(Decimal('.01'), rounding=ROUND_HALF_EVEN)\n elif dollar > 0:\n prices = map(lambda x: max(-dollar, -x), prices)\n prod_credit = sum(prices)\n else:\n print \"promo_id {} (type promo_category) lacks a reward type.\".format(promo['promo_id'])\n elif promo['rew_shipping_credit'] <= 0:\n print \"promo_id {} lacks a reward\".format(promo['promo_id'])\n # Handle shipping\n if promo['rew_shipping_credit'] > 0:\n if promo['rew_shipping_credit'] <= selected_shipping_cost:\n shipping_credit = -promo['rew_shipping_credit']\n else:\n shipping_credit = -selected_shipping_cost\n\n\n sub_total = prod_cost + selected_shipping_cost + prod_credit + shipping_credit\n\n discount_cost = finance['discount_cost']\n try:\n tax = Statics.taxes.get_id(self.cart['address']['ship_state_id'])\n # The use of discount cost in this expression is questionable. XXX\n # Since discounts are only applied by support, I'm not going to work it out.\n tax_cost = Decimal(tax['tax'] * (sub_total + discount_cost) / 100).quantize(Decimal('.01'), rounding=ROUND_HALF_EVEN)\n except KeyError:\n tax = None\n tax_cost = Decimal(0)\n\n # apply discount last\n # discount is stored and displayed as a negative value\n if discount_cost + sub_total < 0:\n discount_cost = -sub_total\n tax_cost = Decimal(0)\n\n finance['prod_cost'] = prod_cost\n finance['prod_credit'] = prod_credit\n finance['shipping_cost'] = selected_shipping_cost\n finance['shipping_credit'] = shipping_credit\n finance['tax_cost'] = tax_cost\n finance['discount_cost'] = discount_cost\n finance['tax'] = tax\n finance['total_cost'] = sub_total + tax_cost + discount_cost\n\n # Should probably not do this if no change has occurred.\n\t c = get_cursor()\n c.execute(\"\"\"update cart \n set prod_cost = %s,\n prod_credit = %s,\n shipping_cost = %s,\n shipping_credit = %s,\n discount_cost = %s,\n tax_cost = %s,\n total_cost = %s,\n promo_incomplete = %s\n where cart_id = %s\"\"\",\n (prod_cost,\n prod_credit,\n selected_shipping_cost,\n shipping_credit,\n discount_cost,\n tax_cost,\n finance['total_cost'],\n 1 if finance['promo_incomplete'] else 0,\n self.cart['cart_id']))\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def get_total_cost_of_shipments(scorecard):\n\tsupplier = frappe.get_doc('Supplier', scorecard.supplier)\n\n\t# Look up all PO Items with delivery dates between our dates\n\tdata = frappe.db.sql(\"\"\"\n\t\t\tSELECT\n\t\t\t\tSUM(po_item.base_amount)\n\t\t\tFROM\n\t\t\t\t`tabPurchase Order Item` po_item,\n\t\t\t\t`tabPurchase Order` po\n\t\t\tWHERE\n\t\t\t\tpo.supplier = %(supplier)s\n\t\t\t\tAND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s\n\t\t\t\tAND po_item.docstatus = 1\n\t\t\t\tAND po_item.parent = po.name\"\"\",\n\t\t\t\t{\"supplier\": supplier.name, \"start_date\": scorecard.start_date, \"end_date\": scorecard.end_date}, as_dict=0)[0][0]\n\n\tif data:\n\t\treturn 
data\n\telse:\n\t\treturn 0", "def schedule_delivery(order):\n\t#Step 1: Find trucks that will match\n\t\t#Matching Step 1: Within supplier and delivery radius\n\t\t#Matching Step 2: Find trucks that don't risk cross contamination\n\t\t#Mathching Step 3: Add product to closest truck\n\t#Step 2: Add this order to the inventory\n\t#Step 3: Add the delivery date\n\t#Step 4: Add an optimized route\n\ttrucks = db.trucks.find({})\n\tpotential_couriers = []\n\tfood_category = order['product']['category']\n\tmin_distance = float('inf')\n\toptimal_truck = None\n\tcontaminants = []\n\tif food_category in contamintation_categories:\n\t\tcontaminants = contamintation_categories[food_category]\n\tidealDelivery = order['idealDeliveryDate']\n\tsupplier_lat = order['product']['supplierLocation']['geolocation']['lat']\n\tsupplier_lng = order['product']['supplierLocation']['geolocation']['lng']\n\tdelivery_lat = order['product']['deliveryLocation']['geolocation']['lat']\n\tdelivery_lng = order['product']['deliveryLocation']['geolocation']['lng']\n\tfor truck in trucks:\n\t\ttruck_lat = truck['truckLocation']['geolocation']['lat']\n\t\ttruck_lng = truck['truckLocation']['geolocation']['lng']\n\t\tcollection_radius = truck['collectionRadius']\n\t\tdelivery_radius = truck['deliveryRadius']\n\t\tsupplier_distance = get_distance(supplier_lat,supplier_lng,truck_lat,truck_lng)\n\t\tdelivery_distance = get_distance(delivery_lat,delivery_lng,truck_lat,truck_lng)\n\t\tif supplier_distance <= collection_radius and delivery_distance <= delivery_radius:\n\t\t\tpotential_couriers.append({'truck':truck,'delivery_distance':delivery_distance})\n\tif not potential_couriers:\n\t\tprint('bitchhhh')\n\t\treturn None\n\tpotential_couriers = prevent_contamination(potential_couriers,idealDelivery,food_category)\n\tfor courier in potential_couriers:\n\t\t#option 1:if the order's food category is suitable for this courier, add to this truck\n\t\t#option 2: if product is sensitive \n\t\tdelivery_distance = courier['delivery_distance']\n\t\tif delivery_distance < min_distance:\n\t\t\toptimal_truck = courier['truck']\n\t\t\tmin_distance = delivery_distance\n\t#now we have the optimal truck\n\t#we can now add this order to the optimal trucks inventory for the ideal delivery date\n\t#find if truck has delivery scheduled idealDeliveryDate, if not then we can create a new delivery object, otherwise we add to the inventory\n\tif not optimal_truck:\n\t\tprint('ah shit')\n\t\treturn False\n\tnew_delivery = is_new(optimal_truck,idealDelivery) #this truck has delivery object with deliveryDate set as idealDelivery\n\tif new_delivery == True:\n\t\tprint('MOTHER FUCK')\n\t\tdelivery_object = {\n\t\t\t'deliveryDate':idealDelivery,\n\t\t\t'route':main(optimal_truck['truckLocation'],[order]),\n\t\t\t'inventory':[order],\n\t\t\t'status':'scheduled'\n\t\t}\n\t\tdelivery_inserted = db.deliveries.insert(delivery_object)\n\t\tdelivery = db.deliveries.find_one({'_id':delivery_inserted})\n\t\ttruck_updated = db.trucks.update_one({'_id':optimal_truck['_id']},{\"$push\":{'deliveries':delivery}})\n\t\tif truck_updated.modified_count == 1:\n\t\t\treturn delivery['deliveryDate']\n\t\t#add this delivery object to truck\n\telse:\n\t\tprint('WTF')\n\t\tdelivery = db.deliveries.find_one({'_id':new_delivery})\n\t\tadded_inventory = db.deliveries.update_one({'_id':new_delivery},{\"$push\":{'inventory':order}})\n\t\t#inventory has now been added, we can now update the deliveries route\n\t\t#we do this by passing truck location, and the inventory\n\t\troute = 
main(optimal_truck['truckLocation'],delivery['inventory'])\n\t\tprint(route)\n\t\tadded_route = db.deliveries.update_one({'_id':new_delivery},{\"$set\":{'route':route}})\n\t\tdelivery_now = db.db.deliveries.find_one({'_id':new_delivery})\n\t\tif added_route.modified_count == 1:\n\t\t\tprint('hello',new_delivery)\n\t\t\tupdated_truck = db.trucks.update_one({'_id':optimal_truck['_id'],\"deliveries._id\":new_delivery},{\"$set\":{\"deliveries.$\":delivery_now}})\n\t\t\tif updated_truck.modified_count == 1:\n\t\t\t\treturn delivery['deliveryDate']\n\t\telse:\n\t\t\tprint('BUCKETTTTS')\n\treturn False", "def get_fedex_shipping_cost(self):\n Currency = Pool().get('currency.currency')\n\n fedex_credentials = self.carrier.get_fedex_credentials()\n\n if not all([\n self.fedex_drop_off_type, self.fedex_packaging_type,\n self.fedex_service_type\n ]):\n self.raise_user_error('fedex_settings_missing')\n\n rate_request = RateService(fedex_credentials)\n requested_shipment = rate_request.RequestedShipment\n\n requested_shipment.DropoffType = self.fedex_drop_off_type.value\n requested_shipment.ServiceType = self.fedex_service_type.value\n requested_shipment.PackagingType = self.fedex_packaging_type.value\n requested_shipment.PreferredCurrency = self.cost_currency.code\n\n # Shipper and Recipient\n requested_shipment.Shipper.AccountNumber = \\\n fedex_credentials.AccountNumber\n # From location is the warehouse location. So it must be filled.\n if not self.warehouse.address:\n self.raise_user_error('warehouse_address_required')\n self.warehouse.address.set_fedex_address(requested_shipment.Shipper)\n self.delivery_address.set_fedex_address(requested_shipment.Recipient)\n\n # Shipping Charges Payment\n shipping_charges = requested_shipment.ShippingChargesPayment\n shipping_charges.PaymentType = 'SENDER'\n shipping_charges.Payor.ResponsibleParty = requested_shipment.Shipper\n\n # Express Freight Detail\n fright_detail = requested_shipment.ExpressFreightDetail\n fright_detail.PackingListEnclosed = 1\n fright_detail.ShippersLoadAndCount = 2\n fright_detail.BookingConfirmationNumber = 'Ref-%s' % self.reference\n\n # Customs Clearance Detail\n self.get_fedex_customs_details(rate_request)\n\n # Label Specification\n requested_shipment.LabelSpecification.LabelFormatType = 'COMMON2D'\n requested_shipment.LabelSpecification.ImageType = 'PNG'\n requested_shipment.LabelSpecification.LabelStockType = 'PAPER_4X6'\n\n requested_shipment.RateRequestTypes = ['ACCOUNT']\n\n self.get_fedex_items_details(rate_request)\n\n try:\n response = rate_request.send_request(int(self.id))\n except RequestError, exc:\n self.raise_user_error(\n 'fedex_shipping_cost_error', error_args=(exc.message, )\n )\n\n currency, = Currency.search([\n ('code', '=', str(\n response.RateReplyDetails[0].RatedShipmentDetails[0].\n ShipmentRateDetail.TotalNetCharge.Currency\n ))\n ])\n\n return Decimal(str(\n response.RateReplyDetails[0].RatedShipmentDetails[0].ShipmentRateDetail.TotalNetCharge.Amount # noqa\n )), currency.id", "def checkout(skus):\n if not skus:\n return 0\n\n total_cost = 0\n item_prices, item_deals = load_prices()\n items_counter = Counter(skus)\n\n ordered_deals = get_ordered_deals(item_prices, item_deals)\n\n deals_cost, items_counter = evaluate_deals(items_counter, ordered_deals)\n total_cost += deals_cost\n\n remaining_cost = evaluate_remaining_items(\n items_counter, item_prices\n )\n total_cost += remaining_cost\n\n return total_cost", "def evaluate_exchange_map_cost(exchanges, distances,\n supply_site_code, sku_code) -> float:\n 
exchange_grid = exchanges[exchanges['Supply Site Code']\n == supply_site_code]\n exchange_grid = exchange_grid[exchange_grid['SKU'] == sku_code]\n\n cost = 0\n origin_index = exchange_grid.columns.to_list().index('Origin')\n destiny_index = exchange_grid.columns.to_list().index('Destiny')\n amount_index = exchange_grid.columns.to_list().index('Amount')\n for row in exchange_grid.itertuples():\n origin = row[origin_index + 1]\n destiny = row[destiny_index + 1]\n amount = row[amount_index + 1]\n\n new_cost = amount * evaluate_distance(distances, origin,\n destiny, supply_site_code)\n cost = cost + new_cost\n return cost", "def cost_of_booking(a_property, start_date, end_date):\n \n #convert the input dates into datetime elements\n input_start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n input_end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n \n #convert the property's start date into a datetime element\n property_start_date = datetime.datetime.strptime(a_property['start_date'], \"%Y-%m-%d\")\n\n #find the difference between the start and end dates given and the start date of the property\n start_delta = input_start_date - property_start_date\n end_delta = input_end_date - property_start_date\n\n #check that the input dates are in bounds\n if start_delta.days < len(a_property['availability']) and end_delta.days < len(a_property['availability']):\n\n #create a list from the property's price string\n price = a_property['price'].split(',')\n\n #initialize the total cost to 0\n total_cost = 0\n\n #iterate over the property's availability for the given date range\n #update the total cost if it is available\n #if it is not available on any date in that range, return 0\n for index, avail in enumerate(a_property['availability'][start_delta.days:end_delta.days + 1]):\n if avail == \"Y\":\n total_cost += int(price[index + start_delta.days])\n else:\n return 0\n\n return total_cost\n else:\n return \"Whoops! 
We don't have booking data available for those dates.\"", "def calculate_item_cost(item, state, tax=.05):\n if state == 'CA':\n tax = .07\n print item * (1 + tax)", "def calc_costs(region, cost_structure, backhaul, backhaul_quantity,\n global_parameters, country_parameters):\n all_sites = region['upgraded_mno_sites'] + region['new_mno_sites']\n geotype = region['geotype'].split(' ')[0]\n\n total_cost = 0\n cost_by_asset = []\n\n for asset_name1, cost in cost_structure.items():\n for asset_name2, type_of_cost in COST_TYPE.items():\n if asset_name1 == asset_name2:\n\n if asset_name1 == 'backhaul' and backhaul_quantity == 0:\n continue\n\n if asset_name1 == 'regional_node' and backhaul == 'microwave':\n continue\n\n if asset_name1 == 'regional_edge' and backhaul == 'microwave':\n continue\n\n if type_of_cost == 'capex_and_opex':\n\n cost = discount_capex_and_opex(cost, global_parameters,\n country_parameters)\n\n if asset_name1 == 'single_sector_antenna':\n cost = cost * global_parameters['sectorization']\n\n if asset_name1 == 'cots_processing':\n\n split = 'cots_processing_split_{}'.format(geotype)\n quantity = int(math.ceil(all_sites / global_parameters[split]))\n cost = cost * quantity\n\n if asset_name1 == 'low_latency_switch':\n quantity = int(math.ceil(all_sites /\n global_parameters['low_latency_switch_split']))\n cost = cost * quantity\n\n if asset_name1 == 'rack':\n quantity = int(math.ceil(all_sites /\n global_parameters['rack_split']))\n cost = cost * quantity\n\n if asset_name1 == 'cloud_power_supply_converter':\n quantity = int(math.ceil(all_sites /\n global_parameters['cloud_power_supply_converter_split']))\n cost = cost * quantity\n\n if asset_name1 == 'cloud_backhaul':\n quantity = int(math.ceil(all_sites /\n global_parameters['cloud_backhaul_split']))\n cost = (cost * quantity) / all_sites\n\n elif type_of_cost == 'capex':\n cost = cost * (1 + (country_parameters['financials']['wacc'] / 100))\n\n elif type_of_cost == 'opex':\n cost = discount_opex(cost, global_parameters, country_parameters)\n\n else:\n return 'Did not recognize cost type'\n\n total_cost += cost\n\n cost_by_asset.append({\n 'asset': asset_name1,\n 'cost': cost,\n })\n\n cost_by_asset = {item['asset']: item['cost'] for item in cost_by_asset}\n\n ran = [\n 'single_sector_antenna',\n 'single_remote_radio_unit',\n 'io_fronthaul',\n 'processing',\n 'io_s1_x2',\n 'control_unit',\n 'cooling_fans',\n 'distributed_power_supply_converter',\n 'bbu_cabinet',\n 'cots_processing',\n 'io_n2_n3',\n 'low_latency_switch',\n 'rack',\n 'cloud_power_supply_converter',\n 'power',\n ]\n\n backhaul_fronthaul = [\n 'fronthaul',\n 'backhaul',\n 'cloud_backhaul',\n ]\n\n civils = [\n 'tower',\n 'civil_materials',\n 'transportation',\n 'installation',\n 'site_rental',\n 'power_generator_battery_system',\n ]\n\n core = [\n 'regional_node',\n 'regional_edge',\n 'core_node',\n 'core_edge',\n ]\n\n ran_cost = 0\n backhaul_fronthaul_cost = 0\n civils_cost = 0\n core_cost = 0\n\n for key, value in cost_by_asset.items():\n if key in ran:\n ran_cost += value\n if key in backhaul_fronthaul:\n backhaul_fronthaul_cost += value\n if key in civils:\n civils_cost += value\n if key in core:\n core_cost += value\n\n cost_by_asset = {\n 'ran': ran_cost,\n 'backhaul_fronthaul': backhaul_fronthaul_cost,\n 'civils': civils_cost,\n 'core_network': core_cost,\n }\n\n return int(round(total_cost)), cost_by_asset", "def calculate_package_cost(*, orderpackage: models.OrderPackage, status: str) -> dict:\n\n from apps.orders.models import OrderCourse, 
OrderMenuItem, OrderAddOn\n\n package_estimated_cost = 0\n cost_food_internal = 0\n cost_food_external = 0\n cost_beverage = 0\n cost_labor = 0\n cost_rentals = 0\n\n if status == \"review\":\n courses = orderpackage.courses.all()\n menu_items = orderpackage.items.all()\n addons = orderpackage.addons_staff.all().filter(logistics__isnull=True)\n elif status == \"is_active\":\n courses = orderpackage.courses.is_active()\n menu_items = orderpackage.items.is_active()\n addons = orderpackage.addons_staff.is_active().filter(logistics__isnull=True)\n elif status == \"is_draft\":\n courses = orderpackage.courses.is_draft()\n menu_items = orderpackage.items.is_draft()\n addons = orderpackage.addons_staff.is_draft().filter(logistics__isnull=True)\n else:\n courses = orderpackage.courses.all()\n menu_items = orderpackage.items.all()\n addons = orderpackage.addons_staff.all().filter(logistics__isnull=True)\n\n combined_per_person_cost = 0\n combined_fixed_cost = 0\n\n # ––– PACKAGE –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n package_estimated_cost += (\n orderpackage.price_numeric * orderpackage.logistics.guest_count\n ) + orderpackage.price_numeric_fixed\n combined_per_person_cost += orderpackage.price_numeric\n combined_fixed_cost += orderpackage.price_numeric_fixed\n\n \"\"\"\n (3) possibilities:\n - if selection_quantity == 0 // NO CHOICE\n - elif selection_quantity == 999 // ARBITRARY NUMBER OF CHOICES\n – else selection_quantity specific\n if there are courses -> then selection_quantity refers to courses\n else selection_quantity refers to menu_items\n \"\"\"\n\n # for now, over_limit costs refer only to menu selections\n if menu_items.count() > orderpackage.selection_quantity:\n package_estimated_cost += orderpackage.price_over_limit_numeric * (\n (menu_items.count() - orderpackage.selection_quantity)\n * orderpackage.logistics.guest_count\n )\n combined_per_person_cost += orderpackage.price_over_limit_numeric * (\n menu_items.count() - orderpackage.selection_quantity\n )\n\n # course\n if courses:\n for course in courses:\n package_estimated_cost += (\n course.course.price_numeric_per_person\n * orderpackage.logistics.guest_count\n ) + course.course.price_numeric_fixed\n combined_per_person_cost += course.course.price_numeric_per_person\n combined_fixed_cost += course.course.price_numeric_fixed\n else:\n pass\n\n # menu item\n for menu_item in menu_items:\n if menu_item.modifications.count() > 0:\n per_person_cost = menu_item.menu_item.price_numeric_per_person\n for mod in menu_item.modifications.all():\n per_person_cost += mod.price_numeric_per_person\n package_estimated_cost += (\n per_person_cost * orderpackage.logistics.guest_count\n ) + menu_item.menu_item.price_numeric_fixed\n combined_per_person_cost += per_person_cost\n combined_fixed_cost += menu_item.menu_item.price_numeric_fixed\n else:\n package_estimated_cost += (\n menu_item.menu_item.price_numeric_per_person\n * orderpackage.logistics.guest_count\n ) + menu_item.menu_item.price_numeric_fixed\n combined_per_person_cost += menu_item.menu_item.price_numeric_per_person\n combined_fixed_cost += menu_item.menu_item.price_numeric_fixed\n\n # ––– PRICE DESCRIPTIVE –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n if (combined_per_person_cost > orderpackage.price_numeric) or (\n combined_fixed_cost > orderpackage.price_numeric_fixed\n ):\n if combined_fixed_cost > 0:\n price_descriptive = \"{0} per person, plus {1} fee\".format(\n 
combined_per_person_cost, combined_fixed_cost\n )\n else:\n price_descriptive = \"{0} per person\".format(combined_per_person_cost)\n else:\n if len(orderpackage.price_descriptive_short) > 0:\n price_descriptive = orderpackage.price_descriptive_short\n else:\n price_descriptive = orderpackage.price_descriptive\n\n # ––– ADDONS ––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n addons_staff = []\n\n for addon in addons:\n addon_cost = (\n addon.price_numeric * orderpackage.logistics.guest_count\n ) + addon.price_numeric_fixed\n\n temp = {}\n\n temp[\"id\"] = addon.id\n temp[\"name\"] = addon.name\n temp[\"cost\"] = addon_cost\n if addon.note:\n temp[\"note\"] = addon.note\n else:\n temp[\"note\"] = price_descriptive\n temp[\"price_numeric\"] = addon.price_numeric\n temp[\"price_numeric_fixed\"] = addon.price_numeric_fixed\n temp[\"cost_type_id\"] = addon.cost_type_id\n temp[\"package_id\"] = addon.package_id\n temp[\"logistics_id\"] = addon.logistics_id\n addons_staff.append(temp)\n\n if addon.cost_type.name == \"Food (Internal)\":\n cost_food_internal += addon_cost\n elif addon.cost_type.name == \"Food (External)\":\n cost_food_external += addon_cost\n elif addon.cost_type.name == \"Alcohol and NA Beverages\":\n cost_beverage += addon_cost\n elif addon.cost_type.name == \"Labor\":\n cost_labor += addon_cost\n elif addon.cost_type.name == \"Equipment and Rentals\":\n cost_rentals += addon_cost\n\n if orderpackage.package.cost_type.name == \"Food (Internal)\":\n cost_food_internal += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Food (External)\":\n cost_food_external += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Alcohol and NA Beverages\":\n cost_beverage += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Labor\":\n cost_labor += package_estimated_cost\n elif orderpackage.package.cost_type.name == \"Equipment and Rentals\":\n cost_rentals += package_estimated_cost\n\n return {\n \"cost\": package_estimated_cost,\n \"guest_count\": orderpackage.logistics.guest_count,\n \"price_descriptive\": price_descriptive,\n \"addons_staff\": addons_staff,\n \"cost_food_internal\": cost_food_internal,\n \"cost_food_external\": cost_food_external,\n \"cost_beverage\": cost_beverage,\n \"cost_labor\": cost_labor,\n \"cost_rentals\": cost_rentals,\n }", "def cart_edit_address():\n session = connect()\n try:\n user_id = current_user.id\n address = get_address(current_user.address_id)\n except AttributeError:\n return \"Error getting user data\"\n items = session.query(CartView).filter_by(user_id=user_id).all()\n # Calculate totals\n subtotal = 0.0\n for item in items:\n subtotal += float(item.price) * item.quantity\n if subtotal > 0:\n fee = DELIVERY_FEE\n else:\n fee = 0\n tax = (subtotal + fee) * 0.07\n total = subtotal + fee + tax\n subtotal = \"{0:.2f}\".format(subtotal)\n fee = \"{0:.2f}\".format(fee)\n tax = \"{0:.2f}\".format(tax)\n total = \"{0:.2f}\".format(total)\n if address is None:\n delivery_time = 'Please enter an address to '\n delivery_time += 'calculate estimated delivery time.'\n else:\n delivery_time = 'Your estimated delivery time is currently '\n delivery_time += '{0:.0f}'.format(get_delivery_time()/60) + ' minutes.'\n return render_template('cart.html', items=items, subtotal=subtotal,\n fee=fee, tax=tax, total=total, user=current_user, address=address, \n delivery_time=delivery_time, edit_address=True, title=\"Checkout\")", "def compute_price_component_produce(self, 
product, quantity):\n# price_unit = product.compute_production_cost_price(quantity=quantity) or 0.0\n price_unit = product.production_cost_price\n return price_unit" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lambda function handler for /backend/pricing
def handler(event, _):
    # Verify that this is a request with IAM credentials
    if iam_user_id(event) is None:
        logger.warning({"message": "User ARN not found in event"})
        return response("Unauthorized", 403)

    # Extract the request body
    try:
        body = json.loads(event["body"])
    except Exception as exc:  # pylint: disable=broad-except
        logger.warning("Exception caught: %s", exc)
        return response("Failed to parse JSON body", 400)

    for key in ["products", "address"]:
        if key not in body:
            logger.info({
                "message": "Missing '{}' in body".format(key),
                "body": body
            })
            return response("Missing '{}' in body".format(key), 400)

    # Calculate the delivery pricing
    pricing = get_pricing(body["products"], body["address"])
    logger.debug({
        "message": "Estimated delivery pricing to {}".format(pricing),
        "pricing": pricing
    })

    # Send the response back
    return response({
        "pricing": pricing
    })
[ "def ex_get_pricing(self):\r\n action = '/pricing/'\r\n response = self.connection.request(action=action, method='GET')\r\n return response.object", "def lambda_handler(event, context):\n logger = LOGGER('__cur_cost_usage__').config()\n\n budget = AccountBudget()\n # Get the list of AWS accounts from budget file\n accounts = budget.get_aws_accounts()\n\n util = Utils()\n util.clean_up_tmp_dir()\n\n prom_conf = OutputConfigParser().parse_output_config('prometheus')\n prom_endpoint = \"%s:%s\" % (prom_conf['gateway'], prom_conf['port'])\n\n cur_conf = OutputConfigParser().parse_output_config('cur')\n s3_bucket = cur_conf['bucket']\n daily_file_pattern = cur_conf['daily_file_pattern']\n monthly_file_pattern = cur_conf['monthly_file_pattern']\n\n #201912*filefomart.csv\n #TODO file pattern suffix\n daily_cur_file = \"%s_%s.csv000\" % (util.get_current_month_year(), daily_file_pattern)\n monthly_cur_file = \"%s_%s.csv000\" % (util.get_current_month_year(), monthly_file_pattern)\n\n # Daily cost usage report\n try:\n downloaded_file = util.download_s3_file(bucket=s3_bucket, filename=daily_cur_file)\n logger.info(\"Downloaded file name %s\", downloaded_file)\n except Exception as error:\n logger.exception(\"Unable to download file %s\", error)\n return\n\n # TODO Column name change\n columns = ['usagestartdate_date', 'aws_account_number', 'environment', 'aws_account_name',\n 'aws_service_code', 'operation', 'component', 'app',\n 'appenv', 'user', 'bu', 'cost_total']\n\n try:\n daily_usage_df = pd.read_csv(downloaded_file, dtype=str, header=None)\n # set the column names\n daily_usage_df.columns = columns\n\n # Convert cost_total column to float\n convert_dict = {'cost_total': float}\n daily_usage_df = daily_usage_df.astype(convert_dict)\n except Exception as error:\n logger.error(\"Unable to read daily usage CSV File %s \", error)\n return\n\n # Process latest set of records\n last_record_date = \"1970-01-01\"\n for lastrecord in getattr(daily_usage_df.tail(1), 'usagestartdate_date'):\n last_record_date = lastrecord\n\n today = util.get_day_month_year()\n\n latest_df = daily_usage_df[daily_usage_df['usagestartdate_date'] == last_record_date]\n accounts_df = latest_df[latest_df['aws_account_number'].isin(accounts)]\n\n cur_spend = CostUsageReportSpend()\n cur_spend.account_month_to_date_spend(accounts_df, today, prom_endpoint)\n\n # Clean up /tmp dir before processing monthly cur file.\n util.clean_up_tmp_dir()\n\n # Monthly cost and usage report, seperate function\n try:\n downloaded_file = util.download_s3_file(bucket=s3_bucket, filename=monthly_cur_file)\n logger.info(\"Downloaded file name %s\", downloaded_file)\n except Exception as error:\n logger.exception(\"Unable to download file, %s\", error)\n return\n\n # TODO Column name change\n columns = ['month_of_year', 'fiscal_quarter_of_year', 'as_of_date', 'bu', 'application_name',\n 'aws_account_number', 'environment', 'account_name', 'aws_service_code', 'operation',\n 'component', 'user_app', 'appenv', 'user', 'finance_part', 'monthly_cost_to_date',\n 'projected_month_end_cost', 'quarterly_cost_to_date', 'projected_quarter_end_cost']\n\n try:\n monthly_spend_df = pd.read_csv(downloaded_file, dtype=str, header=None)\n monthly_spend_df.columns = columns\n\n convert_dict = {'monthly_cost_to_date': float,\n 'projected_month_end_cost': float,\n 'quarterly_cost_to_date': float,\n 'projected_quarter_end_cost': float\n }\n monthly_spend_df = monthly_spend_df.astype(convert_dict)\n except Exception as error:\n logger.exception(\"Unable to read CSV 
File, %s\", error)\n return\n\n accounts_df = monthly_spend_df[monthly_spend_df['aws_account_number'].isin(accounts)]\n\n cur_projected = CostUsageReportProjected()\n\n # Process monthly/projected spend cost by account id\n process = Process(target=cur_projected.account_monthly_projected_spend, args=(accounts_df, prom_endpoint))\n cur_projected.processes.append(process)\n\n # start all processes\n for process in cur_projected.processes:\n process.start()\n\n # Wait for thread completion and ensure all threads have finished\n for process in cur_projected.processes:\n process.join()", "def lambda_handler(event, context):\n pre_authorization_token = event.get(\"chargeId\")\n customer_id = event.get(\"customerId\")\n\n if not pre_authorization_token:\n metrics.add_metric(name=\"InvalidPaymentRequest\", unit=MetricUnit.Count, value=1)\n logger.error({\"operation\": \"input_validation\", \"details\": event})\n raise ValueError(\"Invalid Charge ID\")\n\n try:\n logger.debug(\n f\"Collecting payment from customer {customer_id} using {pre_authorization_token} token\"\n )\n ret = collect_payment(pre_authorization_token)\n metrics.add_metric(name=\"SuccessfulPayment\", unit=MetricUnit.Count, value=1)\n tracer.put_annotation(\"PaymentStatus\", \"SUCCESS\")\n\n return ret # Step Functions can append multiple values if you return a single dict\n except PaymentException as err:\n metrics.add_metric(name=\"FailedPayment\", unit=MetricUnit.Count, value=1)\n tracer.put_annotation(\"PaymentStatus\", \"FAILED\")\n logger.exception({\"operation\": \"payment_collection\"})\n raise", "def USD_handler(payment_data: PaymentSchema):\n payment = {'shop_currency': payment_data.currency, 'shop_amount': payment_data.amount, 'payer_currency': payment_data.currency, 'shop_id': app.config['SHOP_ID'], 'shop_order_id': payment_data.shop_order_id}\n sign = generate_sign(payment)\n url = \"https://core.piastrix.com/bill/create\"\n data = {\"description\": payment_data.description,\n \"payer_currency\": payment_data.currency,\n \"shop_amount\": payment_data.amount,\n \"shop_currency\": payment_data.currency,\n \"shop_id\": app.config['SHOP_ID'],\n \"shop_order_id\": payment_data.shop_order_id,\n \"sign\": sign\n }\n response = requests.post(url, json=data)\n response_data = response.json()\n app.logger.info(f'Payment - {payment_data.shop_order_id} handler - USD responce - {response_data}')\n if response_data['result']:\n return redirect(f\"{response_data['data']['url']}\")\n else:\n return response_data", "def query_price(request):\n pricing_id = request.POST.get('pricing_id', 0)\n list_type = request.POST.get('list_type', '')\n pricing = get_object_or_404(DirectoryPricing, pk=pricing_id)\n price = pricing.get_price_for_user(request.user, list_type=list_type)\n return HttpResponse(simplejson.dumps({'price': price}))", "def EUR_handler(payment_data: PaymentSchema):\n payment = {'amount': payment_data.amount, 'currency': payment_data.currency, 'shop_id': app.config['SHOP_ID'], 'shop_order_id': payment_data.shop_order_id}\n sign = generate_sign(payment)\n app.logger.info(f'Payment - {payment_data.shop_order_id} handler - EUR')\n return render_template('pay.html', payment=payment, sign=sign, description=payment_data.description)", "def lambda_handler(args: Dict, _) -> Dict:\n logger.debug(\"Received API Gateway request: %s\", args)\n api_gateway_request = APIGatewayV2HTTPEvent.parse_obj(args)\n source_ip = api_gateway_request.request_context.http.source_ip\n dns_request = construct_dns_request(api_gateway_request)\n 
logger.debug(\"Sending DNS request: %s\", dns_request)\n dns_response: QueryMessage = loop.run_until_complete(dns_client.query(dns_request, source_ip))\n logger.debug(\"Received DNS response: %s\", dns_response)\n response = construct_api_gateway_response(dns_response)\n logger.debug(\"Returning API Gateway response: %s\", response)\n return response", "def __price(symbol: str, callback: function):\n pass", "def price_change():\n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n shift_id = int(request.form.get(\"shift\"))\n cost_price = request.form.get(\"cost_price\")\n selling_price = request.form.get(\"selling_price\")\n cost_price = request.form.get(\"cost_price\")\n product_id = request.form.get(\"product\")\n product= Product.query.filter_by(id=product_id).first()\n try:\n \n price = Price.query.filter(and_(Price.shift_id==shift_id,Price.product_id==product.id)).first()\n price.cost_price = cost_price\n price.selling_price= selling_price\n product.selling_price = selling_price\n product.cost_price = cost_price\n db.session.commit()\n flash('Done','info')\n return redirect(url_for('readings_entry'))\n except:\n\n db.session.rollback()\n flash('Something is wrong, try again','warning')\n return redirect(url_for('readings_entry'))", "def test_price_endpoint_available(self):\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def RUB_handler(payment_data: PaymentSchema):\n payment = {'amount': payment_data.amount, 'currency': payment_data.currency, 'payway': 'advcash_rub','shop_id': app.config['SHOP_ID'], 'shop_order_id': payment_data.shop_order_id}\n sign = generate_sign(payment)\n url = \"https://core.piastrix.com/invoice/create\"\n data = {\"description\": payment_data.description,\n \"amount\": payment_data.amount,\n \"currency\": payment_data.currency,\n \"payway\": 'advcash_rub',\n \"shop_id\": app.config['SHOP_ID'],\n \"shop_order_id\": payment_data.shop_order_id,\n \"sign\": sign\n }\n response = requests.post(url, json=data)\n response_data = response.json()\n app.logger.info(f'Payment - {payment_data.shop_order_id} handler - RUB responce - {response_data}')\n if response_data['result']:\n return render_template('invoice.html', url=response_data['data']['url'], data=response_data['data']['data'])\n else:\n return response_data", "def currency_custom_handler(currency):\n pass", "def handler(event, context):\n\n print(event)\n print(context)\n\n return {\n \"body\": json.dumps('Hello World!')\n}", "def lambda_handler(event, context):\n\n response_iterator = LIST_LAYER_PAGINATOR.paginate()\n for layers_value in response_iterator:\n layers = layers_value['Layers']\n\n for layer in layers:\n response_iterator = LIST_LAYER_VERSIONS_PAGINATOR.paginate(\n LayerName=layer['LayerArn']\n )\n for layer_value in response_iterator:\n response = layer_value\n\n for i in response['LayerVersions']:\n\n layer_arn = re.split(r':', i['LayerVersionArn'])\n layer_arn = \":\".join(layer_arn[:-1])\n\n response = LAMBDA_CLIENT.get_layer_version(\n LayerName=layer_arn,\n VersionNumber=i['Version']\n )\n temp = {\n 'LayerArn': response['LayerArn'],\n 'Version': response['Version'],\n 'CodeSize': str(round(float(response['Content']['CodeSize']) / 1024 / 1024, 2)),\n 'Compatible Runtimes': response['CompatibleRuntimes']\n }\n TABLE.add_row([temp['LayerArn'], temp['Version'],\n temp['CodeSize'], temp['Compatible Runtimes']])\n ALL_LAYERS.append(json.dumps(temp))\n\n # Print PrettyTable\n 
print(TABLE)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('See function logs')\n }", "def webhook(request):\n # Listen to messages from strip\n # Will return a event\n # Setup\n wh_secret = settings.STRIPE_WH_SECRET\n stripe.api_key = settings.STRIPE_SECRET_KEY\n\n # Get the webhook data and verify its signature\n payload = request.body\n sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n event = None\n\n try:\n event = stripe.Webhook.construct_event(\n payload, sig_header, wh_secret\n )\n except ValueError as e:\n # Invalid payload\n return HttpResponse(status=400)\n except stripe.error.SignatureVerificationError as e:\n # Invalid signature\n return HttpResponse(status=400)\n except Exception as e:\n return HttpResponse(content=e, status=400)\n\n # Set up a webhook handler/Make instance of stripewh_handler\n handler = StripeWH_Handler(request)\n # class calculator:\n # def add(a,b):\n # return a+b\n\n # cals = calculator\n\n # print(cals.add(2,2))\n\n # Map webhook events to relevant handler functions in webhook handler\n event_map = {\n 'payment_intent.succeeded': handler.handle_payment_intent_succeeded,\n 'payment_intent.payment_failed': handler.handle_payment_intent_payment_failed,\n }\n # Get the webhook type from Stripe\n print(event['type'])\n # event returns a dictionary\n # {\n # \"created\": 1326853478,\n # \"livemode\": false,\n # \"id\": \"evt_00000000000000\",\n # \"type\": \"invoice.updated\",\n # }\n event_type = event['type']\n # If there's a handler for it, get it from the event map\n if event_type in event_map:\n event_handler = event_map[event_type]\n # Use the generic one by default\n else:\n event_handler = handler.handle_event\n # Call the event handler with the event from webhook handler\n # same as : handler.handle_payment_intent_payment_failed(event), will trigger webhook handler\n response = event_handler(event)\n return response", "def index_handler(request):\n\n return render(request, 'spendtrackapp/plan_index.html', {\n 'page_title': 'Plan | SpendTrackApp',\n 'categories': Category.objects.all(),\n 'current_plans': Plan.get_current_plans(request.user),\n })", "def lambda_handler(event, context):\n if not is_booking_request_valid(event):\n raise ValueError(\"Invalid booking request\")\n\n try:\n ret = reserve_booking(event)\n except BookingReservationException as e:\n raise BookingReservationException(e)\n\n # Step Functions use the return to append `bookingId` key into the overall output\n return ret['bookingId']", "def add_handler(request):\n\n form = PlanForm(get_post(request))\n if not form.is_valid():\n return JsonResponse(form.errors, status=400)\n\n plan = form.save()\n return JsonResponse({\n 'id': plan.id,\n 'total': plan.total\n })", "def lambda_handler(event, context):\n a = 1\n b = 0\n\n return (a/b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deregisters an existing participant
def participants_deregister(id):
    query = "UPDATE user SET registered=0 WHERE user_id='{0}'".format(id)
    connection = app.config["PYMYSQL_CONNECTION"]

    # submit query and retrieve values
    with connection.cursor() as cursor:
        cursor.execute(query)

    return "done.", 200
[ "def remove_participant(self, participant_id):\n del self._participants[participant_id]", "def test_auth_delete_participant(self):\n pass", "async def deregister(self, ctx: Context):\n if ctx.channel.name != self._monitor_channel:\n return\n author_id = str(ctx.message.author.id)\n if author_id not in self._working_discord_mc_mapping:\n fmt = '<@!{}> You not currently have a Minecraft account reigstered.'\n await ctx.channel.send(fmt.format(author_id))\n return\n registered_uuid = uuid.UUID(self._working_discord_mc_mapping[author_id]['uuid'])\n await self._remove_user_from_whitelist(registered_uuid)\n # always remove entry from dc mc map\n self._working_discord_mc_mapping.pop(author_id)\n async with aiofiles.open(self._discord_mc_map_file_path, 'w') as dc_mc_map:\n await dc_mc_map.write(json.dumps(self._working_discord_mc_mapping, indent=4))\n # demote user to lower mc role\n # remove managed role\n await ctx.message.author.remove_roles(Object(self._managed_role_id), reason='Deregister')\n # reload whitelist\n await self._send_to_minecraft_console('whitelist reload')\n # inform user deregister was successful\n fmt = '<@!{}> Minecraft account successfully deregistered.'\n await ctx.channel.send(fmt.format(author_id))", "async def shutdown(self):\n self._task.cancel()\n\n if self._registration_resource is None:\n return\n\n try:\n await self._context.request(\n Message(code=DELETE, uri=self._registration_resource)\n ).response_raising\n except Exception as e:\n self.log.error(\"Error deregistering from the RD\", exc_info=e)", "def remove_conference(self):\n conference_id = int(input(\"Entrez l'id de la conference :\"))\n self.model.delete_conference(conference_id)", "async def destroy(tournament, participant_id):\n await api.fetch(\n \"DELETE\",\n f\"tournaments/{tournament}/participants/{participant_id}\")", "def remove_invite(self, redditor):\n if isinstance(redditor, Redditor):\n fullname = redditor.fullname\n else:\n fullname = redditor\n data = {'id': fullname}\n url = API_PATH['live_remove_invite'].format(id=self.thread.id)\n self.thread._reddit.post(url, data=data)", "def deRegister(self, playerID):\n if oidIsValid(playerID):\n if self.playersColl.find_one_and_delete({'_id': playerID}) != None:\n result = {'status': \"ok\"}\n else:\n result = {'status': \"ko\", 'reason': \"unknown playerID\"}\n else:\n result = {'status': \"ko\", 'reason': \"invalid playerID\"}\n return result", "def delete_registration(self, args):\n if \"id\" not in args:\n return bad_request(\"id not invalid\")\n try:\n register = self.get_registration_by_id(args[\"id\"])\n if register is not None:\n register.delete()\n hackathon = register.hackathon\n self.__update_register_stat(hackathon)\n\n team = self.team_manager.get_team_by_user_and_hackathon(register.user, hackathon)\n if not team:\n self.log.warn(\"team of this registered user is not found!\")\n return ok()\n self.team_manager.quit_team_forcedly(team, register.user)\n\n return ok()\n except Exception as ex:\n self.log.error(ex)\n return internal_server_error(\"failed in delete register: %s\" % args[\"id\"])", "def deregisterUserEvent(*args, **kwargs):\n \n pass", "def remove_invite(self, redditor: str | praw.models.Redditor):\n fullname = redditor.fullname if isinstance(redditor, Redditor) else redditor\n data = {\"id\": fullname}\n url = API_PATH[\"live_remove_invite\"].format(id=self.thread.id)\n self.thread._reddit.post(url, data=data)", "def remove_participant(thread, user):\n thread.threadparticipant_set.filter(user=user).delete()\n 
set_user_unread_private_threads_sync(user)\n\n remove_thread_participant.send(thread, user=user)", "def unregister_player(self, p_id):\n self.cur.execute(\"delete players where p_id = %s;\", (p_id, ))\n self.cur.execute(\"commit;\")", "def remove(uid):", "def remove_from_participant_group(worker):\n worker.source.bot.botbinding_set.objects.get(bot=worker.source.bot).delete()\n worker.source.bot.send_message(\n worker.source.participant_group,\n \"The connection was successfully stopped.\",\n reply_to_message_id=worker.source.message[\"message_id\"],\n )", "def deregister_heartbeat(self, obj):\n if obj in self.heartbeat_users:\n del self.heartbeat_users[self.heartbeat_users.index(obj)]\n else:\n dbg.debug(\"object %s, not in heartbeat_users, tried to deregister heartbeat!\" % obj, 2)", "def test_auth_delete_participant_contact(self):\n pass", "def dismiss(self) -> None:\n super().__del__()\n Person.teachers -= 1\n print(f'The {self} has been dismissed')", "def remove_email(self, address):\n if address == self.email_address:\n raise CannotRemovePrimaryEmail()\n with self.db.get_cursor() as c:\n self.app.add_event(c, 'participant', dict(id=self.id, action='remove', values=dict(email=address)))\n c.run(\"DELETE FROM emails WHERE participant_id=%s AND address=%s\",\n (self.id, address))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether the participant exists, and whether it already belongs to an existing group
def participants_is_already_grouped(id):
    connection = app.config["PYMYSQL_CONNECTION"]
    query = "SELECT group_id FROM user WHERE user_id='{0}'".format(id)

    # submit query and retrieve values
    with connection.cursor() as cursor:
        cursor.execute(query)
        query_result = cursor.fetchall()

    output = {"user_exist": 0, "group_id": None}
    print("query_result: {0}".format(query_result))
    if query_result != ():
        output["user_exist"] = 1
        if query_result != [{}]:
            output["group_id"] = query_result[0]["group_id"]

    return jsonify(output), 200
[ "def GroupExists(self, groupname):\n return groupname in self._groups", "def group_exists(c, runner, group):\n return group in groups(c, runner=runner)", "def group_exists(self, group_name):\n try:\n self.rmc.resource_groups.get(group_name)\n return True\n except CloudError:\n return False", "def __resource_group_exists(args):\n\n resource_client = __create_resource_management_client()\n\n try:\n resource_client.resource_groups.get(args.resource_group_name)\n except ResourceNotFoundError:\n return False\n\n return True", "def IsGroup(self) -> bool:", "def _check_group_exists(group_id):\n group = _server.Group.fetch(group_id)\n if group:\n raise _errors.GroupError(\"Group (%s) already exists.\" % (group_id, ))", "def group_itr_exists(self, group):\n ret_val = self._group_itr_exists(group)\n return ret_val", "async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:\n q = \"\"\"select count(id) from contact_groups where id=%s\"\"\"\n return await _object_exists(dbcon, q, (contact_group_id,))", "def exists(self):\n action = self.daofactory(classname=\"Workflow.Exists\")\n result = action.execute(spec=self.spec, owner=self.dn,\n group_name=self.vogroup,\n role_name=self.vorole,\n name=self.name, task=self.task,\n conn=self.getDBConn(),\n transaction=self.existingTransaction())\n\n return result", "def group_already_exists_error(self, group_name):\n return any(\n self.filtered_errors(\n lambda error: error[\"message\"]\n == self.group_already_exists_error_format.format(\n group_name,\n ),\n )\n )", "def _intermediary_account_exists(self):\n party_details = get_counterpartys_intermediary_details(self.acm_obj)\n if party_details.get('NAME'):\n return True\n return False", "def check_if_group_id_exists(self, id):\n query = \"SELECT * FROM epicgroups WHERE id = '{}'\".format(id)\n self.cursor.execute(query)\n return self.cursor.fetchall()", "def check_group(current_user):\n if Student.objects.get(user_id=current_user).student_group_id is None:\n return False\n else:\n return True", "def __is_member_of(self, group, recursive=False):\n return group in self.get_memberOfs(recursive=recursive)", "def in_group(self, group, dn=False):\n if dn:\n return group in self.groups()\n return group.check_member(self)", "def already_exists(candidate, returned_api_collection):\r\n _exists = []\r\n \r\n for _dictionary in returned_api_collection:\r\n _subgroup = set(flatten_iterable(candidate))\r\n _group = set(flatten_iterable(_dictionary))\r\n if _subgroup.issubset(_group):\r\n _exists.append(True)\r\n \r\n return _exists.count(True) == 1", "def isUserInGroup(user, group_name): \n return user.groups.filter(name=group_name).count() > 0", "def _validate_partner_exists(self):\n odooclient = odoo_client.get_odoo_client()\n try:\n self.partner = odooclient.partners.get(self.partner_id)[0]\n self.add_note('Contact id %s exists. (%s)'\n % (self.partner_id, self.partner.name))\n return True\n except IndexError:\n self.add_note('Partner with id %s does not exist.'\n % self.partner_id)\n return False", "def in_group(user, group):\n import re\n if re.search(',', group):\n group_list = group.split(',')\n else:\n group_list = [group]\n user_groups = []\n for group in user.groups.all():\n user_groups.append(str(group.name))\n if filter(lambda x: x in user_groups, group_list):\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Name of configuration mode to use. Modes are predefined configurations of security controls, extension allowlists and guest configuration, maintained by Microsoft.
def config_mode(self) -> str: return pulumi.get(self, "config_mode")
[ "def config_mode(self):\n\n pass", "def user_mode(self):\n return self.config['USER_MODE'].lower()", "def get_custom_mode_name(system,custom_mode):\n\n return \"%d\" % custom_mode", "def runmode(self) -> RunMode:\n return RunMode(self._config.get('runmode', RunMode.OTHER))", "def get_mode(self,):\n return self.current_mode", "def get_mode_type(self, mode: str) -> str:\n for letter, modes in self.chanmodes.items():\n if mode in modes:\n return letter\n raise ModeTypeUnknown(mode)", "def get_base_mode_name(mode_id):\n if mode_id==ADHOC_MANUAL_BASE_MODE:\n return \"CUSTOM_MANUAL\"\n elif mode_id in BASE_MODE_NAME:\n return BASE_MODE_NAME[mode_id]\n else:\n return \"Unknown mode: %d\" % mode_id", "def get_mode(self):\n return self.mode", "def get_mode(self):\n\t\tpath = self.request.path\n\t\tif path[:-1] == '/' + self._name or path[:-1] == '/' + self._name + 's':\n\t\t\treturn \"index\"\n\t\telif re.match(r'([0-9]+)(?:\\.(.+))?', path.split('/')[-1]):\n\t\t\treturn \"show\"\n\t\telse:\n\t\t\treturn path.split('/')[-1]", "def mode(self) -> GameMode:\n return self._game.mode", "def mode(self):\n # type: () -> SrtMode\n return self._mode", "def env_mode():\n if os.environ.get('DEV_MODE') is not None:\n return 'DEV'\n if os.environ.get('STAGING_MODE') is not None:\n return 'STAGING'", "def change_mode(mode_name: str):\n global mode, previous_mode\n previous_mode = mode\n if mode_name == 'editing':\n mode = 'editing'\n elif mode_name == 'playing':\n mode = 'playing'\n elif mode_name == 'level_selection':\n mode = 'level_selection'", "def mode(ctx, mode, touch_eject, autoeject_timeout, chalresp_timeout, force):\n dev = ctx.obj['dev']\n if autoeject_timeout:\n touch_eject = True\n autoeject = autoeject_timeout if touch_eject else None\n\n if mode is not None:\n if mode.transports != TRANSPORT.CCID:\n autoeject = None\n if touch_eject:\n ctx.fail('--touch-eject can only be used when setting'\n ' CCID-only mode')\n\n if not force:\n if mode == dev.mode:\n click.echo('Mode is already {}, nothing to do...'.format(mode))\n ctx.exit()\n elif not dev.has_mode(mode):\n click.echo('Mode {} is not supported on this YubiKey!'\n .format(mode))\n ctx.fail('Use --force to attempt to set it anyway.')\n force or click.confirm('Set mode of YubiKey to {}?'.format(mode),\n abort=True, err=True)\n\n try:\n dev.set_mode(mode, chalresp_timeout, autoeject)\n if not dev.can_write_config:\n click.echo(\n 'Mode set! You must remove and re-insert your YubiKey '\n 'for this change to take effect.')\n except ModeSwitchError as e:\n logger.debug('Failed to switch mode', exc_info=e)\n click.echo('Failed to switch mode on the YubiKey. Make sure your '\n 'YubiKey does not have an access code set.')\n\n else:\n click.echo('Current connection mode is: {}'.format(dev.mode))\n supported = ', '.join(t.name for t in TRANSPORT\n .split(dev.config.usb_supported))\n click.echo('Supported USB interfaces are: {}'.format(supported))", "def input_system_mode_config(self):\n print(\"\\nSystem Configuration:\")\n print(\"---------------------\\n\")\n print(\"System mode. Available options are:\\n\")\n print(textwrap.fill(\n \"1) duplex-direct - two node redundant configuration. \"\n \"Management and cluster-host networks \"\n \"are directly connected to peer ports\", 80))\n print(textwrap.fill(\n \"2) duplex - two node redundant configuration. 
\", 80))\n\n print(textwrap.fill(\n \"3) simplex - single node non-redundant configuration.\", 80))\n\n value_mapping = {\n \"1\": sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT,\n \"2\": sysinv_constants.SYSTEM_MODE_DUPLEX,\n '3': sysinv_constants.SYSTEM_MODE_SIMPLEX\n }\n user_input = prompt_for(\n \"System mode [duplex-direct]: \", '1',\n lambda text: \"Invalid choice\" if text not in value_mapping\n else None\n )\n self.system_mode = value_mapping[user_input.lower()]", "def system_mode(self):\n try:\n return SYSTEM_MODES[self._data['uiData']['SystemSwitchPosition']]\n except KeyError:\n raise APIError(\n 'Unknown system mode %i' % (\n self._data['uiData']['SystemSwitchPosition']))", "def get_config_path(self, mode=None):\n if mode is None:\n return os.path.join(self.get_config_folder(), self.run_id + '.config')\n else:\n return os.path.join(self.get_config_folder(), self.run_id + '_' + mode + '.config')", "def mode(self):\n ret = self._get_attr(\"mode\")\n return GuestMouseEventMode(ret)", "def getClientMode(self):\n return self.request('getClientMode')", "def modes(self):\n return self.get_attr_set('modes')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Specifies whether the guest configuration service is enabled or disabled.
def guest_configuration_enabled(self) -> str: return pulumi.get(self, "guest_configuration_enabled")
[ "def hw_virt_ex_enabled(self):\n ret = self._get_attr(\"HWVirtExEnabled\")\n return ret", "def supports_configuration_admin(self):\n return # boolean", "def dhcp_enabled(self):\n ret = self._get_attr(\"DHCPEnabled\")\n return ret", "def enabled(cls):\n return backend_setting(cls, cls.SETTINGS_KEY_NAME) and\\\n backend_setting(cls, cls.SETTINGS_SECRET_NAME)", "def hw_virt_ex_vpid_enabled(self):\n ret = self._get_attr(\"HWVirtExVPIDEnabled\")\n return ret", "def hpet_enabled(self):\n ret = self._get_attr(\"HPETEnabled\")\n return ret", "def get_2g_guest_access_enabled(self):\n response = self._get_methods(\n c.SERVICE_WLAN_CONFIGURATION,\n self.guest_2g_methods,\n )\n return h.zero_or_one_dict_to_boolean(response)", "def _is_smv_run_config(self):\n pass", "def disabled(kls):\n from wouso.core.config.models import BoolSetting\n\n return BoolSetting.get('setting-%s' % kls.name()).get_value() is False", "def _aag_config_disabled(self, args: parser_extensions.Namespace):\n if flags.Get(args, 'disable_aag_config'):\n return True\n if flags.Get(args, 'enable_aag_config'):\n return False\n return None", "def is_managing_services(self):\n return \"true\" == _get_from_dictionary(self.ldap_properties, \"ambari.ldap.manage_services\")", "def _is_disabled(self, name):\n conf = getattr(self.bot.config, self._resource_name)\n disabled = conf.get(\"disable\", [])\n enabled = conf.get(\"enable\", [])\n return name not in enabled and (disabled is True or name in disabled)", "def _helper_disabled(self):\n disabled = ADDON.getSetting('disabled')\n if not disabled:\n ADDON.setSetting('disabled', 'false') # create default entry\n disabled = 'false'\n\n if disabled == 'true':\n self._log('inputstreamhelper is disabled in settings.xml.')\n return True\n else:\n self._log('inputstreamhelper is enabled. You can disable inputstreamhelper by setting \\\"disabled\\\" to \\\"true\\\" in settings.xml (Note: only recommended for developers knowing what they\\'re doing!)')\n return False", "def enable_dhcp(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_dhcp\")", "def is_configured(self):\n for (command_class, index), intended_value \\\n in self.CONFIGURATION.iteritems():\n ccv = self._device.get_command_class_value(command_class, index)\n if ccv.value != intended_value:\n return False\n\n return True", "def supports_parameter_smart_configuration(self):\n return # boolean", "def is_enabled(self):\n return getattr(self._thread_locals, 'enabled', True)", "def get_5g_guest_access_enabled(self):\n response = self._get_methods(\n c.SERVICE_WLAN_CONFIGURATION,\n self.guest_5g_methods,\n )\n return h.zero_or_one_dict_to_boolean(response)", "def get_xenstore_disk_config_value(self):\n command = 'xenstore-read vm-data/auto-disk-config'\n output = self.ssh_client.exec_command(command)\n return output.strip().lower() == 'true'", "def is_iscsi_boot_supported(self):\n return utils.is_operation_allowed(\n 'PATCH', self,\n ['@Redfish.Settings', 'SettingsObject'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Specifies the URL of the proxy to be used.
def proxy_url(self) -> str: return pulumi.get(self, "proxy_url")
[ "def set_http_proxy(self, proxy_url):\r\n result = self._parse_proxy_url(proxy_url=proxy_url)\r\n scheme = result[0]\r\n host = result[1]\r\n port = result[2]\r\n username = result[3]\r\n password = result[4]\r\n\r\n self.proxy_scheme = scheme\r\n self.proxy_host = host\r\n self.proxy_port = port\r\n self.proxy_username = username\r\n self.proxy_password = password\r\n self.http_proxy_used = True\r\n\r\n self._setup_http_proxy()", "def set_proxy(self, host, port):\n self.proxy = {\n 'host': host,\n 'port': port\n }", "def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n self.__proxy = (proxytype,addr,port,rdns,username,password)", "def set_server_url(self, url: str):\n self.url = url", "def enable_https_proxy(self, value):\n self._set_property('enable_https_proxy', value)", "def proxy(self):\n if self._proxy is not None:\n if self._proxy[:7] == \"http://\":\n self._proxy = {'http://': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['http://'])\n elif self._proxy[:8] == \"https://\":\n self._proxy = {'https://': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['https://'])\n elif self._proxy[:3] == \"ftp\":\n self._proxy = {'ftp': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['ftp'])\n else:\n self._proxy = \"\"\n return self._proxy", "def set_proxy_host(self, proxy_host):\n CheckValue.check_str(proxy_host, 'proxy_host')\n self._proxy_host = proxy_host\n return self", "def set_external_url(url):", "def url(self, url):\n self._url = url", "def setdefaultproxy(proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n global _defaultproxy\r\n _defaultproxy = (proxytype,addr,port,rdns,username,password)", "def proxy_url(self, maxwidth, url):\n if self.local:\n return url\n else:\n return resize_url(url, maxwidth)", "async def set_url(self, url: str):\n self.preview_embed.url = url", "def set_proxy(proxy: str) -> bool:\n resp = get_config()\n if not resp:\n return False\n data = resp[\"result\"]\n path = resp[\"path\"]\n data[\"proxy\"] = proxy\n with open(path, \"w\") as file:\n json.dump(data, file, sort_keys=True, indent=\"\")\n return True", "def set_proxy_port(self, proxy_port):\n CheckValue.check_int_ge_zero(proxy_port, 'proxy_port')\n self._proxy_port = proxy_port\n return self", "def __init__(self,\n service_url=None,\n service_port=None,\n bzedge_conf_file=None,\n timeout=DEFAULT_HTTP_TIMEOUT,\n **kwargs):\n\n super(Proxy, self).__init__(service_url=service_url,\n service_port=service_port,\n bzedge_conf_file=bzedge_conf_file,\n timeout=timeout,\n **kwargs)", "def configuration_url(self) -> str:\n protocol: str = \"https://\" if self.ssl else \"http://\"\n return f\"{protocol}{self.ip}:{self.port}\"", "def set_url(self, url):\n self._attributes[VOPROV['url']] = {url}", "def server_url(self, value):\n self._url = value\n self.wait_for_page()", "def set_proxy_username(self, proxy_username):\n CheckValue.check_str(proxy_username, 'proxy_username')\n self._proxy_username = proxy_username\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The error additional info.
def additional_info(self) -> Sequence['outputs.ErrorAdditionalInfoResponse']: return pulumi.get(self, "additional_info")
[ "def _error_details(self):\n return ErrorDetails(\n protocol=self._protocol_error,\n noniterable_str=self._noniterable_str_error,\n typed_dict=self._typed_dict_error\n )", "def _err_description(self) -> str:\n return ''", "def errormessage(self):\n return self._errormessage", "def get_error(self):\n\n return (self.error_msg, self.error_state)", "def errorMessage(self):\n\n if self.lastError:\n return self.lastError[\"M\"]\n else:\n return \"\"", "def error_details(self):\n\n # TODO There is no attempt so far to eliminate duplicates.\n # Duplicates could be eliminated based on exception type\n # and message or exception type and file name/line number\n # presuming the latter are available. Right now the file\n # name and line number aren't captured so can't rely on it.\n\n # TODO There are no constraints in place on what keys/values\n # can be in params dictionaries. Need to convert values to\n # strings at some point.\n\n if not self.errors:\n return\n\n for error in self.errors:\n params = {}\n params[\"stack_trace\"] = error.stack_trace\n\n intrinsics = {'spanId': error.span_id, 'error.expected': error.expected}\n intrinsics.update(self.trace_intrinsics)\n params['intrinsics'] = intrinsics\n\n params['agentAttributes'] = {}\n for attr in self.agent_attributes:\n if attr.destinations & DST_ERROR_COLLECTOR:\n params['agentAttributes'][attr.name] = attr.value\n\n params['userAttributes'] = {}\n for attr in self.user_attributes:\n if attr.destinations & DST_ERROR_COLLECTOR:\n params['userAttributes'][attr.name] = attr.value\n\n # add error specific custom params to this error's userAttributes\n\n err_attrs = create_user_attributes(error.custom_params,\n self.settings.attribute_filter)\n for attr in err_attrs:\n if attr.destinations & DST_ERROR_COLLECTOR:\n params['userAttributes'][attr.name] = attr.value\n\n yield newrelic.core.error_collector.TracedError(\n start_time=error.timestamp,\n path=self.path,\n message=error.message,\n type=error.type,\n parameters=params)", "def custom_error_details(hint: str, description: str, message: str) -> dict:\n intermediate = {\n \"hint\": hint,\n \"description\": description,\n \"message\": message,\n }\n return {key: val for key, val in intermediate.items() if val is not None}", "def error(self) -> str:\n error_file = ErrorFile()\n return f'-e \"{error_file.path}\"'", "def add_detected_error(self,e):\n exc_tb = sys.exc_info()[2]\n exc_type = sys.exc_info()[0]\n exc_line = exc_tb.tb_lineno\n f_name = traceback.extract_tb(exc_tb,1)[0][2]\n t_err_msg = \"{} | Exception Type: {} | At Function: {} | Line No: {} | Error Message: {}\"\n t_err_msg = t_err_msg.format(self.host, exc_type, f_name, exc_line, e)\n self.add_cmnt_msg(t_err_msg, \"Error\")", "def error_data(self):\n return self._error_data", "def error(self):\n return self.args[0]", "def _exc_info_to_string(self, err):\n exctype, value, tb = err\n msgLines = traceback.format_exception(exctype, value, tb)\n return ''.join(msgLines)", "def error_info(self):\n ret = self._get_attr(\"errorInfo\")\n return IVirtualBoxErrorInfo(ret)", "def get_custom_error(self):\n\n return self.custom_error", "def description(self) -> str:\n return self._error_description", "def overall_error(self):\n return self._overall_error", "def error_string(self):\n if 1 <= self._error_reason <= 3:\n reason_string = self._error_reason_strings[self._error_reason-1]\n return reason_string.format(self.error_data)\n else:\n return \"Reason {} Data {}\".format(\n self.error_reason, hexlify(self.error_data))", "def __get_error(self):\n 
return self.__frame_error", "def _get_extra_info(self):\n # pylint: disable=no-self-use\n return None", "def error_report(self) -> str:\n warning = f\"{self.ename} failed. See below for details \\n\"\n if self.error:\n warning += (\n f\"{self.entity.type} {self.ename} produced the following error \\n\"\n )\n warning += f\"Error: {self.error} \\n\"\n if self.output:\n warning += f\"Output: {self.output} \\n\"\n warning += f\"Job status at failure: {self.status} \\n\"\n warning += f\"Launcher status at failure: {self.raw_status} \\n\"\n warning += f\"Job returncode: {self.returncode} \\n\"\n warning += f\"Error and output file located at: {self.entity.path}\"\n return warning" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }